Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Sensor fusion #114

Open
wants to merge 4 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions aloha_scripts/auto_record.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Record a batch of teleop episodes for a task.
# Usage: auto_record.sh <task_name> <num_episodes>
# Runs record_episodes.py once per episode; aborts on the first failure.

# Both arguments are required; [ "$2" -lt 0 ] would error on a missing arg.
if [ -z "$1" ] || [ -z "$2" ]; then
    echo "Usage: $0 <task_name> <num_episodes>"
    exit 1
fi

if [ "$2" -lt 0 ]; then
    echo "# of episodes not valid"
    # exit 1, not bare exit: a bare exit returns the previous command's
    # status (0 here), so callers could not detect the failure.
    exit 1
fi

echo "Task: $1"
for (( i=0; i<$2; i++ ))
do
    echo "Starting episode $i"
    # Test the command directly instead of inspecting $? afterwards.
    if ! python3 record_episodes.py --task "$1"; then
        echo "Failed to execute command. Returning"
        exit 1
    fi
done
69 changes: 69 additions & 0 deletions aloha_scripts/constants.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
### Task parameters
import numpy as np

DATA_DIR = '/home/bmv/aloha/src/dataset'
TASK = 'Rice_scoop_master_150_ft'
TASK_CONFIGS = {
    TASK: {
        'dataset_dir': DATA_DIR + '/' + TASK,
        'num_episodes': 150,
        'episode_len': 700,
        'camera_names': ['cam_low', 'cam_high', 'cam_wrist'],
        'json_dir': '/home/bmv/act_dec2023/segmentation' + '/' + TASK + '_json',
    },
}

### ALOHA fixed constants
MASTER_IP = "192.168.137.12"
FOLLOWER_IP = "192.168.137.1"

FREQUENCY = 50  # Hz
DT = 1 / FREQUENCY  # control-loop period in seconds

CAM_WIDTH = 640
CAM_HEIGHT = 480

# Camera serial numbers (presumably RealSense device serials — confirm on rig).
FRONT_CAM_ID = '936322071211'
WRIST_CAM_ID = '128422271715'
WRIST_CAM_MASTER_ID = '127122270237'

VIZ_DIR = "base_press_ft"

JOINT_NAMES = ["base", "shoulder", "elbow", "wrist_3", "wrist_2", "wrist_1"]
HOME_POSE = np.deg2rad([-93, -54, -109, -76, 90, 0])

# Left finger position limits (qpos[7]), right_finger = -1 * left_finger
MASTER_GRIPPER_POSITION_OPEN = 0.02417
MASTER_GRIPPER_POSITION_CLOSE = 0.01244
PUPPET_GRIPPER_POSITION_OPEN = 0.05800
PUPPET_GRIPPER_POSITION_CLOSE = 0.01844

# Gripper joint limits (qpos[6])
MASTER_GRIPPER_JOINT_OPEN = 0.3083
MASTER_GRIPPER_JOINT_CLOSE = -0.6842
PUPPET_GRIPPER_JOINT_OPEN = 1.4910
PUPPET_GRIPPER_JOINT_CLOSE = -0.6213

############################ Helper functions ############################
# PEP 8 (E731): named lambdas replaced with def functions; names and
# behavior are unchanged so all existing call sites still work.


def MASTER_GRIPPER_POSITION_NORMALIZE_FN(x):
    """Map a master finger position to [0, 1] (0: closed, 1: open)."""
    return (x - MASTER_GRIPPER_POSITION_CLOSE) / (MASTER_GRIPPER_POSITION_OPEN - MASTER_GRIPPER_POSITION_CLOSE)


def PUPPET_GRIPPER_POSITION_NORMALIZE_FN(x):
    """Map a puppet finger position to [0, 1] (0: closed, 1: open)."""
    return (x - PUPPET_GRIPPER_POSITION_CLOSE) / (PUPPET_GRIPPER_POSITION_OPEN - PUPPET_GRIPPER_POSITION_CLOSE)


def MASTER_GRIPPER_POSITION_UNNORMALIZE_FN(x):
    """Map a normalized value in [0, 1] back to a master finger position."""
    return x * (MASTER_GRIPPER_POSITION_OPEN - MASTER_GRIPPER_POSITION_CLOSE) + MASTER_GRIPPER_POSITION_CLOSE


def PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(x):
    """Map a normalized value in [0, 1] back to a puppet finger position."""
    return x * (PUPPET_GRIPPER_POSITION_OPEN - PUPPET_GRIPPER_POSITION_CLOSE) + PUPPET_GRIPPER_POSITION_CLOSE


def MASTER2PUPPET_POSITION_FN(x):
    """Convert a master finger position into the equivalent puppet position."""
    return PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(MASTER_GRIPPER_POSITION_NORMALIZE_FN(x))


def MASTER_GRIPPER_JOINT_NORMALIZE_FN(x):
    """Map a master gripper joint angle to [0, 1] (0: closed, 1: open)."""
    return (x - MASTER_GRIPPER_JOINT_CLOSE) / (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE)


def PUPPET_GRIPPER_JOINT_NORMALIZE_FN(x):
    """Map a puppet gripper joint angle to [0, 1] (0: closed, 1: open)."""
    return (x - PUPPET_GRIPPER_JOINT_CLOSE) / (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE)


def MASTER_GRIPPER_JOINT_UNNORMALIZE_FN(x):
    """Map a normalized value in [0, 1] back to a master gripper joint angle."""
    return x * (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE) + MASTER_GRIPPER_JOINT_CLOSE


def PUPPET_GRIPPER_JOINT_UNNORMALIZE_FN(x):
    """Map a normalized value in [0, 1] back to a puppet gripper joint angle."""
    return x * (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE) + PUPPET_GRIPPER_JOINT_CLOSE


def MASTER2PUPPET_JOINT_FN(x):
    """Convert a master gripper joint angle into the equivalent puppet angle."""
    return PUPPET_GRIPPER_JOINT_UNNORMALIZE_FN(MASTER_GRIPPER_JOINT_NORMALIZE_FN(x))


def MASTER_GRIPPER_VELOCITY_NORMALIZE_FN(x):
    """Scale a master finger velocity by the master position range."""
    return x / (MASTER_GRIPPER_POSITION_OPEN - MASTER_GRIPPER_POSITION_CLOSE)


def PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(x):
    """Scale a puppet finger velocity by the puppet position range."""
    return x / (PUPPET_GRIPPER_POSITION_OPEN - PUPPET_GRIPPER_POSITION_CLOSE)


def MASTER_POS2JOINT(x):
    """Convert a master finger position to the master gripper joint angle."""
    return MASTER_GRIPPER_POSITION_NORMALIZE_FN(x) * (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE) + MASTER_GRIPPER_JOINT_CLOSE


def MASTER_JOINT2POS(x):
    """Convert a master gripper joint angle to the master finger position."""
    return MASTER_GRIPPER_POSITION_UNNORMALIZE_FN((x - MASTER_GRIPPER_JOINT_CLOSE) / (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE))


def PUPPET_POS2JOINT(x):
    """Convert a puppet finger position to the puppet gripper joint angle."""
    return PUPPET_GRIPPER_POSITION_NORMALIZE_FN(x) * (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE) + PUPPET_GRIPPER_JOINT_CLOSE


def PUPPET_JOINT2POS(x):
    """Convert a puppet gripper joint angle to the puppet finger position."""
    return PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN((x - PUPPET_GRIPPER_JOINT_CLOSE) / (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE))


MASTER_GRIPPER_JOINT_MID = (MASTER_GRIPPER_JOINT_OPEN + MASTER_GRIPPER_JOINT_CLOSE) / 2
121 changes: 121 additions & 0 deletions aloha_scripts/real_env.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
import sys
import os

# Make the project root importable regardless of the current working directory.
# os.path.expanduser is required: entries in sys.path are used verbatim, so a
# literal "~/act_dec2023" would never match a real directory.
directory_to_add = os.path.expanduser("~/act_dec2023")
sys.path.append(directory_to_add)


import time
import numpy as np
import collections
import matplotlib.pyplot as plt
import dm_env

from aloha_scripts.constants import DT, HOME_POSE, MASTER_IP, FOLLOWER_IP
from aloha_scripts.robot_utils import Recorder, ImageRecorder
from aloha_scripts.teleop import Follower, Master, generateTrajectory
# from interbotix_xs_modules.arm import InterbotixManipulatorXS
# from interbotix_xs_msgs.msg import JointSingleCommand

# import IPython
# e = IPython.embed


class RealEnv:
    """Environment wrapper for a single real follower arm plus cameras.

    Despite the bi-manual docstring this code was derived from, the visible
    implementation drives ONE follower (``Follower(FOLLOWER_IP)``) and one
    ``ImageRecorder``.

    Observation (dict) as built by ``get_observation``:
        ``camera_0..camera_N``: latest frames from ImageRecorder.get_images()
        ``robot_joint``:        follower joint angles (np.array)
        ``robot_eef_pose``:     follower TCP pose (np.array)
        ``timestamp``:          wall-clock time of the read (time.time())

    Action: whatever ``Follower.operate`` accepts — presumably absolute joint
    targets matching ``getJointAngles()``; confirm against teleop.Follower.
    """

    def __init__(self):
        # Connects to the follower robot over the network immediately.
        self.puppet = Follower(FOLLOWER_IP)
        # Number of stacked observations returned by reset().
        self.n_obs_steps = 2
        # self.recorder = Recorder('left', init_node=False)
        # init_node=True: this object owns the ROS node for the cameras.
        self.image_recorder = ImageRecorder(init_node=True)


    def get_qpos(self):
        """Return the follower's current joint angles as an ndarray."""
        qpos = self.puppet.getJointAngles()
        return np.array(qpos)

    def get_qvel(self):
        """Return the follower's current joint velocities as an ndarray."""
        qvel = self.puppet.getJointVelocity()
        return np.array(qvel)

    def get_effort(self):
        """Return the follower's current joint efforts as an ndarray."""
        qeff = self.puppet.getJointEffort()
        return np.array(qeff)

    def get_images(self):
        """Return the latest cached camera frames (dict keyed camera_i)."""
        return self.image_recorder.get_images()

    def get_timestamp(self):
        """Return the current wall-clock time in seconds."""
        return time.time()

    def get_TCP(self):
        """Return the follower's tool-center-point pose as an ndarray."""
        tcp = self.puppet.getTCPPosition()
        return np.array(tcp)

    def get_observation(self):
        """Assemble one observation dict: images + joints + TCP + timestamp."""
        obs = dict(self.get_images())
        obs['robot_joint'] = self.get_qpos()
        obs['robot_eef_pose'] = self.get_TCP()
        obs['timestamp'] = self.get_timestamp()
        return obs

    def get_reward(self):
        # No task reward is computed on the real robot.
        return 0

    def reset(self, fake=False):
        """Return a FIRST TimeStep whose observation stacks n_obs_steps reads.

        Each observation key maps to a list of n_obs_steps consecutive values.
        NOTE(review): when ``fake=True`` this method returns None — the whole
        body, including the return, is guarded by ``if not fake`` — confirm
        callers never rely on a TimeStep from a fake reset.
        """
        if not fake:
            initial_observation = {}
            for step in range(self.n_obs_steps):
                obs_data = self.get_observation()
                for key, value in obs_data.items():
                    # Check if the key is already in the dictionary
                    if key not in initial_observation:
                        initial_observation[key] = [value]
                    else:
                        initial_observation[key].append(value)

            return dm_env.TimeStep(
                step_type=dm_env.StepType.FIRST,
                reward=self.get_reward(),
                discount=None,
                observation=initial_observation)

    def step(self, action):
        """Send one action to the follower and return a MID TimeStep."""
        self.puppet.operate(action)
        # time.sleep(DT)
        return dm_env.TimeStep(
            step_type=dm_env.StepType.MID,
            reward=self.get_reward(),
            discount=None,
            observation=self.get_observation())


def get_action(master):
    """Return the master arm's current joint angles as the teleop action.

    Args:
        master: object exposing ``getJointAngles()`` returning six joint
            angles for the single master arm.

    Returns:
        The value of ``master.getJointAngles()``, unmodified.
    """
    # The previous ``action = np.zeros(6)`` placeholder was dead code: it was
    # unconditionally overwritten by the read below, and its comment
    # ("6 joint + 1 gripper, for two arms") did not match the 6-value reality.
    return master.getJointAngles()


def make_real_env():
    """Factory: construct and return a connected RealEnv instance."""
    return RealEnv()
181 changes: 181 additions & 0 deletions aloha_scripts/robot_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,181 @@
import numpy as np
import time
from .constants import DT
from collections import deque
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
class ImageRecorder:
    """Subscribe to one ROS image topic per camera and cache the latest frame.

    Each incoming frame is converted to a BGR array via cv_bridge and stored
    on the instance as ``<cam_name>_image`` along with the message header
    timestamp (``<cam_name>_secs`` / ``<cam_name>_nsecs``).
    """

    def __init__(self, init_node=True, is_debug=False):
        """Set up subscribers for cam_high, cam_low and cam_wrist.

        Args:
            init_node: when True, initialize a ROS node named 'image_recorder'.
            is_debug: when True, keep the last 50 frame timestamps per camera
                so print_diagnostics() can report the frame rate.
        """
        self.is_debug = is_debug
        self.bridge = CvBridge()
        self.camera_names = ['cam_high', 'cam_low', 'cam_wrist']
        if init_node:
            rospy.init_node('image_recorder', anonymous=True)
        for cam_name in self.camera_names:
            # Latest frame + timestamp, populated by the subscriber callback.
            setattr(self, f'{cam_name}_image', None)
            setattr(self, f'{cam_name}_secs', None)
            setattr(self, f'{cam_name}_nsecs', None)
            if cam_name == 'cam_high':
                callback_func = self.image_cb_cam_high
            elif cam_name == 'cam_low':
                callback_func = self.image_cb_cam_low
            elif cam_name == 'cam_wrist':
                callback_func = self.image_cb_cam_wrist
            else:
                raise NotImplementedError
            rospy.Subscriber(f"/usb_{cam_name}/image_raw", Image, callback_func)
            if self.is_debug:
                setattr(self, f'{cam_name}_timestamps', deque(maxlen=50))
        # Give the subscribers a moment to receive their first frames.
        time.sleep(0.5)

    def image_cb(self, cam_name, data):
        """Cache the newest frame and its header timestamp for cam_name."""
        setattr(self, f'{cam_name}_image', self.bridge.imgmsg_to_cv2(data, desired_encoding='bgr8'))
        setattr(self, f'{cam_name}_secs', data.header.stamp.secs)
        setattr(self, f'{cam_name}_nsecs', data.header.stamp.nsecs)

        if self.is_debug:
            # BUG FIX: previously ``secs + secs * 1e-9`` — the fractional part
            # must come from the nanoseconds field of the header stamp.
            getattr(self, f'{cam_name}_timestamps').append(
                data.header.stamp.secs + data.header.stamp.nsecs * 1e-9)

    def image_cb_cam_high(self, data):
        cam_name = 'cam_high'
        return self.image_cb(cam_name, data)

    def image_cb_cam_low(self, data):
        cam_name = 'cam_low'
        return self.image_cb(cam_name, data)

    def image_cb_cam_wrist(self, data):
        cam_name = 'cam_wrist'
        return self.image_cb(cam_name, data)

    def get_images(self):
        """Return the latest frames keyed ``camera_0``..``camera_N``.

        The index follows the order of self.camera_names, so camera_0 is
        cam_high, camera_1 is cam_low and camera_2 is cam_wrist.
        """
        image_dict = dict()
        new_img_dict = dict()
        for cam_name in self.camera_names:
            image_dict[cam_name] = getattr(self, f'{cam_name}_image')
        for i, v in enumerate(image_dict):
            new_img_dict[f'camera_{i}'] = image_dict[v]
        return new_img_dict

    def print_diagnostics(self):
        """Print the observed frame rate per camera (requires is_debug=True)."""
        def dt_helper(l):
            l = np.array(l)
            diff = l[1:] - l[:-1]
            return np.mean(diff)
        for cam_name in self.camera_names:
            image_freq = 1 / dt_helper(getattr(self, f'{cam_name}_timestamps'))
            print(f'{cam_name} {image_freq=:.2f}')
        print()

class Recorder:
    """Cache the follower arm's latest joint state and arm command from ROS.

    The actual subscribers are currently commented out; the callbacks remain
    so they can be re-wired to the puppet topics when needed.
    """

    def __init__(self, side, init_node=True, is_debug=False):
        """Prepare state slots and (optionally) a ROS node.

        Args:
            side: arm identifier (e.g. 'left'); only used by the commented-out
                topic names below.
            init_node: when True, initialize a ROS node named 'recorder'.
            is_debug: when True, keep timestamp deques for print_diagnostics().
        """
        # Removed redundant/unused function-level imports: ``deque`` and
        # ``rospy`` are already imported at module level, and ``JointState`` /
        # ``teleop`` were never referenced by live code.
        self.secs = None
        self.nsecs = None
        self.qpos = None
        self.effort = None
        self.arm_command = None
        # self.gripper_command = None
        self.is_debug = is_debug

        if init_node:
            rospy.init_node('recorder', anonymous=True)
        # rospy.Subscriber(f"/puppet_{side}/joint_states", JointState, self.puppet_state_cb)
        # rospy.Subscriber(f"/puppet_{side}/commands/joint_group", JointGroupCommand, self.puppet_arm_commands_cb)
        # rospy.Subscriber(f"/puppet_{side}/commands/joint_single", JointSingleCommand, self.puppet_gripper_commands_cb)
        if self.is_debug:
            self.joint_timestamps = deque(maxlen=50)
            self.arm_command_timestamps = deque(maxlen=50)
            # self.gripper_command_timestamps = deque(maxlen=50)
        time.sleep(0.1)

    def puppet_state_cb(self, data):
        """Cache joint position/velocity/effort from a JointState message."""
        self.qpos = data.position
        self.qvel = data.velocity
        self.effort = data.effort
        self.data = data
        if self.is_debug:
            self.joint_timestamps.append(time.time())

    def puppet_arm_commands_cb(self, data):
        """Cache the latest commanded arm position."""
        self.arm_command = data.cmd
        if self.is_debug:
            self.arm_command_timestamps.append(time.time())

    # def puppet_gripper_commands_cb(self, data):
    #     self.gripper_command = data.cmd
    #     if self.is_debug:
    #         self.gripper_command_timestamps.append(time.time())

    def print_diagnostics(self):
        """Print the observed message rates (requires is_debug=True)."""
        def dt_helper(l):
            l = np.array(l)
            diff = l[1:] - l[:-1]
            return np.mean(diff)

        joint_freq = 1 / dt_helper(self.joint_timestamps)
        arm_command_freq = 1 / dt_helper(self.arm_command_timestamps)
        # gripper_command_freq = 1 / dt_helper(self.gripper_command_timestamps)

        print(f'{joint_freq=:.2f}\n{arm_command_freq=:.2f}\n')

def get_arm_joint_positions(bot):
    """Return the first six joint positions reported by the bot's arm."""
    joint_states = bot.arm.core.joint_states
    return joint_states.position[:6]

# def get_arm_gripper_positions(bot):
# joint_position = bot.gripper.core.joint_states.position[6]
# return joint_position

def move_arms(bot_list, target_pose_list, move_time=1):
    """Drive each bot from its current pose to its target over move_time.

    Every bot follows a straight-line joint-space trajectory sampled at DT
    intervals; one waypoint per bot is issued each tick, then the loop sleeps
    for DT before the next tick.
    """
    num_steps = int(move_time / DT)
    start_poses = [get_arm_joint_positions(bot) for bot in bot_list]
    trajectories = [
        np.linspace(start, target, num_steps)
        for start, target in zip(start_poses, target_pose_list)
    ]
    for step in range(num_steps):
        for bot, trajectory in zip(bot_list, trajectories):
            bot.arm.set_joint_positions(trajectory[step], blocking=False)
        time.sleep(DT)

# def move_grippers(bot_list, target_pose_list, move_time):
# gripper_command = JointSingleCommand(name="gripper")
# num_steps = int(move_time / DT)
# curr_pose_list = [get_arm_gripper_positions(bot) for bot in bot_list]
# traj_list = [np.linspace(curr_pose, target_pose, num_steps) for curr_pose, target_pose in zip(curr_pose_list, target_pose_list)]
# for t in range(num_steps):
# for bot_id, bot in enumerate(bot_list):
# gripper_command.cmd = traj_list[bot_id][t]
# bot.gripper.core.pub_single.publish(gripper_command)
# time.sleep(DT)

# def setup_puppet_bot(bot):
# bot.dxl.robot_reboot_motors("single", "gripper", True)
# bot.dxl.robot_set_operating_modes("group", "arm", "position")
# bot.dxl.robot_set_operating_modes("single", "gripper", "current_based_position")
# torque_on(bot)

# def setup_master_bot(bot):
# bot.dxl.robot_set_operating_modes("group", "arm", "pwm")
# bot.dxl.robot_set_operating_modes("single", "gripper", "current_based_position")
# torque_off(bot)

# def set_standard_pid_gains(bot):
# bot.dxl.robot_set_motor_registers("group", "arm", 'Position_P_Gain', 800)
# bot.dxl.robot_set_motor_registers("group", "arm", 'Position_I_Gain', 0)

# def set_low_pid_gains(bot):
# bot.dxl.robot_set_motor_registers("group", "arm", 'Position_P_Gain', 100)
# bot.dxl.robot_set_motor_registers("group", "arm", 'Position_I_Gain', 0)

# def torque_off(bot):
# bot.dxl.robot_torque_enable("group", "arm", False)
# bot.dxl.robot_torque_enable("single", "gripper", False)

# def torque_on(bot):
# bot.dxl.robot_torque_enable("group", "arm", True)
# bot.dxl.robot_torque_enable("single", "gripper", True)
Loading