np.dot(), np.multiply(), tf.matmul(), tf.multiply()

import tensorflow as tf
import numpy as np

x1 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
x2 = np.array([[2, 1, 1], [2, 1, 1], [2, 1, 1]])
y1 = np.dot(x1, x2)
y2 = np.multiply(x1, x2)
print('1. np.dot\n', y1)
print('2. np.multiply\n', y2)

x3 = tf.constant([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
x4 = tf.constant([[2, 1, 1], [2, 1, 1], [2, 1, 1]])
y3 = tf.matmul(x3, x4)
y4 = tf.multiply(x3, x4)

# tf.Session is TensorFlow 1.x API; under TF 2.x use tf.compat.v1.Session with eager execution disabled
with tf.Session() as sess:
    print('3. tf.matmul\n', sess.run(y3))
    print('4. tf.multiply\n', sess.run(y4))
    print('5. np.dot\n', sess.run(np.dot(x3, x4)))
    print('6. np.multiply\n', sess.run(np.multiply(x3, x4)))

Output:

1. np.dot
 [[12  6  6]
 [12  6  6]
 [12  6  6]]
2. np.multiply
 [[2 2 3]
 [2 2 3]
 [2 2 3]]
3. tf.matmul
 [[12  6  6]
 [12  6  6]
 [12  6  6]]
4. tf.multiply
 [[2 2 3]
 [2 2 3]
 [2 2 3]]
5. np.dot
 [[2 2 3]
 [2 2 3]
 [2 2 3]]
6. np.multiply
 [[2 2 3]
 [2 2 3]
 [2 2 3]]
  • tf.matmul() performs matrix multiplication
  • tf.multiply() performs element-wise multiplication
  • np.dot() performs matrix multiplication (on 2-D arrays)
  • np.multiply() performs element-wise multiplication (a quick NumPy-only check follows below)
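
To make the list above concrete, here is a minimal NumPy-only sketch (the arrays mirror x1 and x2 from the example): for 2-D arrays, np.dot, np.matmul and the @ operator all give the matrix product, while np.multiply matches the * operator.

import numpy as np

A = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
B = np.array([[2, 1, 1], [2, 1, 1], [2, 1, 1]])

# Matrix product: all three forms agree for 2-D arrays
assert (np.dot(A, B) == A @ B).all()
assert (np.matmul(A, B) == A @ B).all()

# Element-wise product: both forms agree
assert (np.multiply(A, B) == A * B).all()

print(A @ B)  # [[12  6  6] ...] -- same as np.dot / tf.matmul
print(A * B)  # [[2 2 3] ...]    -- same as np.multiply / tf.multiply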

Note:

 print('np.dot\n',sess.run(np.dot(x3,x4)))
 print('np.multiply\n',sess.run(np.multiply(x3,x4)))

Both of these print:

 [[2 2 3]
 [2 2 3]
 [2 2 3]]

The reason: in TF 1.x graph mode a tf.Tensor cannot be converted into a numeric NumPy array, so NumPy wraps each tensor in a 0-d object array. For 0-d inputs np.dot degenerates to plain multiplication, and for object dtype both np.dot and np.multiply end up calling the operands' __mul__ method, which tf.Tensor overloads as element-wise multiplication (tf.multiply). Both expressions therefore build the same element-wise product node, which is why they print the tf.multiply result rather than a matrix product.
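
Here is a minimal sketch of that fallback; the class Opaque is made up for illustration and stands in for a graph-mode tensor (no TensorFlow required):

import numpy as np

class Opaque:
    # Stand-in for a TF1 graph-mode Tensor: NumPy cannot read numeric data
    # out of it, but it overloads *, just as tf.Tensor maps * to tf.multiply
    def __mul__(self, other):
        return 'element-wise product'

a, b = Opaque(), Opaque()
print(np.asarray(a).shape)   # () -- NumPy stores the object in a 0-d object array
print(np.dot(a, b))          # element-wise product (0-d dot falls back to *)
print(np.multiply(a, b))     # element-wise product (object ufunc calls __mul__)

Note that under TF 2.x eager execution a tensor does convert to a real numeric array, so np.dot(x3, x4) would perform an actual matrix multiplication there.

Appendix: the complete hand-eye calibration script below uses both functions in earnest: np.multiply() for the element-wise pixel-to-camera-coordinate conversion, and np.dot() for the SVD-based rigid-transform estimate.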

#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import time
import cv2
from UR_Robot import UR_Robot
from scipy import optimize
from mpl_toolkits.mplot3d import Axes3D

# User options (change me)
# --------------- Setup options ---------------
tcp_host_ip = '192.168.50.100' # IP and port to robot arm as TCP client (UR5)
tcp_port = 30003
# workspace_limits = np.asarray([[0.3, 0.75], [0.05, 0.4], [-0.2, -0.1]]) # Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
# workspace_limits = np.asarray([[0.35, 0.55], [0, 0.35], [0.15, 0.25]])
workspace_limits = np.asarray([[0.2, 0.4], [0.4, 0.6], [0.05, 0.1]])
calib_grid_step = 0.05 # 0.05
# checkerboard_offset_from_tool = [0,-0.13,0.02] # change me!
checkerboard_offset_from_tool = [0, 0.121, 0]
tool_orientation = [-np.pi/2, 0, 0] # [0,-2.22,2.22] # [2.22,2.22,0]
# tool_orientation = [np.pi/2,np.pi/2,np.pi/2]
# ---------------------------------------------

# Construct 3D calibration grid across workspace
print(1 + (workspace_limits[0][1] - workspace_limits[0][0])/calib_grid_step)
gridspace_x = np.linspace(workspace_limits[0][0], workspace_limits[0][1], int(1 + (workspace_limits[0][1] - workspace_limits[0][0])/calib_grid_step))
gridspace_y = np.linspace(workspace_limits[1][0], workspace_limits[1][1], int(1 + (workspace_limits[1][1] - workspace_limits[1][0])/calib_grid_step))
gridspace_z = np.linspace(workspace_limits[2][0], workspace_limits[2][1], int(1 + (workspace_limits[2][1] - workspace_limits[2][0])/calib_grid_step))
calib_grid_x, calib_grid_y, calib_grid_z = np.meshgrid(gridspace_x, gridspace_y, gridspace_z)
num_calib_grid_pts = calib_grid_x.shape[0]*calib_grid_x.shape[1]*calib_grid_x.shape[2]
calib_grid_x.shape = (num_calib_grid_pts, 1)
calib_grid_y.shape = (num_calib_grid_pts, 1)
calib_grid_z.shape = (num_calib_grid_pts, 1)
calib_grid_pts = np.concatenate((calib_grid_x, calib_grid_y, calib_grid_z), axis=1)

measured_pts = []
observed_pts = []
observed_pix = []

# Move robot to home pose
print('Connecting to robot...')
robot = UR_Robot(tcp_host_ip, tcp_port, workspace_limits, is_use_robotiq85=False)
# robot.open_gripper()

# Slow down robot
robot.joint_acc = 1.4
robot.joint_vel = 1.05

# Make robot gripper point upwards
robot.move_j([-np.pi, -np.pi/2, np.pi/2, 0, np.pi/2, np.pi])
# self.home_joint_config = [-(0 / 360.0) * 2 * np.pi, -(90 / 360.0) * 2 * np.pi,
#                           (0 / 360.0) * 2 * np.pi, -(90 / 360.0) * 2 * np.pi,
#                           -(0 / 360.0) * 2 * np.pi, 0.0] # [0,-np.pi/2,0,-np.pi/2,0,0]

# Move robot to each calibration point in workspace
print('Collecting data...')
for calib_pt_idx in range(num_calib_grid_pts):
    tool_position = calib_grid_pts[calib_pt_idx, :]
    tool_config = [tool_position[0], tool_position[1], tool_position[2],
                   tool_orientation[0], tool_orientation[1], tool_orientation[2]]
    tool_config1 = [tool_position[0], tool_position[1], tool_position[2],
                    tool_orientation[0], tool_orientation[1], tool_orientation[2]]
    print(f"tool position and orientation:{tool_config1}")
    robot.move_j_p(tool_config)
    time.sleep(2)

    # Find checkerboard center
    checkerboard_size = (5, 5)
    refine_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    camera_color_img, camera_depth_img = robot.get_camera_data()
    bgr_color_data = cv2.cvtColor(camera_color_img, cv2.COLOR_RGB2BGR)
    gray_data = cv2.cvtColor(bgr_color_data, cv2.COLOR_RGB2GRAY)
    checkerboard_found, corners = cv2.findChessboardCorners(gray_data, checkerboard_size, None, cv2.CALIB_CB_ADAPTIVE_THRESH)
    print(checkerboard_found)
    if checkerboard_found:
        corners_refined = cv2.cornerSubPix(gray_data, corners, (5, 5), (-1, -1), refine_criteria)

        # Get observed checkerboard center 3D point in camera space
        checkerboard_pix = np.round(corners_refined[12, 0, :]).astype(int)
        checkerboard_z = camera_depth_img[checkerboard_pix[1]][checkerboard_pix[0]]
        checkerboard_x = np.multiply(checkerboard_pix[0]-robot.cam_intrinsics[0][2], checkerboard_z/robot.cam_intrinsics[0][0])
        checkerboard_y = np.multiply(checkerboard_pix[1]-robot.cam_intrinsics[1][2], checkerboard_z/robot.cam_intrinsics[1][1])
        if checkerboard_z == 0:
            continue

        # Save calibration point and observed checkerboard center
        observed_pts.append([checkerboard_x, checkerboard_y, checkerboard_z])
        # tool_position[2] += checkerboard_offset_from_tool
        tool_position = tool_position + checkerboard_offset_from_tool
        measured_pts.append(tool_position)
        observed_pix.append(checkerboard_pix)

        # Draw and display the corners
        # vis = cv2.drawChessboardCorners(robot.camera.color_data, checkerboard_size, corners_refined, checkerboard_found)
        vis = cv2.drawChessboardCorners(bgr_color_data, (1, 1), corners_refined[12, :, :], checkerboard_found)
        cv2.imwrite('%06d.png' % len(measured_pts), vis)
        cv2.imshow('Calibration', vis)
        cv2.waitKey(1000)

# Move robot back to home pose
# robot.go_home()

measured_pts = np.asarray(measured_pts)
observed_pts = np.asarray(observed_pts)
observed_pix = np.asarray(observed_pix)
world2camera = np.eye(4)

# Estimate rigid transform with SVD (from Nghia Ho)
def get_rigid_transform(A, B):
    assert len(A) == len(B)
    N = A.shape[0] # Total points
    centroid_A = np.mean(A, axis=0)
    centroid_B = np.mean(B, axis=0)
    AA = A - np.tile(centroid_A, (N, 1)) # Centre the points
    BB = B - np.tile(centroid_B, (N, 1))
    H = np.dot(np.transpose(AA), BB) # Dot is matrix multiplication for array
    U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)
    if np.linalg.det(R) < 0: # Special reflection case
        Vt[2, :] *= -1
        R = np.dot(Vt.T, U.T)
    t = np.dot(-R, centroid_A.T) + centroid_B.T
    return R, t

def get_rigid_transform_error(z_scale):
    global measured_pts, observed_pts, observed_pix, world2camera, camera

    # Apply z offset and compute new observed points using camera intrinsics
    observed_z = observed_pts[:, 2:] * z_scale
    observed_x = np.multiply(observed_pix[:, [0]]-robot.cam_intrinsics[0][2], observed_z/robot.cam_intrinsics[0][0])
    observed_y = np.multiply(observed_pix[:, [1]]-robot.cam_intrinsics[1][2], observed_z/robot.cam_intrinsics[1][1])
    new_observed_pts = np.concatenate((observed_x, observed_y, observed_z), axis=1)

    # Estimate rigid transform between measured points and new observed points
    R, t = get_rigid_transform(np.asarray(measured_pts), np.asarray(new_observed_pts))
    t.shape = (3, 1)
    world2camera = np.concatenate((np.concatenate((R, t), axis=1), np.array([[0, 0, 0, 1]])), axis=0)

    # Compute rigid transform error
    registered_pts = np.dot(R, np.transpose(measured_pts)) + np.tile(t, (1, measured_pts.shape[0]))
    error = np.transpose(registered_pts) - new_observed_pts
    error = np.sum(np.multiply(error, error))
    rmse = np.sqrt(error/measured_pts.shape[0])
    return rmse

# Optimize z scale w.r.t. rigid transform error
print('Calibrating...')
z_scale_init = 1
optim_result = optimize.minimize(get_rigid_transform_error, np.asarray(z_scale_init), method='Nelder-Mead')
camera_depth_offset = optim_result.x

# Save camera optimized offset and camera pose
print('Saving...')
np.savetxt('camera_depth_scale.txt', camera_depth_offset, delimiter=' ')
get_rigid_transform_error(camera_depth_offset)
camera_pose = np.linalg.inv(world2camera)
np.savetxt('camera_pose.txt', camera_pose, delimiter=' ')
print('Done.')

# DEBUG CODE -----------------------------------------------------------------------------------
# np.savetxt('measured_pts.txt', np.asarray(measured_pts), delimiter=' ')
# np.savetxt('observed_pts.txt', np.asarray(observed_pts), delimiter=' ')
# np.savetxt('observed_pix.txt', np.asarray(observed_pix), delimiter=' ')
# measured_pts = np.loadtxt('measured_pts.txt', delimiter=' ')
# observed_pts = np.loadtxt('observed_pts.txt', delimiter=' ')
# observed_pix = np.loadtxt('observed_pix.txt', delimiter=' ')
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(measured_pts[:,0], measured_pts[:,1], measured_pts[:,2], c='blue')
# print(camera_depth_offset)
# R, t = get_rigid_transform(np.asarray(measured_pts), np.asarray(observed_pts))
# t.shape = (3,1)
# camera_pose = np.concatenate((np.concatenate((R, t), axis=1), np.array([[0, 0, 0, 1]])), axis=0)
# camera2robot = np.linalg.inv(camera_pose)
# t_observed_pts = np.transpose(np.dot(camera2robot[0:3,0:3], np.transpose(observed_pts)) + np.tile(camera2robot[0:3,3:], (1, observed_pts.shape[0])))
# ax.scatter(t_observed_pts[:,0], t_observed_pts[:,1], t_observed_pts[:,2], c='red')
# new_observed_pts = observed_pts.copy()
# new_observed_pts[:,2] = new_observed_pts[:,2] * camera_depth_offset[0]
# R, t = get_rigid_transform(np.asarray(measured_pts), np.asarray(new_observed_pts))
# t.shape = (3,1)
# camera_pose = np.concatenate((np.concatenate((R, t), axis=1), np.array([[0, 0, 0, 1]])), axis=0)
# camera2robot = np.linalg.inv(camera_pose)
# t_new_observed_pts = np.transpose(np.dot(camera2robot[0:3,0:3], np.transpose(new_observed_pts)) + np.tile(camera2robot[0:3,3:], (1, new_observed_pts.shape[0])))
# ax.scatter(t_new_observed_pts[:,0], t_new_observed_pts[:,1], t_new_observed_pts[:,2], c='green')
# plt.show()
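
As a sanity check of get_rigid_transform, here is a small synthetic test (values are illustrative, and it assumes the function definition above is in scope): transform random points with a known rotation and translation, then confirm the estimate recovers them.

import numpy as np

rng = np.random.default_rng(0)
A = rng.random((20, 3))                      # 20 synthetic 3-D points
theta = np.pi / 6                            # known rotation about the z-axis
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
t_true = np.array([0.1, -0.2, 0.3])          # known translation
B = A @ R_true.T + t_true                    # b_i = R a_i + t for each row

R_est, t_est = get_rigid_transform(A, B)     # get_rigid_transform defined above
print(np.allclose(R_est, R_true))            # True
print(np.allclose(A @ R_est.T + t_est, B))   # True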