Fitting a cuboid inside a segmented layout map
I need to find a 3D cuboid that fits the corresponding images, and the camera position within this cuboid.
First image is a label map with at most 5 labels (left wall, front wall, right wall, floor, ceiling).
Second image is the rendered polygons I found in the first image, each polygon being either a wall, floor, or ceiling. Polygons are found by applying the scipy.spatial.ConvexHull
function to each label blob.
I can also compute the intersection corner points of those polygons if that helps.
Resources I found: https://web.stanford.edu/class/cs331b/2016/presentations/paper5.pdf
See also questions close to this topic
 Resize ROI in an image while maintaining the original image size

Distributing message bits over the three colour carrier image channels
We were given code in one of our courses that I do not understand how it works, and we are asked to figure out what it does and write different code that should perform the same function.
I have used this website to learn a lot about coding. Sorry if my explanation isn't clear.
enum Used { NOT_USED, USED }; Mat_<Vec3b> state(imginput.size(), Vec3b::all(NOT_USED)); auto encoded = imginput.clone(); for (auto& pixel : imgoutput) { uchar* location; int col, row, element; do { row = rng(imginput.rows); col = rng(imginput.cols); element = rng(3); location = &imginput.at<Vec3b>(row, col)[element]; } while (state.at<Vec3b>(row, col)[element] == USED || *location == 255); encoded.at<Vec3b>(row, col)[element] += pixel ? 0 : 1; state.at<Vec3b>(row, col)[element] = USED; }

cannot unpack non-iterable numpy.float64 object python3 opencv
I am getting this error and can't understand why the issue is appearing. Below will be the code and error.
The result of the last printable workout
[-8.54582258e-01 9.83741381e+02] left [ 0.776281243 -160.77584028] right
The code error happens in
make_coordinates
and the line is slope, intercept = line_parameters
Here's the full code:
import cv2 import numpy as np vid = cv2.VideoCapture('carDriving.mp4') def processImage(image): gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) blur = cv2.GaussianBlur(gray, (5,5), 0) canny = cv2.Canny(blur, 50, 150) return canny def region_of_interest(image): height = image.shape[0] polygons = np.array([ [(200,height), (1200,height), (750,300)] ]) mask = np.zeros_like(image) cv2.fillPoly(mask, polygons, 255) masked_image = cv2.bitwise_and(image, mask) return masked_image def display_lines(image, lines): line_image = np.zeros_like(image) if lines is not None: for line in lines: x1, y1, x2, y2 = line.reshape(4) cv2.line(line_image, (x1, y1), (x2, y2), (255,0,0), 10) return line_image def average_slope_intercept(image, lines): left_fit = [] right_fit = [] if lines is not None: for line in lines: x1, y1, x2, y2 = line.reshape(4) parameters = np.polyfit((x1, x2), (y1, y2), 1) slope = parameters[0] intercept = parameters[1] if slope < 0: left_fit.append((slope, intercept)) else: right_fit.append((slope, intercept)) left_fit_average = np.average(left_fit, axis=0) right_fit_average = np.average(right_fit, axis=0) print(left_fit_average, 'left') print(right_fit_average, 'right') left_line = make_coordinates(image, left_fit_average) right_line = make_coordinates(image, right_fit_average) #return np.array([left_line, right_line]) def make_coordinates(image, line_parameters): slope, intercept = line_parameters y1 = image.shape[0] y2 = int(y1*3/5) x1 = int(y1  intercept)/slope x1 = int(y2  intercept)/slope return np.array([x1, y1, x2, y2]) while True: ret, frame = vid.read() grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) processed_image = processImage(frame) cropped_image = region_of_interest(processed_image) lines = cv2.HoughLinesP(cropped_image, 2, np.pi/180, 100, np.array([]), minLineLength=40, maxLineGap=5) averaged_lines = average_slope_intercept(grayFrame, lines) line_image = display_lines(cropped_image,lines) combo_image = cv2.addWeighted(grayFrame, .6, line_image, 
1, 1) cv2.imshow('result', combo_image) print(lines) if cv2.waitKey(30) & 0xFF == ord('q'): break vid.release() cv2.destroyAllWindows()
and the complete error message:
Message=cannot unpack noniterable numpy.float64 object Source=C:\Users\Andre\source\repos\SelfDrivingCarTest\SelfDrivingCarTest\SelfDrivingCarTest.py StackTrace: File "C:\Users\Andre\source\repos\SelfDrivingCarTest\SelfDrivingCarTest\SelfDrivingCarTest.py", line 52, in make_coordinates slope, intercept = line_parameters File "C:\Users\Andre\source\repos\SelfDrivingCarTest\SelfDrivingCarTest\SelfDrivingCarTest.py", line 47, in average_slope_intercept left_line = make_coordinates(image, left_fit_average) File "C:\Users\Andre\source\repos\SelfDrivingCarTest\SelfDrivingCarTest\SelfDrivingCarTest.py", line 65, in <module> averaged_lines = average_slope_intercept(grayFrame, lines)
Now receiving another error, line 27, first error was fixed
Message=integer argument expected, got float Source=C:\Users\Andre\source\repos\SelfDrivingCarTest\SelfDrivingCarTest\SelfDrivingCarTest.py StackTrace: File "C:\Users\Andre\source\repos\SelfDrivingCarTest\SelfDrivingCarTest\SelfDrivingCarTest.py", line 27, in display_lines cv2.line(line_image, (x1, y1), (x2, y2), (255,0,0), 10) File "C:\Users\Andre\source\repos\SelfDrivingCarTest\SelfDrivingCarTest\SelfDrivingCarTest.py", line 76, in <module> line_image = display_lines(cropped_image,averaged_lines)
I change line 27 to
cv2.line(line_image, int(x1, y1), int(x2, y2), (255,0,0), 10)
and get the following errorMessage='numpy.float64' object cannot be interpreted as an integer Source=C:\Users\Andre\source\repos\SelfDrivingCarTest\SelfDrivingCarTest\SelfDrivingCarTest.py StackTrace: File "C:\Users\Andre\source\repos\SelfDrivingCarTest\SelfDrivingCarTest\SelfDrivingCarTest.py", line 27, in display_lines cv2.line(line_image, int(x1, y1), int(x2, y2), (255,0,0), 10) File "C:\Users\Andre\source\repos\SelfDrivingCarTest\SelfDrivingCarTest\SelfDrivingCarTest.py", line 76, in <module> line_image = display_lines(cropped_image,averaged_lines)

How can I change the color of a part in a 3D model with wpf and helixtoolkit
I am using the Helix Toolkit and I have a "3D woman model".
The goal of the program is to be a guide to determine the best color of the hair dye and lenses for the eyes with the color of the skin
I successfully loaded this in Helix on a WPF form. Now, what is the approach to change its coloring? Is there another method, or other tools?
I thought about using helixtoolkit?
A 3D simulator guide to determine the best color of hair dye and eye lenses given the color of the skin.

Threejs object with multiple material doesn't work with RayCasting
I have created a 3d object using blender and exported it as .obj file.
I am trying to load that .obj file using OBJLoader in three.js. It is a single object with multiple materials in it. It is loaded without any issues.
I am trying to track which material the user has clicked in a 3d object. I am using raycasting for this. Since raycasting works only at object level, I am not able to get which specific material user has clicked.

Add a picture/thumbnail in a 3d projection matplotlib plot
In matplotlib, does anyone know how to add a picture or thumbnail to a 3d projection plot outside of the margin of the figure? The thumbnail is a png/jpeg image from a file.
I'm thinking of something analogous to this example, but with a picture:
import matplotlib as mpl from mpl_toolkits.mplot3d import Axes3D import numpy as np import matplotlib.pyplot as plt mpl.rcParams['legend.fontsize'] = 10 fig = plt.figure() ax = fig.gca(projection='3d') theta = np.linspace(-4 * np.pi, 4 * np.pi, 100) z = np.linspace(-2, 2, 100) r = z**2 + 1 x = r * np.sin(theta) y = r * np.cos(theta) ax.plot(x, y, z, label='parametric curve') ax.legend() plt.show()

DCGAN generator loss stuck at 0.7 after 5 epochs
After playing around with the PyTorch DCGAN faces tutorial, I started working with my own dataset, which consists of images with size 1x32x32 (channel, height, width).
Now, I did apply most of the things from this repository: https://github.com/soumith/ganhacks
But currently I am stuck.
I made this argument to choose whether to train the generator (G) or the discriminator (D).
if i > 1: if D_G_z1 < 0.5: train_G = True train_D = False else: train_D = True train_G = False
Where i is the current batch number,
train_D
andtrain_G
are set to True on batch one. D_G_z1
is D(G(x)). I'd expect that once D is trained and D(G(x)) = 0.5, D will stop training and G will start training to improve the realism of the generated images, etc. Now D and G are training when the conditions are met.
However, the loss of G is stuck at 0.7 after 5 epochs and doesn't seem to change with 1k epochs (I haven't tried more). Changing the learning rate for G, or making G more/less complex by changing the amount of channels per ConvTranspose2d layer, doesn't help either.
What's the best approach now? Any advice would be appreciated.
The code is found here: https://github.com/deKeijzer/SRONDCGAN/blob/master/notebooks/ExoGAN_v1.ipynb
TLDR: Generator loss is stuck at 0.7 and doesn't change anymore. Neither did it 'learn' a good representation of X.

The reward becomes a penalty if a certain condition
I am working to build a reinforcement learning agent with DQN. The agent would be able to place buy and sell orders for intraday trading. I am facing a little problem with that project. The question is "how to tell the agent to maximize the profit and avoid the transaction where the profit is less than $100".
I want to maximize the profit inside a trading day and avoid to place the pair (limit buy order, limit sell order) if the profit on that transaction is less than $100. The idea here is to avoid the little noisy movements. Instead, I prefer long beautiful profitable movements. Be aware that I thought using the "Profit & Loss" as the reward function.
"I want the minimal profit per transaction to be $100" ==> It seems this is not something that is enforceable. I can train the agent to maximize profit per transaction, but how that profit is cannot be ensured.
At the beginning, I wanted to tell the agent, if the profit of a transaction is 50 dollars, I will remove 100 dollars, then It becomes a penalty of 50 dollars for the agent. I thought it was a great way to tell the agent to not place a limit buy order if you are not sure it will give us a minimal profit of 100$. It seems that all I would be doing there is simply shifting the value of the reward. The agent only cares about maximizing the sum of rewards and not taking care of individual transactions.
How to tell the agent to maximize the profit and avoid the transactions where the profit is less than 100 dollars? With that strategy, what guarantees that the agent will never make a buy/sell decision that results in less than 100 dollars profit? Could (sum of rewards - number of transactions * 100) be a solution?

Keras LSTM - time series data input, binary data output
This is a follow up to my previous question and might seem a bit redundant, but please bear with me.
I am trying to create a Keras LSTM model which uses a sequence of time series data. The data is formatted as follows with input: time, value1, value2 and output(s): Y1 and Y2
Time Value1 Value2 Y1 Y2 0 900 10 1 1 1 905 3 1 0 2 999 1 1 0 3 906 7 0 1 . . . . . . . . . . n 945 2 1 0
I have calculated outputs Y1 and Y2 to train the LSTM on, which is a binary value for each time instance. However, the LSTM is supposed to take a sequence of data, e.g. (time, value1, value2) between rows 1-3, and then predict the binary output which corresponds to row 3, which I have calculated. Thus X and Y are as follows:
Input X:
Time Value1 Value2 1 905 3 2 999 1 3 906 7
Output Y:
Y1 Y2 0 1
Thus far I have tried using keras.sequence.timeseriesgenerator to make the LSTM data. The code is as follows (dn is a numpy array with the first three columns corresponding to the input described above and the 4th column (Y1) the output):
train = TimeseriesGenerator(dn[:, [0, 1, 2]], dn[:, 3], length=1, sampling_rate=1, stride=1, start_index=0, end_index=int(len(df.index) * 0.8), shuffle=False, reverse=False, batch_size=3) x0, y0 = train[0] print(x0) print(y0)
This gives the Output:
x0 :[[[ 0. 900.0]] [[ 1. 905.0]] [[ 2. 999.0]]] y0 :[1. 1. 1.]
I was expecting y0 to be [ 1. ], but the sequence generator is giving an output for each row in input. Am i right to pressume that if I provide this to the LSTM then it will attempt to predict the output for all the inputs instead of considering all the inputs and giving one output prediction?
I found another thread asking for something very similar here: similar question. I copied the code and get the following output:
epoch: 0 1 (array([[[[ 0.], [ 900.0]], [[ 1.], [ 905.0]], [[ 2.], [ 999.0]]]], dtype=float32), array([[ 1., 1., 1.]], dtype=float32))
In the question asked it was informed that there was only one expected output, but this clearly gives one output for each row as well. I'm not sure these two methods are formatting x an y equal. It is difficult to visualize with all these brackets.
I hope I managed to convey my problem. How are the input and output formatted for an LSTM neural network that should consider a sequence of time data and provide a binary output (or outputs)? I see many time series forecasting examples, but have not been able to translate their methods to a binary output.

Approximating 3D Meshes
I want to use machine learning to perform multiview 3D reconstruction from 2d images to create models of which I could use add to sims 4. From what I can gather, many of these systems output point clouds or polygon meshes depending on the particular implementation. I’m looking for some method by which a base model (like a generalized character model from sims 4) could be used to approximate these outputs through some kind of transformation of its vertices. I imagine a good way to do this would be to put my base model within the shell of the model I wish to approximate and then simulate a repulsive force between each of the base models vertices and a collision with the outer model so that the base model expands to fill the cavity of the simulated model and thus approximate it, ideally with each vertex of the base model having to move as little as possible. I would really like to approximate my output models in this way because it would ensure that because every approximation is just a simple transformation on the base model, any approximation can be interpolated into any other model with a slider in the CAS.
My questions are as follows:
What field of research does this fall under? What work has been done in this area? What information would be useful in learning more about this topic?

How to 3D-reconstruct from tracked points
Scenario: live feed/video clip of road driving (No notion of camera position and movement).
So far I have managed to map out occurring features within consecutive frames also noting their distances. I managed to filter out matches and am left with a high percentage of accurate matches.
 From here what is the next step to establish the depth of these points?
I would like to create a 3d cloud of these features which I have found.  I guess I would also need to know the relative movement of the camera between the frames, how would I calculate this given the matches?
I am using Python, more specifically CV2 and Skimage. Any insights or references would be truly appreciated.

Reconstruct 3D position of symmetric pairs of features matlab
I have to fix a reference frame at a suitable position on the symmetry plane of the car, and reconstruct the 3D position of some of the symmetric pairs of features relative to the above reference. How can I do this?
The points on the image can be selected manually so it is not a problem at all.
The camera is zero skew, not natural and I have already found the camera calibration matrix.
Now I think I have to find the 3d equation of the symmetry plane of the car, but I don't know how.
This is the car:

How to understand head pose estimation angles in Python with OpenCV?
I am working through a Python and OpenCV head pose estimation tutorial found here:
https://www.learnopencv.com/head-pose-estimation-using-opencv-and-dlib/
I am able to accurately project a 3D point onto the 2D image. However, I am unable to understand the practical meaning of the Euler angles (yaw, pitch, roll) that I calculate using
cv2.decomposeProjectionMatrix
I need to know whether or not the values correspond to (yaw, pitch, roll) or (roll, pitch, yaw), etc. Also, I need to understand the axis orientation used so that I can know where the degrees of rotation is measured from.
Output Image: https://www.learnopencv.com/wp-content/uploads/2016/09/head-pose-example-1024x576.jpg
Output Angles:
[[179.30011146], [ 53.77756583], [176.6277211 ]]
Here is my code
#  Imports  import cv2 import numpy as np #  Main  if __name__ == "__main__": # Read Image im = cv2.imread("headPose.jpg"); size = im.shape #2D image points. If you change the image, you need to change vector image_points = np.array([ (359, 391), # Nose tip (399, 561), # Chin (337, 297), # Left eye left corner (513, 301), # Right eye right corne (345, 465), # Left Mouth corner (453, 469) # Right mouth corner ], dtype="double") # 3D model points. model_points = np.array([ (0.0, 0.0, 0.0), # Nose tip (0.0, -330.0, -65.0), # Chin (-225.0, 170.0, -135.0), # Left eye left corner (225.0, 170.0, -135.0), # Right eye right corne (-150.0, -150.0, -125.0), # Left Mouth corner (150.0, -150.0, -125.0) # Right mouth corner ]) # Camera internals focal_length = size[1] center = (size[1]/2, size[0]/2) camera_matrix = np.array( [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]], dtype = "double" ) # Lens distortion - assumed to be zero dist_coeffs = np.zeros((4,1)) # Calculate perspective and point (_, rvec, tvec) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) # Calculate Euler angles rmat = cv2.Rodrigues(rvec)[0] # rotation matrix pmat = np.hstack((rmat, tvec)) # projection matrix eulers = cv2.decomposeProjectionMatrix(pmat)[1] print(eulers) # Projecting a 3D point ## features for p in image_points: cv2.circle(im, (int(p[0]), int(p[1])), 3, (0,0,255), 1) ## projection of multiple points proj = np.array([(0., 0., 1000.)]) (poi1, jacobian1) = cv2.projectPoints(model_points[0]+proj, rvec, tvec, camera_matrix, dist_coeffs) ## 2D space p1 = ( int(image_points[0][0]), int(image_points[0][1])) c1 = ( int(poi1[0][0][0]), int(poi1[0][0][1])) cv2.line(im, p1, c1, (255,0,0), 2) # Display image cv2.imshow("Output", im) cv2.waitKey(0)
Test Image: https://www.learnopencv.com/wpcontent/uploads/2016/09/headPose.jpg
Thanks!

Need help for pose estimation of an object using open cv c++
I have the intrinsic and distortion matrix from a calibrated camera. I am using open cv 3.2 c++. I am getting error output of pose estimation of the object. Help me to sort out this error. The values and the error image are attached below.
Mat K = (Mat_<double>(3, 3) << 5489.58773029126, 0, 1167.86282865392, 0, 5481.84660546223, 1047.21450461614, 0, 0, 1); Mat dist = (Mat_<double>(1, 5) << 0.111931171641671, 0.087488429523756156, 0.000844290429230941, 0.00204127293599477,0);
I already have image and object points, so proceeding with solvepnp for getting rotation and translation vector.So,
Mat rvecs, tvecs; vector<Point3f> end_point3D; vector<Point2f> end_point2D; end_point3D.push_back(Point3f(50, 0, 0)); end_point3D.push_back(Point3f(0, 50, 0)); end_point3D.push_back(Point3f(0, 0, 50)); solvePnP(Object_points, Image_points, K, dist, rvecs, tvecs); projectPoints(end_point3D, rvecs, tvecs, K, dist, end_point2D, noArray(), 0.0); cv::line(image, Image_points[0], end_point2D[0], cv::Scalar(255, 0, 0), 6); cv::line(image, Image_points[0], end_point2D[1], cv::Scalar(0, 255, 0), 6); cv::line(image, Image_points[0], end_point2D[2], cv::Scalar(0, 0, 255), 6);
The image is given below.error output of estimated pose
EDIT: The object points and image points are ordered properly in the same way. I am sure about the ordering; I have done the row-major ordering.
Size sq_size(6, 6); int Sq_length = 30; vector<Point3f>Object_points; for (int r = 0; r < sq_size.height; r++) for (int c = 0; c < sq_size.width; c++) Object_points.push_back(Point3f(r*Sq_length, c*Sq_length, 0)); vector<Point2f>Image_points; for (int i = 0; i < 36; i++) Image_points.push_back((Point2f)op_cent[i]); //row-major ordered image points
I have two doubts, 1.Though they are properly ordered, sometimes the pose results wrong as shown below, What could be the reason?
2.If I draw, estimated pose only at Image_points[0], it comes out to be right,but not at any other points. Can someone explain why the pose is wrong at Image_points[30]? At any point the objects pose should be same right?

Pose Estimation with rotation matrices
I'm not much of a math expert and my approach may be novice. I want to try to keep it simple and focus on optimizing the code performance.
I'm trying to do pose estimation for my QR code tracking application. I've just started out and would like to know if my approach is sound enough to work. I only need the rotation matrix, hence I think I can keep the computations simple without a 4x4 matrix and just using 2 dimensional vectors and 3x3 matrices.
I've been working on this for two days and it seems to produce results now that look like they are correct. But am I missing anything?
Rotation Estimator that does the actual work:
public class RotationEstimator { private final int GUESSES = 1000; private float[] projectionMatrix; private float[] inverseProjectionMatrix = new float[16]; private float[] inTranslation = new float[4]; private float[][] distTranslation = new float[4][4]; private float[] center = new float[4]; private Matrix3x3 rotateMatrix; public RotationEstimator(float[] projectionMatrix) { this.projectionMatrix = projectionMatrix; //Matrix.invertM(inverseProjectionMatrix, 0, projectionMatrix, 0); rotateMatrix = new Matrix3x3(); rotateMatrix.setToRotation(new Vector3(0,0,1f), 90f); for (int i = 0; i < distTranslation.length; i++) { distTranslation[i] = new float[4]; } } public void getRotationMatrix(float[][] points, Matrix3x3 result) { for (int i = 0; i < 3; i++) { center[i] = 0f; } center[3] = 1f; for (int i = 0; i < points.length; i++) { float[] point = points[i]; inTranslation[0] = point[0]; inTranslation[1] = point[1]; inTranslation[2] = 0f; inTranslation[3] = 1f; // For debug, just copy the points System.arraycopy(inTranslation, 0, distTranslation[i], 0, inTranslation.length); // Calculate the xyz of the blob with the inverse projection matrix to account for the uncalibrated camera perspective matrix. //Matrix.multiplyMV(distTranslation[i], 0, projectionMatrix, 0, inTranslation, 0); center[0] += distTranslation[i][0]; center[1] += distTranslation[i][1]; center[2] += distTranslation[i][2]; } for (int i = 0; i < 3; i++) { center[i] /= (float)points.length; // Subtract the center to make finding the matrix easier and more accurate. 
for (int k = 0; k < distTranslation.length; k++) { //distTranslation[k][i] = center[i]; } } result.setIdentity(); Matrix3x3 test1 = new Matrix3x3(); test1.setIdentity(); Matrix3x3 test2 = new Matrix3x3(); test2.setIdentity(); float[][] testTranslation = new float[4][4]; // Set the first trial to a random value Matrix3x3 tmp = new Matrix3x3(); Matrix3x3 diffXP = new Matrix3x3(); Matrix3x3 diffYP = new Matrix3x3(); Matrix3x3 diffZP = new Matrix3x3(); Matrix3x3 diffXM = new Matrix3x3(); Matrix3x3 diffYM = new Matrix3x3(); Matrix3x3 diffZM = new Matrix3x3(); float slope = 0.1f; diffXP.setToRotation(new Vector3(1f,0f,0f), slope); diffYP.setToRotation(new Vector3(0f,1f,0f), slope); diffZP.setToRotation(new Vector3(0f,0f,1f), slope); diffXM.setToRotation(new Vector3(1f,0f,0f), slope); diffYM.setToRotation(new Vector3(0f,1f,0f), slope); diffZM.setToRotation(new Vector3(0f,0f,1f), slope); Matrix3x3 diffTotal = new Matrix3x3(); float prevError = Float.MAX_VALUE; int guess; for (guess = 0; guess < GUESSES; guess++) { int tryAxis = guess % 3; System.out.println("Guess: " + guess); // Set matrices to test with switch (tryAxis) { case 0: diffTotal.set(diffXP); break; case 1: diffTotal.set(diffYP); break; case 2: diffTotal.set(diffZP); break; } Matrix3x3.mult(result, diffTotal, test1); // And do the opposite rotation switch (tryAxis) { case 0: diffTotal.set(diffXM); break; case 1: diffTotal.set(diffYM); break; case 2: diffTotal.set(diffZM); break; } Matrix3x3.mult(result, diffTotal, test2); // First guess resetPoints(testTranslation); for (int k = 0; k < testTranslation.length; k++) { // Multiply the parameter points with the test matrix test1.mul(testTranslation[k]); //add(testTranslation, center); for (int m = 0; m < 2; m++) { System.out.println("T1," + m + ":" + testTranslation[k][m]); } } float error1 = getError(distTranslation, testTranslation); System.out.println("Error1: " + error1); // Second guess resetPoints(testTranslation); for (int k = 0; k < testTranslation.length; 
k++) { // Multiply the parameter points with the test matrix test2.mul(testTranslation[k]); //add(testTranslation, center); for (int m = 0; m < 2; m++) { System.out.println("T2," + m + ":" + testTranslation[k][m]); } } float error2 = getError(distTranslation, testTranslation); System.out.println("Error2: " + error2); float error = Math.min(error1, error2); // If error is getting bigger or error is really small; return if (error < 0.1f) { System.out.println("Total guesses: " + guess + " Final error: " + error); return; } if (error > prevError) { System.out.println("Total guesses: " + guess + " Final error: " + error); System.out.println("Bigger error!"); return; } if (error1 < error2) { // Return the result result.set(test1); } else { // Return the result result.set(test2); } prevError = error; } } private static float getError(float[][] points, float[][] testPoints) { // Error is distance between points float error = 0f; for (int i = 0; i < points.length; i++) { float dx = points[i][0]  testPoints[i][0]; float dy = points[i][1]  testPoints[i][1]; error += Math.sqrt(dx * dx + dy * dy); } return error; } public static void resetPoints(float[][] points) { points[0][0] = 0f; points[0][1] = 0f; points[1][0] = 1f; points[1][1] = 0f; points[2][0] = 1f; points[2][1] = 1f; points[3][0] = 0f; points[3][1] = 1f; } }
Trial main method:
public class RotationTest { public static void main(String[] args) { float[][] points = new float[4][2]; points[0][0] = 0.055f; points[0][1] = 0.044f; points[1][0] = 0.89f; points[1][1] = .14f; points[2][0] = .9f; points[2][1] = .67f; points[3][0] = .1f; points[3][1] = .9865f; RotationEstimator guesstimator = new RotationEstimator(null); Matrix3x3 result = new Matrix3x3(); guesstimator.getRotationMatrix(points, result); } }
This is my output for those who can't run the code:
.. ... more here. .. .... Guess: 578 T1,0:0.016393749 T1,1:0.016127924 T1,0:1.0322586 T1,1:0.016127924 T1,0:1.0322586 T1,1:0.9994725 T1,0:0.016393749 T1,1:0.9994725 Error1: 0.6758817 T2,0:0.01639103 T2,1:0.016130688 T2,0:1.0320902 T2,1:0.016130688 T2,0:1.0320902 T2,1:0.9996465 T2,0:0.01639103 T2,1:0.9996465 Error2: 0.67588174 Total guesses: 578 Final error: 0.6758817