/* Display RGB primaries */

/* CIE XYZ tristimulus values of the monitor's red, green and blue
 * primaries, i.e. the columns (Xr Yr Zr), (Xg Yg Zg), (Xb Yb Zb) of
 * the RGB -> XYZ matrix M.
 * NOTE(review): these look like measured values for one particular
 * display -- confirm before reusing on other hardware. */

Xr = 21.2832;

Yr = 10.4607;

Zr = 0.347948;

Xg = 14.0206;

Yg = 29.792;

Zg = 4.69381;

Xb = 5.6572;

Yb = 2.91228;

Zb = 31.8093;

/* inverse matrix */

/* Standard 3x3 inversion by cofactors: det is the determinant of M
 * (expanded down its first column), and each row below is a row of
 * adj(M)/det, so (Rx Ry Rz; Gx Gy Gz; Bx By Bz) = M^-1. */

det = Xr*(Yg*Zb-Yb*Zg) + Yr*(Zg*Xb-Zb*Xg) + Zr*(Xg*Yb-Xb*Yg);

Rx=(Yg*Zb-Yb*Zg)/det; Ry=(Zg*Xb-Zb*Xg)/det; Rz=(Xg*Yb-Xb*Yg)/det;

Gx=(Yb*Zr-Yr*Zb)/det; Gy=(Zb*Xr-Zr*Xb)/det; Gz=(Xb*Yr-Xr*Yb)/det;

Bx=(Yr*Zg-Yg*Zr)/det; By=(Zr*Xg-Zg*Xr)/det; Bz=(Xr*Yg-Xg*Yr)/det;

/* Convert XYZ to monitor RGB primaries */

/* Applies M^-1 to a tristimulus vector (X, Y, Z), which is assumed
 * to be defined upstream of this snippet. */

R = Rx*X + Ry*Y + Rz*Z;

G = Gx*X + Gy*Y + Gz*Z;

B = Bx*X + By*Y + Bz*Z;

Hope this helps.

Cheers.

Richard Kirk

Now if the function is far from being linear, indeed, the linear least squares solution is not going to be satisfactory, and a nonlinear least squares will have to be considered.

(I’d start by trying the linear fit with a constant term. Richard said the hardware is supposed to be manually calibrated such that the constant term will be 0, so if we need a significant constant term, this is a good sign that the linear fit is a poor fit. The R² error coefficient can of course be computed as well, GSL also does that for you.)

In numpy, assuming x and y are 3×n arrays and n > 3:

# Least-squares fit of A in y = A x, with x and y as 3-by-n arrays
# (defined upstream).  lstsq(x.T, y.T) returns the matrix M that
# minimizes ||x.T @ M - y.T||, which is A transposed -- hence the
# trailing .T to recover A itself.
A = scipy.linalg.lstsq(x.T, y.T)[0].T

This is the least-squares solution to y = A x.

If you have additional constraints on A, like that it is orthogonal, then look into the Procrustes problem. See also: http://en.wikipedia.org/wiki/User:BenFrantzDale/Rotation_estimation

http://emphaticallystatic.org/media/transform.tar.bz2

Given two files containing your sets of points (input.csv and output.csv), the linked script computes the transformation parameters (R: rotation matrix, t: translation vector, c: scaling factor) that relate the points via:

y_i = c*R*x_i + t

The archive I’ve linked to also contains the paper that describes the algorithm I’ve implemented, and can be run from the command line as:

octave transformation.m

One can also generate fake data via octave generate_data.m to test the script.

#!/bin/sh
#
# Find the 3x3 matrix for the linear relation between two sets
# of (linear) RGB triplets.  Method is described here:
# http://kybele.psych.cornell.edu/~edelman/Course/lightness/node23.html
#
# The input is streamed, so several million pixels can be handled
# with essentially no memory usage: awk emits one bc accumulation
# statement per line, and bc performs the final arbitrary-precision
# solve of the normal equations.
#
# Data are read from standard input as six columns:
#   columns 1, 2 and 3 are the TARGET data,
#   columns 4, 5 and 6 are the INITIAL data.
#
# The final matrix is:
#
#   [ R_target ]   [ a b c ] [ R_initial ]
#   [ G_target ] = [ d e f ] [ G_initial ]
#   [ B_target ]   [ g h i ] [ B_initial ]
#
# and is printed one coefficient per line, in the order a b c d e f g h i.
#
awk '
BEGIN {
	# bc program header: 48 fractional digits, zeroed accumulators.
	# am1..am9 accumulate A = sum(target * initial^T), row-major;
	# bm1..bm9 accumulate B = sum(initial * initial^T).
	print "scale=48;";
	print "am1=0;am2=0;am3=0;am4=0;am5=0;am6=0;am7=0;am8=0;am9=0;";
	print "bm1=0;bm2=0;bm3=0;bm4=0;bm5=0;bm6=0;bm7=0;bm8=0;bm9=0;";
}
{
	print "am1+=" $1 "*" $4 ";am2+=" $1 "*" $5 ";am3+=" $1 "*" $6 ";";
	print "am4+=" $2 "*" $4 ";am5+=" $2 "*" $5 ";am6+=" $2 "*" $6 ";";
	print "am7+=" $3 "*" $4 ";am8+=" $3 "*" $5 ";am9+=" $3 "*" $6 ";";
	# B is symmetric, so only its upper triangle is accumulated;
	# the mirror entries are filled in once, in END.
	print "bm1+=" $4 "*" $4 ";bm2+=" $4 "*" $5 ";bm3+=" $4 "*" $6 ";";
	print "bm5+=" $5 "*" $5 ";bm6+=" $5 "*" $6 ";bm9+=" $6 "*" $6 ";";
}
END {
	print "bm4=bm2;bm7=bm3;bm8=bm6;";
	# Invert B once by cofactors: det = |B|, c1..c9 = entries of
	# B^-1 in row-major order.  Each output coefficient is then a
	# row of A dotted with a column of B^-1.  This is algebraically
	# identical to expanding the full expression per coefficient,
	# just factored through bc variables for readability.
	print "det=(bm9*bm5-bm8*bm6)*bm1+((-bm9*bm4+bm7*bm6)*bm2+(bm8*bm4-bm7*bm5)*bm3);";
	print "c1=(bm9*bm5-bm8*bm6)/det;c2=(-bm9*bm4+bm7*bm6)/det;c3=(bm8*bm4-bm7*bm5)/det;";
	print "c4=(-bm9*bm2+bm8*bm3)/det;c5=(bm9*bm1-bm7*bm3)/det;c6=(-bm8*bm1+bm7*bm2)/det;";
	print "c7=(bm6*bm2-bm5*bm3)/det;c8=(-bm6*bm1+bm4*bm3)/det;c9=(bm5*bm1-bm4*bm2)/det;";
	print "print c1*am1+(c2*am2+c3*am3),\"\\n\";";
	print "print c4*am1+(c5*am2+c6*am3),\"\\n\";";
	print "print c7*am1+(c8*am2+c9*am3),\"\\n\";";
	print "print c1*am4+(c2*am5+c3*am6),\"\\n\";";
	print "print c4*am4+(c5*am5+c6*am6),\"\\n\";";
	print "print c7*am4+(c8*am5+c9*am6),\"\\n\";";
	print "print c1*am7+(c2*am8+c3*am9),\"\\n\";";
	print "print c4*am7+(c5*am8+c6*am9),\"\\n\";";
	print "print c7*am7+(c8*am8+c9*am9),\"\\n\";";
}' | bc

The main drawback is that the QR decomposition isn’t as good at revealing the rank of the matrix as the SVD, so you can run into trouble if the matrix does not have full rank, or is close to not having full rank (i.e. is ill-conditioned). (Handling that case also requires extra work in the QR method, because you have to use a pivoting QR factorization, whereas in the SVD method, you just have to zero the entries where you would divide by zero.)

For our problem, this means: If the coefficients ai1, ai2 and ai3 such that yi=ai1 x1+ai2 x2+ai3 x3 (or with a constant term, the coefficients ai1, ai2, ai3 and ci such that yi=ai1 x1+ai2 x2+ai3 x3+ci) are expected to be unique, the QR method should be good enough. If you expect that there might be multiple (infinitely many) solutions, you will have to use the SVD method and check for zeros in S.
