#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
#include <vector>

using namespace cv;
using namespace std;

// generate light direction vectors (unit) for N lights, given slant angle (radians) and start azimuth (degrees)
static void generateLightDirections(int nLights, double slantRad, double startAngleDeg, vector<Vec3d>& Lout)
{
Lout.clear();
for (int k = 0; k < nLights; ++k) {
double azDeg = startAngleDeg + k * (360.0 / nLights);
double az = azDeg * CV_PI / 180.0;
double lx = sin(slantRad) * cos(az);
double ly = sin(slantRad) * sin(az);
double lz = cos(slantRad); // pointing toward camera
Vec3d lv(lx, ly, lz);
double norm = cv::norm(lv);
if (norm > 0) lv /= norm;
Lout.push_back(lv);
}
}
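// Model behind these two helpers: under the Lambertian assumption each pixel obeys
// I_k = rho * (L_k . n) = L_k . g with g = rho * n. Stacking the m light directions
// as rows of an (m x 3) matrix L gives I = L * g, and the least-squares solution is
// g = (L^T L)^{-1} L^T * I, which is what computeLightPseudoInverse() below sets up.
// At least 3 non-coplanar light directions are needed, otherwise L^T L is singular.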
// compute pseudo-inverse P = (L^T L)^{-1} L^T for an (m x 3) L (m lights)
static Mat computeLightPseudoInverse(const vector<Vec3d>& L)
{
int m = (int)L.size();
Mat Lmat(m, 3, CV_64F);
for (int i = 0; i < m; ++i) {
Lmat.at<double>(i, 0) = L[i][0];
Lmat.at<double>(i, 1) = L[i][1];
Lmat.at<double>(i, 2) = L[i][2];
}
Mat Lt; transpose(Lmat, Lt); // 3 x m
Mat LtL = Lt * Lmat; // 3 x 3
Mat LtL_inv;
invert(LtL, LtL_inv, DECOMP_SVD); // safer inversion
Mat pseudo = LtL_inv * Lt; // 3 x m
return pseudo; // 3 x m, so g = pseudo * Ivec (m x 1)
}
int main()
{
// -----------------------
// Parameters (tune these)
// -----------------------
vector<string> files = {
"J:\\3\\1.bmp",
"J:\\3\\2.bmp",
"J:\\3\\3.bmp",
"J:\\3\\4.bmp"
};
int iImageNum = (int)files.size(); // your lib expects 4 or 8 lights; this code accepts any count >= 3, the minimum photometric stereo requires
double dSlantAngle = CV_PI / 4.0; // 45 degrees slant
int iPlaceMode = 1; // 0 -> start at 0°, 1 -> optimized start (we choose 45 deg)
double startAngleDeg = (iPlaceMode == 1) ? 45.0 : 0.0;
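// Example: with iImageNum = 4 and iPlaceMode = 1 the azimuths are 45, 135, 225 and
// 315 degrees (mode 0 would give 0, 90, 180, 270), all at the 45-degree slant above.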
// curvature-map parameters (from your example)
double fCurveSigma = 2.0; // Gaussian sigma used to smooth the computed curvature (applied later)
double fCurveScale = 50.0; // stretch factor
double fCurveShift = 128.0; // offset added after stretching
// normal enhancement params
bool bNormalEnhance = true;
int iEnhanceWin = 21; // gaussian kernel size for local mean (must be odd)
double fEnhanceScale = 2.0; // factor that amplifies high-frequency components of normals
// normal maps display scaling
double fNXScale = 100.0, fNXShift = 128.0;
double fNYScale = 100.0, fNYShift = 128.0;
double fNZScale = 800.0, fNZShift = -600.0;
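// With these settings nx and ny in [-1, 1] map to roughly [28, 228] (n * 100 + 128),
// while nz, which stays close to 1 for a mostly front-facing surface, is stretched by
// nz * 800 - 600 so the narrow range [0.75, 1.0] spreads over [0, 200].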
// -----------------------
// Read images (grayscale)
// -----------------------
vector<Mat> imgs;
for (auto& p : files) {
Mat im = imread(p, IMREAD_GRAYSCALE);
if (im.empty()) { cerr << "Failed to read " << p << endl; return -1; }
imgs.push_back(im);
}
int width = imgs[0].cols, height = imgs[0].rows;
for (size_t i = 1; i < imgs.size(); ++i) {
if (imgs[i].cols != width || imgs[i].rows != height) { cerr << "Input sizes mismatch\n"; return -1; }
}
// -----------------------
// Generate light directions and pseudo-inverse
// -----------------------
vector<Vec3d> L;
generateLightDirections(iImageNum, dSlantAngle, startAngleDeg, L);
Mat pseudo = computeLightPseudoInverse(L); // 3 x m
// -----------------------
// Prepare mats: convert input to CV_64F
// -----------------------
vector<Mat> imgs_f;
for (int i = 0; i < iImageNum; i++) {
Mat tmp; imgs[i].convertTo(tmp, CV_64F); // pixel intensities as double
imgs_f.push_back(tmp);
}
// Compute mean image (optional)
Mat meanImg = Mat::zeros(height, width, CV_64F);
for (int i = 0; i < iImageNum; i++) meanImg += imgs_f[i];
meanImg /= static_cast<double>(iImageNum);
// save mean for reference (convert to 8U)
{
Mat mean8; normalize(meanImg, mean8, 0, 255, NORM_MINMAX); mean8.convertTo(mean8, CV_8U);
imwrite("J:\\3\\mean_image.png", mean8);
}
// -----------------------
// Solve per-pixel for g = rho * n (3-vector)
// g = pseudo * I_vec
// We'll compute g_x,g_y,g_z as CV_64F images
// -----------------------
Mat gX = Mat::zeros(height, width, CV_64F);
Mat gY = Mat::zeros(height, width, CV_64F);
Mat gZ = Mat::zeros(height, width, CV_64F);
Mat albedo = Mat::zeros(height, width, CV_64F);
// Precompute pseudo as array for speed
// pseudo is 3 x m
int m = iImageNum;
vector<vector<double>> p(3, vector<double>(m));
for (int r = 0; r < 3; r++) for (int c = 0; c < m; c++) p[r][c] = pseudo.at<double>(r, c);
// For each pixel, build Ivec and compute g = pseudo * Ivec
for (int y = 0; y < height; y++) {
// pointers
vector<const double*> iptr(m);
for (int k = 0; k < m; k++) iptr[k] = imgs_f[k].ptr<double>(y);
double* gxptr = gX.ptr<double>(y);
double* gyptr = gY.ptr<double>(y);
double* gzptr = gZ.ptr<double>(y);
double* alphaptr = albedo.ptr<double>(y);
for (int x = 0; x < width; x++) {
// compute g components directly: g_r = sum_c p[r][c] * I_c
// (accumulate straight from the image row pointers; no fixed-size intensity
// buffer, so any number of lights m >= 3 works)
double gx = 0, gy = 0, gz = 0;
for (int c = 0; c < m; c++) {
double Ic = iptr[c][x];
gx += p[0][c] * Ic;
gy += p[1][c] * Ic;
gz += p[2][c] * Ic;
}
gxptr[x] = gx; gyptr[x] = gy; gzptr[x] = gz;
double rho = sqrt(gx * gx + gy * gy + gz * gz);
alphaptr[x] = rho;
}
}
// compute normals n = g / rho, handle rho==0
Mat nx = Mat::zeros(height, width, CV_64F);
Mat ny = Mat::zeros(height, width, CV_64F);
Mat nz = Mat::zeros(height, width, CV_64F);
for (int y = 0; y < height; y++) {
const double* gxptr = gX.ptr<double>(y);
const double* gyptr = gY.ptr<double>(y);
const double* gzptr = gZ.ptr<double>(y);
const double* rptr = albedo.ptr<double>(y);
double* nxp = nx.ptr<double>(y);
double* nyp = ny.ptr<double>(y);
double* nzp = nz.ptr<double>(y);
for (int x = 0; x < width; x++) {
double rho = rptr[x];
if (rho > 1e-6) {
nxp[x] = gxptr[x] / rho;
nyp[x] = gyptr[x] / rho;
nzp[x] = gzptr[x] / rho;
}
else {
nxp[x] = 0; nyp[x] = 0; nzp[x] = 1.0; // fallback normal
}
}
}
// -----------------------
// Optional: Normal enhancement
// Approach: compute local mean normal via Gaussian blur, then amplify high-frequency:
// n_enh = normalize( n + fEnhanceScale * (n - mean_n) )
// -----------------------
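// (This is unsharp masking applied to a vector field: the Gaussian-blurred normals
// are the low-pass base, (n - mean_n) is the high-frequency detail, and fEnhanceScale
// plays the role of the "amount". Re-normalizing afterwards is required because the
// boosted vector is generally no longer unit length.)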
if (bNormalEnhance) {
Mat nx_blur, ny_blur, nz_blur;
int ksize = (iEnhanceWin % 2 == 1) ? iEnhanceWin : (iEnhanceWin + 1);
GaussianBlur(nx, nx_blur, Size(ksize, ksize), 0, 0, BORDER_REPLICATE);
GaussianBlur(ny, ny_blur, Size(ksize, ksize), 0, 0, BORDER_REPLICATE);
GaussianBlur(nz, nz_blur, Size(ksize, ksize), 0, 0, BORDER_REPLICATE);
Mat nx_new = Mat::zeros(height, width, CV_64F);
Mat ny_new = Mat::zeros(height, width, CV_64F);
Mat nz_new = Mat::zeros(height, width, CV_64F);
for (int y = 0; y < height; y++) {
const double* nxp = nx.ptr<double>(y);
const double* nyp = ny.ptr<double>(y);
const double* nzp = nz.ptr<double>(y);
const double* bnxp = nx_blur.ptr<double>(y);
const double* bnyp = ny_blur.ptr<double>(y);
const double* bnzp = nz_blur.ptr<double>(y);
double* nxe = nx_new.ptr<double>(y);
double* nye = ny_new.ptr<double>(y);
double* nze = nz_new.ptr<double>(y);
for (int x = 0; x < width; x++) {
double dx = nxp[x] - bnxp[x];
double dy = nyp[x] - bnyp[x];
double dz = nzp[x] - bnzp[x];
double ex = nxp[x] + fEnhanceScale * dx;
double ey = nyp[x] + fEnhanceScale * dy;
double ez = nzp[x] + fEnhanceScale * dz;
double norm = sqrt(ex * ex + ey * ey + ez * ez);
if (norm > 1e-6) { nxe[x] = ex / norm; nye[x] = ey / norm; nze[x] = ez / norm; }
else { nxe[x] = 0; nye[x] = 0; nze[x] = 1.0; }
}
}
nx = nx_new; ny = ny_new; nz = nz_new;
}
// -----------------------
// Optionally save normal maps (scaled + shifted)
// Map: value = n*scale + shift then clip 0..255 and convert to 8U
// -----------------------
Mat nx_vis, ny_vis, nz_vis;
{
Mat tmp = nx * fNXScale + fNXShift;
threshold(tmp, tmp, 255, 255, THRESH_TRUNC);
threshold(tmp, tmp, 0, 0, THRESH_TOZERO);
tmp.convertTo(nx_vis, CV_8U);
tmp = ny * fNYScale + fNYShift;
threshold(tmp, tmp, 255, 255, THRESH_TRUNC);
threshold(tmp, tmp, 0, 0, THRESH_TOZERO);
tmp.convertTo(ny_vis, CV_8U);
tmp = nz * fNZScale + fNZShift;
threshold(tmp, tmp, 255, 255, THRESH_TRUNC);
threshold(tmp, tmp, 0, 0, THRESH_TOZERO);
tmp.convertTo(nz_vis, CV_8U);
imwrite("J:\\3\\norm_x.png", nx_vis);
imwrite("J:\\3\\norm_y.png", ny_vis);
imwrite("J:\\3\\norm_z.png", nz_vis);
}
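// Optional extra output (not part of the original flow): the albedo map computed
// above is otherwise discarded; a minimal sketch that writes it next to the normal
// maps (the "albedo.png" filename is an assumption, following the pattern above):
{
Mat albedo8;
normalize(albedo, albedo8, 0, 255, NORM_MINMAX);
albedo8.convertTo(albedo8, CV_8U);
imwrite("J:\\3\\albedo.png", albedo8);
}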
// -----------------------
// Compute curvature from normal field
// Simple approximation: curvature ≈ d(nx)/dx + d(ny)/dy
// Compute derivatives with Sobel on nx and ny
// -----------------------
Mat dnx_dx, dny_dy;
Sobel(nx, dnx_dx, CV_64F, 1, 0, 3, 1.0, 0.0, BORDER_DEFAULT);
Sobel(ny, dny_dy, CV_64F, 0, 1, 3, 1.0, 0.0, BORDER_DEFAULT);
Mat curvature = dnx_dx + dny_dy; // CV_64F
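// Note: dnx/dx + dny/dy is the divergence of the projected normal field, which is
// proportional to mean curvature (up to sign and a constant factor, depending on
// convention). The 3x3 Sobel kernels also carry a [1 2 1] smoothing weight, so the
// raw values are roughly 8x a plain central difference; fCurveScale / fCurveShift
// below simply map this arbitrary scale into the 0..255 display range.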
// smooth curvature with fCurveSigma
if (fCurveSigma > 0.0) {
int k = (int)(fCurveSigma * 3) * 2 + 1;
if (k < 3) k = 3;
GaussianBlur(curvature, curvature, Size(k, k), fCurveSigma, fCurveSigma, BORDER_REPLICATE);
}
// Apply stretch and shift (scale + offset)
Mat curvature_scaled = curvature * fCurveScale + fCurveShift;
// Clip to 0..255
Mat curvature_clipped;
threshold(curvature_scaled, curvature_clipped, 255, 255, THRESH_TRUNC);
threshold(curvature_clipped, curvature_clipped, 0, 0, THRESH_TOZERO);
// Convert to 8U for saving
Mat curvature8;
curvature_clipped.convertTo(curvature8, CV_8U);
// Save outputs
imwrite("J:\\3\\curvature.png", curvature8);
// Also save a color visualization combining NX/NY/NZ if desired:
// build a BGR image where R = nx, G = ny, B = nz
vector<Mat> ch;
ch.push_back(nz_vis); // B
ch.push_back(ny_vis); // G
ch.push_back(nx_vis); // R
Mat normalColor;
merge(ch, normalColor);
imwrite("J:\\3\\normal_color.png", normalColor);
cout << "Done. Saved: mean_image.png, norm_x.png, norm_y.png, norm_z.png, normal_color.png, curvature.png\n";
return 0;
}