diff --git a/include/lut-calibrator/CapturedColor.h b/include/lut-calibrator/CapturedColor.h index 5a4501c8c..17175f0da 100644 --- a/include/lut-calibrator/CapturedColor.h +++ b/include/lut-calibrator/CapturedColor.h @@ -48,8 +48,9 @@ class CapturedColor int sourceRGBdelta = 0; std::list finalRGB; double3 color; - std::map inputColors; - std::list> sortedInputColors; + std::list> inputColors; + std::list> sortedInputYUVColors; + std::list> sortedInputYuvColors; byte3 min, max; byte3 colorInt; @@ -57,8 +58,6 @@ class CapturedColor CapturedColor() = default; const double& y() const { return color.x; } - const double& u() const { return color.y; } - const double& v() const { return color.z; } const double3& yuv() const { return color; } const uint8_t& Y() const { return colorInt.x; } @@ -69,13 +68,14 @@ class CapturedColor bool calculateFinalColor(); bool hasAllSamples(); bool hasAnySample(); - std::list> getInputColors(); + std::list> getInputYUVColors() const; + std::list> getInputYuvColors() const; void addColor(ColorRgb i); - void addColor(const double3& i); + void addColor(const byte3& i); void setSourceRGB(byte3 _color); - int getSourceError(const int3& _color); + int getSourceError(const int3& _color) const; int3 getSourceRGB() const; - void setFinalRgb(double3 _color); + void setFinalRGB(byte3 input); std::list getFinalRGB() const; QString toString(); diff --git a/sources/lut-calibrator/BoardUtils.cpp b/sources/lut-calibrator/BoardUtils.cpp index 88b167f32..51ddd6a84 100644 --- a/sources/lut-calibrator/BoardUtils.cpp +++ b/sources/lut-calibrator/BoardUtils.cpp @@ -481,7 +481,7 @@ namespace BoardUtils for(const auto& currentArray : arrayMark) { - myfile << currentArray.first; + myfile << "capturedData = " << currentArray.first; for (int r = 0; r < SCREEN_COLOR_DIMENSION; r++) { for (int g = 0; g < SCREEN_COLOR_DIMENSION; g++) @@ -489,7 +489,7 @@ namespace BoardUtils myfile << std::endl << "\t"; for (int b = 0; b < SCREEN_COLOR_DIMENSION; b++) { - auto elems = 
all[r][g][b].getInputColors(); + auto elems = all[r][g][b].getInputYUVColors(); myfile << currentArray.first; for (const auto& elem : elems) @@ -509,7 +509,7 @@ namespace BoardUtils } } } - myfile << std::endl << currentArray.second << std::endl; + myfile << std::endl << currentArray.second << ";" << std::endl; } myfile.close(); diff --git a/sources/lut-calibrator/CapturedColor.cpp b/sources/lut-calibrator/CapturedColor.cpp index 5272763e8..7fdb349d0 100644 --- a/sources/lut-calibrator/CapturedColor.cpp +++ b/sources/lut-calibrator/CapturedColor.cpp @@ -50,6 +50,8 @@ bool CapturedColor::calculateFinalColor() int count = 0; color = double3{ 0,0,0 }; + sortedInputYUVColors.clear(); + sortedInputYuvColors.clear(); for (auto iter = inputColors.begin(); iter != inputColors.end(); ++iter) { color += ((*iter).first) * ((*iter).second); @@ -57,18 +59,33 @@ bool CapturedColor::calculateFinalColor() // sort bool inserted = false; - for (auto sorted = sortedInputColors.begin(); sorted != sortedInputColors.end(); ++sorted) + for (auto sorted = sortedInputYUVColors.begin(); sorted != sortedInputYUVColors.end(); ++sorted) if (((*iter).second) > (*sorted).second) { - sortedInputColors.insert(sorted, std::pair((*iter).first, (*iter).second)); + sortedInputYUVColors.insert(sorted, std::pair((*iter).first, (*iter).second)); inserted = true; break; } if (!inserted) - sortedInputColors.push_back(std::pair((*iter).first, (*iter).second)); + sortedInputYUVColors.push_back(std::pair((*iter).first, (*iter).second)); } + while(sortedInputYUVColors.size() > 3 || (sortedInputYUVColors.size() == 3 && sortedInputYUVColors.back().second <= 6)) + sortedInputYUVColors.pop_back(); + + std::for_each(sortedInputYUVColors.begin(), sortedInputYUVColors.end(), [this](std::pair& m) { + + if (m.first.y >= 127 && m.first.y <= 129 && m.first.z >= 127 && m.first.z <= 129) + { + m.first.y = 128; + m.first.z = 128; + } + + sortedInputYuvColors.push_back(std::pair(static_cast(m.first) / 255.0, m.second)); + 
}); + + auto workColor = color / count; colorInt = ColorSpaceMath::to_byte3(workColor); @@ -118,10 +135,10 @@ bool CapturedColor::hasAnySample() void CapturedColor::addColor(ColorRgb i) { - addColor(double3(i.red, i.green, i.blue)); + addColor(byte3{ i.red, i.green, i.blue }); } -void CapturedColor::addColor(const double3& i) +void CapturedColor::addColor(const byte3& i) { bool empty = !hasAnySample(); @@ -138,15 +155,20 @@ void CapturedColor::addColor(const double3& i) max.y = i.y; if (empty || max.z < i.z) max.z = i.z; + + auto findIter = std::find_if(inputColors.begin(), inputColors.end(), [&](auto& m) { + return m.first == i; + }); - if (inputColors.find(i) == inputColors.end()) + if (findIter == inputColors.end()) { - inputColors[i] = 1; + inputColors.push_back(std::pair(i, 1)); } else { - inputColors[i] = inputColors[i] + 1; + (*findIter).second++; } + totalSamples++; } @@ -168,9 +190,8 @@ int3 CapturedColor::getSourceRGB() const } -void CapturedColor::setFinalRgb(double3 _color) +void CapturedColor::setFinalRGB(byte3 input) { - auto input = ColorSpaceMath::to_byte3(_color); bool found = (std::find(finalRGB.begin(), finalRGB.end(), input) != finalRGB.end()); if (!found) { @@ -183,12 +204,17 @@ std::list CapturedColor::getFinalRGB() const return finalRGB; } -std::list> CapturedColor::getInputColors() +std::list> CapturedColor::getInputYUVColors() const +{ + return sortedInputYUVColors; +} + +std::list> CapturedColor::getInputYuvColors() const { - return sortedInputColors; + return sortedInputYuvColors; } -int CapturedColor::getSourceError(const int3& _color) +int CapturedColor::getSourceError(const int3& _color) const { if (sourceRGBdelta == 0) { diff --git a/sources/lut-calibrator/LutCalibrator.cpp b/sources/lut-calibrator/LutCalibrator.cpp index e1491811b..6fa57079d 100644 --- a/sources/lut-calibrator/LutCalibrator.cpp +++ b/sources/lut-calibrator/LutCalibrator.cpp @@ -250,40 +250,45 @@ QString LutCalibrator::generateReport(bool full) if (color.second.x < 
SCREEN_COLOR_DIMENSION && color.second.y < SCREEN_COLOR_DIMENSION && color.second.z < SCREEN_COLOR_DIMENSION) { const auto& testColor = _capturedColors->all[color.second.x][color.second.y][color.second.z]; - auto yuv = testColor.yuv(); - - auto rgbBT709 =_yuvConverter->toRgb(_capturedColors->getRange(), YuvConverter::BT709, yuv) * 255.0; + if (!full) - rep.append(QString("%1: %2 => %3 , YUV: %4") + { + auto list = testColor.getInputYUVColors(); + + QStringList colors; + for (auto i = list.begin(); i != list.end(); i++) + { + const auto& yuv = *(i); + + auto rgbBT709 = _yuvConverter->toRgb(_capturedColors->getRange(), YuvConverter::BT709, static_cast(yuv.first) / 255.0) * 255.0; + + colors.append(QString("%1 (YUV: %2)") + .arg(vecToString(ColorSpaceMath::to_byte3(rgbBT709)), 12) + .arg(vecToString(yuv.first), 12) + ); + } + + rep.append(QString("%1: %2 => %3 %4") .arg(QString::fromStdString(color.first), 12) .arg(vecToString(testColor.getSourceRGB()), 12) - .arg(vecToString(ColorSpaceMath::to_byte3(rgbBT709)), 12) - .arg(vecToString(ColorSpaceMath::to_byte3(yuv * 255.0)), 12)); + .arg(colors.join(", ")) + .arg(((list.size() > 1) ? 
" [source noise detected]" : ""))); + } else { auto list = testColor.getFinalRGB(); - if (list.size() == 1) + QStringList colors; + for (auto i = list.begin(); i != list.end(); i++) { - rep.append(QString("%1: %2 => %3 [corrected]") - .arg(QString::fromStdString(color.first), 12) - .arg(vecToString(testColor.getSourceRGB()), 12) - .arg(vecToString(list.front()), 12)); - } - else if (list.size() > 1) - { - QStringList colors; - colors.append(QString("%1 {").arg(vecToString(list.front()), 12)); - for (auto i = ++(list.begin()); i != list.end(); i++) - { - colors.append(QString("%1").arg(vecToString((*i)), 12)); - } - rep.append(QString("%1: %2 => %3 } [corrected, source noice detected]") - .arg(QString::fromStdString(color.first), 12) - .arg(vecToString(testColor.getSourceRGB()), 12) - .arg(colors.join(" "))); + colors.append(QString("%1").arg(vecToString((*i)), 12)); } + rep.append(QString("%1: %2 => %3 %4") + .arg(QString::fromStdString(color.first), 12) + .arg(vecToString(testColor.getSourceRGB()), 12) + .arg(colors.join(", ")) + .arg(((list.size() > 1) ? "[corrected, source noise detected]" : "[corrected]"))); } }; @@ -636,30 +641,19 @@ void LutCalibrator::printReport() if ((r % 4 == 0 && g % 4 == 0 && b % 4 == 0) || _debug) { const auto& sample = _capturedColors->all[r][g][b]; - - auto list = sample.getFinalRGB(); - if (list.size() == 1) + QStringList colors; + for (auto i = list.begin(); i != list.end(); i++) { - info.append(QString("%1 => %2"). - arg(vecToString(sample.getSourceRGB())). 
- arg(vecToString(list.front())) - ); + colors.append(QString("%1").arg(vecToString(*i), 12)); } - else if (list.size() > 1) - { - QStringList colors; - colors.append(QString("%1 {").arg(vecToString(list.front()), 12)); - for (auto i = ++(list.begin()); i != list.end(); i++) - { - colors.append(QString("%1").arg(vecToString(*i), 12)); - } - info.append(QString("%1 => %2 } [source noice detected]") - .arg(vecToString(sample.getSourceRGB()), 12) - .arg(colors.join(" "))); - } - } + info.append(QString("%1 => %2 %3") + .arg(vecToString(sample.getSourceRGB()), 12) + .arg(colors.join(", ")) + .arg(((list.size() > 1)?"[source noise detected]" : ""))); + } + info.append("-------------------------------------------------------------------------------------------------"); sendReport(info.join("\r\n")); } @@ -820,15 +814,29 @@ void LutCalibrator::fineTune() if ((r % 2 == 0 && g % 4 == 0 && b % 4 == 0) || (r == b && b == g)) { - auto& sample = _capturedColors->all[r][g][b]; - auto srgb = hdr_to_srgb(_yuvConverter.get(), sample.yuv(), byte2{ sample.U(), sample.V() }, aspect, coefMatrix, HDR_GAMMA(gamma), gammaHLG, nits[gamma], altConvert, bt2020_to_sRgb, tryBt2020Range, bestResult->signal); - auto SRGB = to_int3(srgb * 255.0); - - currentError += sample.getSourceError(SRGB); + auto minError = MAX_CALIBRATION_ERROR; + + const auto& sample = _capturedColors->all[r][g][b]; + auto sampleList = sample.getInputYuvColors(); + + for (auto iter = sampleList.cbegin(); iter != sampleList.cend(); ++iter) + { + auto srgb = hdr_to_srgb(_yuvConverter.get(), (*iter).first, byte2{ sample.U(), sample.V() }, aspect, coefMatrix, HDR_GAMMA(gamma), gammaHLG, nits[gamma], altConvert, bt2020_to_sRgb, tryBt2020Range, bestResult->signal); + auto SRGB = to_int3(srgb * 255.0); + + auto sampleError = sample.getSourceError(SRGB); + + if ((r + 2 == SCREEN_COLOR_DIMENSION && g + 2 == SCREEN_COLOR_DIMENSION && b + 2 == SCREEN_COLOR_DIMENSION && + (SRGB.x > 248 || SRGB.x < 232))) + sampleError = 
bestResult->minError; - if ((r + 2 == SCREEN_COLOR_DIMENSION && g + 2 == SCREEN_COLOR_DIMENSION && b + 2 == SCREEN_COLOR_DIMENSION && - (SRGB.x > 248 || SRGB.x < 232))) - currentError = bestResult->minError; + if (sampleError < minError) + minError = sampleError; + } + + currentError += minError; + + } } if (currentError < bestResult->minError) @@ -956,7 +964,10 @@ QString LutCalibrator::writeLUT(Logger* _log, QString _rootPath, BestResult* bes { if (YUV.y >= 127 && YUV.y <= 129 && YUV.z >= 127 && YUV.z <= 129) { - YUV.y = YUV.z = 128; + YUV.y = 128; + YUV.z = 128; + yuv.y = 128.0 / 255.0; + yuv.z = 128.0 / 255.0; } yuv = hdr_to_srgb(&yuvConverter, yuv, byte2(YUV.y, YUV.z), bestResult->aspect, bestResult->coefMatrix, bestResult->gamma, bestResult->gammaHLG, bestResult->nits, bestResult->altConvert, bestResult->altPrimariesToSrgb, bestResult->bt2020Range, bestResult->signal); } @@ -979,26 +990,21 @@ QString LutCalibrator::writeLUT(Logger* _log, QString _rootPath, BestResult* bes for (int b = 0; b < SCREEN_COLOR_DIMENSION; b++) { auto& sample = (*all)[r][g][b]; - uint32_t ind_lutd = LUT_INDEX(((uint32_t)sample.Y()), ((uint32_t)sample.U()), ((uint32_t)sample.V())); - double rr = _lut.data()[ind_lutd]; - double gg = _lut.data()[ind_lutd + 1]; - double bb = _lut.data()[ind_lutd + 2]; - double3 fColor = double3{ rr, gg, bb }; - sample.setFinalRgb( fColor); + auto list = sample.getInputYUVColors(); + for(auto item = list.begin(); item != list.end(); ++item) + { + auto ind_lutd = LUT_INDEX(((uint32_t)(*item).first.x), ((uint32_t)(*item).first.y), ((uint32_t)(*item).first.z)); + (*item).first = byte3{ _lut.data()[ind_lutd], _lut.data()[ind_lutd + 1], _lut.data()[ind_lutd + 2] }; + (*item).second = sample.getSourceError(static_cast((*item).first)); + } - auto list = sample.getInputColors(); - if (list.size() > 1) - for(const auto& c : list) - { - ind_lutd = LUT_INDEX(((uint32_t)c.first.x), ((uint32_t)c.first.y), ((uint32_t)c.first.z)); + list.sort([](const std::pair& a, 
const std::pair& b) { return a.second < b.second; }); - rr = _lut.data()[ind_lutd]; - gg = _lut.data()[ind_lutd + 1]; - bb = _lut.data()[ind_lutd + 2]; - fColor = double3{ rr, gg, bb }; - sample.setFinalRgb(fColor); - } + for (auto item = list.begin(); item != list.end(); ++item) + { + sample.setFinalRGB((*item).first); + } } } @@ -1141,16 +1147,16 @@ void LutCalibrator::capturedPrimariesCorrection(ColorSpaceMath::HDR_GAMMA gamma, bool LutCalibrator::setTestData() { - std::vector> testData; + std::vector> capturedData; // asssign your test data from calibration_captured_yuv.txt to testData here // verify - if (testData.size() != SCREEN_COLOR_DIMENSION * SCREEN_COLOR_DIMENSION * SCREEN_COLOR_DIMENSION) + if (capturedData.size() != SCREEN_COLOR_DIMENSION * SCREEN_COLOR_DIMENSION * SCREEN_COLOR_DIMENSION) return false; - auto iter = testData.begin(); + auto iter = capturedData.begin(); for (int r = 0; r < SCREEN_COLOR_DIMENSION; r++) for (int g = 0; g < SCREEN_COLOR_DIMENSION; g++) for (int b = 0; b < SCREEN_COLOR_DIMENSION; b++, ++iter) diff --git a/www/i18n/en.json b/www/i18n/en.json index 20dcfcc9f..1eba3bb29 100644 --- a/www/i18n/en.json +++ b/www/i18n/en.json @@ -1149,7 +1149,6 @@ "general_comp_RAWUDPSERVER" : "UDP raw receiver", "edt_udp_raw_server" : "A lightweight server for remote synchronization of HyperHDR instances using UDP and raw RGB LED colors. Can also be controlled from another applications (similar to Boblight server) in a very simple way. For HyperHDR synchronization use the 'udpraw' light source in the sender.
Important: both instances should have the same number of LEDs (max 490) and same geometry for this to work. Smoothing should only be enabled on one instance, not both!", "main_menu_grabber_calibration_token" : "LUT calibration", - "grabber_calibration_expl": "This tool allows you to create a new calibrated HDR LUT for your grabber (or external flatbuffers source) as close to the actual input colors as possible.
You need an HDR10 video source that can display this web page, for example: Windows 10 with HDR enabled in the properties of the graphics driver.
The screen may flicker during calibration. The process typically takes about few minutes on a Intel 7 Windows PC (depending on the host CPU resources and the video capturing framerate).
The calculations are intensive and put a strain on your equipment.
You can monitor the progress in HyperHDR logs using the browser from other device.


1 If everything is properly connected, this page should be displayed on the TV screen (as HDR content) and live preview in HyperHDR (captured by the grabber).
2 Absolute minimum capturing resolution is 384x216 (we will verify this). Recommended are: 1920x1080 at least 1280x720. Aspect 1920/1080 must be preserved.
3 It's preffered to disable 'Quarter of frame mode' in your grabber properties.
4 You must set the grabber's video format to MJPEG/NV12/YUV.
5 Before you run the process please put your WWW browser in the full-screen mode (F11 key, we will verify this).

After completing the calibration, your new LUT table file (lut_lin_tables.3d) will be created in the user's HyperHDR home directory and is immediately ready to use when you just enable HDR tone mapping. Please verify HyperHDR logs for details.", "grabber_calibration_force": "Force auto-detection:", "edt_dev_max_retry" : "Maximum number of retries", "edt_rgbw_calibration_enable" : "White channel calibration (RGBW only)", @@ -1247,9 +1246,10 @@ "option_calibration_intro" : "Please select calibration type", "option_calibration_video" : "Calibration using a test video played by your favorite video player.
We calibrate LUT taking into account the grabber, player and your TV.", "option_calibration_classic" : "Calibration using Windows with HDR mode enabled and a web browser.
We calibrate LUT taking into account the grabber and your TV.", - "video_calibration_overview" : "1 You need to set the video format of your grabber to MJPEG/YUYV/NV12. Other formats are not supported.

2 If you calibrate using Flatbuffers, you need to enable tone mapping in its settings. Only the NV12 video format is supported.

3 You can download test files here: link. In your player, start playing the test file. You should see it in the HyperHDR video preview. The test screen must take up the entire screen and no extraneous elements, such as the player menu, can be visible.

4 For calibration, you should choose a file with 'hdr' in the name unless your system or player automatically uses SDR to HDR tone mapping. In that case, to adapt to such a scenario, choose a file with 'sdr' in the name.

5 The YUV420 format provides the greatest compatibility with average quality and is the most common. The YUV444 format provides the best quality but it is rare to find materials encoded in this form.", + "video_calibration_overview" : "1 You need to set the video format of your grabber to MJPEG/YUYV/NV12. Other formats are not supported.

2 If you calibrate using Flatbuffers, you need to enable tone mapping in its settings. Only the NV12 video format is supported.

3 You can download test files here: link. In your player, start playing the test file. You should see it in the HyperHDR video preview. The test screen must take up the entire screen and no extraneous elements, such as the player menu, can be visible.

4 For calibration, you should choose a file with 'hdr' in the name unless your system or player automatically uses SDR to HDR tone mapping. In that case, to adapt to such a scenario, choose a file with 'sdr' in the name.

5 The YUV420 format provides the greatest compatibility with average quality and is the most common. The YUV444 format provides the best quality but it is rare to find materials encoded in this form.

6 If you are calibrating using Windows 11 (using a web browser or video player as the video source), turn off features such as 'Night light', 'Automatic manage color for apps' and 'Auto-HDR'. Do not change the color balance in the graphics driver. The GFX output should support e.g. 10 or 12 bit RGB in full PC range.", "chk_calibration_debug" : "Debug", "flatbuffers_nv12_quarter_of_frame_title": "Quarter of frame for NV12", "flatbuffers_nv12_quarter_of_frame_expl": "The NV12 codec contains four times more information about brightness than about color. This option allows you to reduce CPU load by reducing the height and width of the video frame by 2 without losing color information.", - "chk_calibration_postprocessing" : "Post-processing" + "chk_calibration_postprocessing" : "Post-processing", + "grabber_calibration_expl": "This tool allows you to create a new calibrated HDR LUT for your grabber (or external flatbuffers source) as close to the actual input colors as possible.
You need an HDR10 video source that can display this web page, for example: Windows 10 with HDR enabled in the properties of the graphics driver.
The screen may flicker during calibration. The process typically takes a few minutes on an Intel i7 Windows PC (depending on the host CPU resources and the video capturing framerate).<br/>
The calculations are intensive and put a strain on your equipment.
You can monitor the progress in HyperHDR logs using the browser from another device.<br/><br/>


1 If everything is properly connected, this page should be displayed on the TV screen (as HDR content) and live preview in HyperHDR (captured by the grabber).
2 Absolute minimum capturing resolution is 1280x720 (we will verify this). Recommended is 1920x1080 YUV/NV12. Aspect 1920/1080 must be preserved.
3 You must disable 'Quarter of frame mode' in your grabber properties if it's enabled.
4 You must set the grabber's video format to MJPEG/NV12/YUV.
5 Before you run the process please put your WWW browser in the full-screen mode (F11 key, we will verify this).
6⁠ If you are calibrating using Windows 11, turn off features such as 'Night light', 'Automatically manage color for apps' and 'Auto-HDR'. Do not change the color balance in the graphics driver. The GFX output should support e.g. 10 or 12 bit RGB in full PC range.<br/>

After completing the calibration, your new LUT table file (lut_lin_tables.3d) will be created in the user's HyperHDR home directory and is immediately ready to use when you just enable HDR tone mapping. Please verify HyperHDR logs for details." }