Android Lollipop 5.0.1_r1
external/deqp/modules/gles3/performance/es3pBufferDataUploadTests.cpp
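Below is the source of the dEQP OpenGL ES 3.0 buffer data upload performance test module as it ships in this release. The tests measure upload, draw and read costs by timing individual operations in microseconds and fitting a line to the (byte size, time) samples with Theil-Sen regression (theilSenSiegelLinearRegression); the fit's offset and slope are reported as constant and per-megabyte cost. For orientation, here is a minimal sketch of the basic timing pattern, assuming a current GL context, a generated buffer object and non-empty data; the helper name timeSingleBufferDataUpload is illustrative and not part of the file:

// Illustrative sketch only -- not part of es3pBufferDataUploadTests.cpp.
// Times one glBufferData() upload with the same utilities the module uses
// (glw::Functions entry points, deGetMicroseconds() from deClock.h).
#include "glwFunctions.hpp"
#include "glwEnums.hpp"
#include "deClock.h"

#include <vector>

static deUint64 timeSingleBufferDataUpload (const glw::Functions& gl, deUint32 buffer, const std::vector<deUint8>& data)
{
	deUint64 startTime;
	deUint64 endTime;

	DE_ASSERT(!data.empty());

	gl.bindBuffer(GL_ARRAY_BUFFER, buffer);

	startTime = deGetMicroseconds();
	gl.bufferData(GL_ARRAY_BUFFER, (glw::GLsizeiptr)data.size(), &data[0], GL_STATIC_DRAW);
	endTime = deGetMicroseconds();

	gl.bindBuffer(GL_ARRAY_BUFFER, 0);
	return endTime - startTime;
}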
/*-------------------------------------------------------------------------
 * drawElements Quality Program OpenGL ES 3.0 Module
 * -------------------------------------------------
 *
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Buffer data upload performance tests.
 *//*--------------------------------------------------------------------*/

#include "es3pBufferDataUploadTests.hpp"
#include "glsCalibration.hpp"
#include "tcuTestLog.hpp"
#include "tcuVectorUtil.hpp"
#include "tcuSurface.hpp"
#include "tcuCPUWarmup.hpp"
#include "tcuRenderTarget.hpp"
#include "gluRenderContext.hpp"
#include "gluShaderProgram.hpp"
#include "gluStrUtil.hpp"
#include "gluPixelTransfer.hpp"
#include "gluObjectWrapper.hpp"
#include "glwFunctions.hpp"
#include "glwEnums.hpp"
#include "deClock.h"
#include "deMath.h"
#include "deStringUtil.hpp"
#include "deRandom.hpp"
#include "deMemory.h"
#include "deThread.h"

#include <algorithm>
#include <sstream>
#include <vector>
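// The rest of the file builds the measurement machinery used by the test cases:
//  - the *Duration structs record per-operation timings (map, unmap, write, flush,
//    alloc, upload, render, read) of one sample; fitResponseDuration is the value
//    the line fits are computed against.
//  - SampleTypeTraits maps each duration type to its *Statistics type and flags
//    which per-operation statistics and log entries apply to it.
//  - fitLineToSamples() fits time against written/rendered byte size with
//    theilSenSiegelLinearRegression; calculateSampleStatistics() and
//    calculateBasicStatistics() derive median rates and residuals from that fit.
//  - calculateSampleFitLinearity() and calculateSampleTemporalStability() grade how
//    well a single line explains the samples and how consistent the two interleaved
//    sampling passes are; logSampleList() writes the raw samples to the test log.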
namespace deqp
{
namespace gles3
{
namespace Performance
{
namespace
{

using gls::theilSenSiegelLinearRegression;
using gls::LineParametersWithConfidence;

static const char* const s_dummyVertexShader =
	"#version 300 es\n"
	"in highp vec4 a_position;\n"
	"void main (void)\n"
	"{\n"
	" gl_Position = a_position;\n"
	"}\n";

static const char* const s_dummyFragnentShader =
	"#version 300 es\n"
	"layout(location = 0) out mediump vec4 dEQP_FragColor;\n"
	"void main (void)\n"
	"{\n"
	" dEQP_FragColor = vec4(1.0, 0.0, 0.0, 1.0);\n"
	"}\n";

static const char* const s_colorVertexShader =
	"#version 300 es\n"
	"in highp vec4 a_position;\n"
	"in highp vec4 a_color;\n"
	"out highp vec4 v_color;\n"
	"void main (void)\n"
	"{\n"
	" gl_Position = a_position;\n"
	" v_color = a_color;\n"
	"}\n";

static const char* const s_colorFragmentShader =
	"#version 300 es\n"
	"layout(location = 0) out mediump vec4 dEQP_FragColor;\n"
	"in mediump vec4 v_color;\n"
	"void main (void)\n"
	"{\n"
	" dEQP_FragColor = v_color;\n"
	"}\n";
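// EnableIf / EnableIfNot below are small SFINAE helpers: EnableIf<T, Cond>::Type
// exists only when Cond is true and EnableIfNot<T, Cond>::Type only when it is
// false. The log* helper overloads further down use them as return types so that
// only the overload matching the SampleTypeTraits flags of a sample type is viable.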
template <typename TrueType, bool Cond>
struct EnableIf
{
	typedef TrueType Type;
};

template <typename TrueType>
struct EnableIf<TrueType, false>
{
};

template <typename TrueType, bool Cond>
struct EnableIfNot
{
};

template <typename TrueType>
struct EnableIfNot<TrueType, false>
{ typedef TrueType Type; }; struct SingleOperationDuration { deUint64 totalDuration; deUint64 fitResponseDuration; // used for fitting }; struct MapBufferRangeDuration { deUint64 mapDuration; deUint64 unmapDuration; deUint64 writeDuration; deUint64 allocDuration; deUint64 totalDuration; deUint64 fitResponseDuration; }; struct MapBufferRangeDurationNoAlloc { deUint64 mapDuration; deUint64 unmapDuration; deUint64 writeDuration; deUint64 totalDuration; deUint64 fitResponseDuration; }; struct MapBufferRangeFlushDuration { deUint64 mapDuration; deUint64 unmapDuration; deUint64 writeDuration; deUint64 flushDuration; deUint64 allocDuration; deUint64 totalDuration; deUint64 fitResponseDuration; }; struct MapBufferRangeFlushDurationNoAlloc { deUint64 mapDuration; deUint64 unmapDuration; deUint64 writeDuration; deUint64 flushDuration; deUint64 totalDuration; deUint64 fitResponseDuration; }; struct RenderReadDuration { deUint64 renderDuration; deUint64 readDuration; deUint64 renderReadDuration; deUint64 totalDuration; deUint64 fitResponseDuration; }; struct UnrelatedUploadRenderReadDuration { deUint64 renderDuration; deUint64 readDuration; deUint64 renderReadDuration; deUint64 totalDuration; deUint64 fitResponseDuration; }; struct UploadRenderReadDuration { deUint64 uploadDuration; deUint64 renderDuration; deUint64 readDuration; deUint64 totalDuration; deUint64 renderReadDuration; deUint64 fitResponseDuration; }; struct UploadRenderReadDurationWithUnrelatedUploadSize { deUint64 uploadDuration; deUint64 renderDuration; deUint64 readDuration; deUint64 totalDuration; deUint64 renderReadDuration; deUint64 fitResponseDuration; }; struct RenderUploadRenderReadDuration { deUint64 firstRenderDuration; deUint64 uploadDuration; deUint64 secondRenderDuration; deUint64 readDuration; deUint64 totalDuration; deUint64 renderReadDuration; deUint64 fitResponseDuration; }; template
<typename SampleT>
struct UploadSampleResult
{
	typedef SampleT SampleType;

	int			bufferSize;
	int			allocatedSize;
	int			writtenSize;
	SampleType	duration;
};

template <typename SampleT>
struct RenderSampleResult { typedef SampleT SampleType; int uploadedDataSize; int renderDataSize; int unrelatedDataSize; int numVertices; SampleT duration; }; struct SingleOperationStatistics { float minTime; float maxTime; float medianTime; float min2DecileTime; // !< minimum value in the 2nd decile float max9DecileTime; // !< maximum value in the 9th decile }; struct SingleCallStatistics { SingleOperationStatistics result; float medianRate; float maxDiffTime; float maxDiff9DecileTime; float medianDiffTime; float maxRelDiffTime; float max9DecileRelDiffTime; float medianRelDiffTime; }; struct MapCallStatistics { SingleOperationStatistics map; SingleOperationStatistics unmap; SingleOperationStatistics write; SingleOperationStatistics alloc; SingleOperationStatistics result; float medianRate; float maxDiffTime; float maxDiff9DecileTime; float medianDiffTime; float maxRelDiffTime; float max9DecileRelDiffTime; float medianRelDiffTime; }; struct MapFlushCallStatistics { SingleOperationStatistics map; SingleOperationStatistics unmap; SingleOperationStatistics write; SingleOperationStatistics flush; SingleOperationStatistics alloc; SingleOperationStatistics result; float medianRate; float maxDiffTime; float maxDiff9DecileTime; float medianDiffTime; float maxRelDiffTime; float max9DecileRelDiffTime; float medianRelDiffTime; }; struct RenderReadStatistics { SingleOperationStatistics render; SingleOperationStatistics read; SingleOperationStatistics result; SingleOperationStatistics total; float medianRate; float maxDiffTime; float maxDiff9DecileTime; float medianDiffTime; float maxRelDiffTime; float max9DecileRelDiffTime; float medianRelDiffTime; }; struct UploadRenderReadStatistics { SingleOperationStatistics upload; SingleOperationStatistics render; SingleOperationStatistics read; SingleOperationStatistics result; SingleOperationStatistics total; float medianRate; float maxDiffTime; float maxDiff9DecileTime; float medianDiffTime; float maxRelDiffTime; float max9DecileRelDiffTime; float medianRelDiffTime; }; struct RenderUploadRenderReadStatistics { SingleOperationStatistics firstRender; SingleOperationStatistics upload; SingleOperationStatistics secondRender; SingleOperationStatistics read; SingleOperationStatistics result; SingleOperationStatistics total; float medianRate; float maxDiffTime; float maxDiff9DecileTime; float medianDiffTime; float maxRelDiffTime; float max9DecileRelDiffTime; float medianRelDiffTime; }; template
<typename SampleType>
struct SampleTypeTraits
{
};

template <> struct SampleTypeTraits<SingleOperationDuration>
{ typedef SingleCallStatistics StatsType; enum { HAS_MAP_STATS = 0 }; enum { HAS_UNMAP_STATS = 0 }; enum { HAS_WRITE_STATS = 0 }; enum { HAS_FLUSH_STATS = 0 }; enum { HAS_ALLOC_STATS = 0 }; enum { LOG_CONTRIBUTIONS = 0 }; };

template <> struct SampleTypeTraits<MapBufferRangeDuration>
{ typedef MapCallStatistics StatsType; enum { HAS_MAP_STATS = 1 }; enum { HAS_UNMAP_STATS = 1 }; enum { HAS_WRITE_STATS = 1 }; enum { HAS_FLUSH_STATS = 0 }; enum { HAS_ALLOC_STATS = 1 }; enum { LOG_CONTRIBUTIONS = 1 }; };

template <> struct SampleTypeTraits<MapBufferRangeDurationNoAlloc>
{ typedef MapCallStatistics StatsType; enum { HAS_MAP_STATS = 1 }; enum { HAS_UNMAP_STATS = 1 }; enum { HAS_WRITE_STATS = 1 }; enum { HAS_FLUSH_STATS = 0 }; enum { HAS_ALLOC_STATS = 0 }; enum { LOG_CONTRIBUTIONS = 1 }; };

template <> struct SampleTypeTraits<MapBufferRangeFlushDuration>
{ typedef MapFlushCallStatistics StatsType; enum { HAS_MAP_STATS = 1 }; enum { HAS_UNMAP_STATS = 1 }; enum { HAS_WRITE_STATS = 1 }; enum { HAS_FLUSH_STATS = 1 }; enum { HAS_ALLOC_STATS = 1 }; enum { LOG_CONTRIBUTIONS = 1 }; };

template <> struct SampleTypeTraits<MapBufferRangeFlushDurationNoAlloc>
{ typedef MapFlushCallStatistics StatsType; enum { HAS_MAP_STATS = 1 }; enum { HAS_UNMAP_STATS = 1 }; enum { HAS_WRITE_STATS = 1 }; enum { HAS_FLUSH_STATS = 1 }; enum { HAS_ALLOC_STATS = 0 }; enum { LOG_CONTRIBUTIONS = 1 }; };

template <> struct SampleTypeTraits<RenderReadDuration>
{ typedef RenderReadStatistics StatsType; enum { HAS_RENDER_STATS = 1 }; enum { HAS_READ_STATS = 1 }; enum { HAS_UPLOAD_STATS = 0 }; enum { HAS_TOTAL_STATS = 1 }; enum { HAS_FIRST_RENDER_STATS = 0 }; enum { HAS_SECOND_RENDER_STATS = 0 }; enum { LOG_CONTRIBUTIONS = 1 }; };

template <> struct SampleTypeTraits<UnrelatedUploadRenderReadDuration>
{ typedef RenderReadStatistics StatsType; enum { HAS_RENDER_STATS = 1 }; enum { HAS_READ_STATS = 1 }; enum { HAS_UPLOAD_STATS = 0 }; enum { HAS_TOTAL_STATS = 1 }; enum { HAS_FIRST_RENDER_STATS = 0 }; enum { HAS_SECOND_RENDER_STATS = 0 }; enum { LOG_CONTRIBUTIONS = 1 }; };

template <> struct SampleTypeTraits<UploadRenderReadDuration>
{ typedef UploadRenderReadStatistics StatsType; enum { HAS_RENDER_STATS = 1 }; enum { HAS_READ_STATS = 1 }; enum { HAS_UPLOAD_STATS = 1 }; enum { HAS_TOTAL_STATS = 1 }; enum { HAS_FIRST_RENDER_STATS = 0 }; enum { HAS_SECOND_RENDER_STATS = 0 }; enum { LOG_CONTRIBUTIONS = 1 }; enum { LOG_UNRELATED_UPLOAD_SIZE = 0 }; };

template <> struct SampleTypeTraits<UploadRenderReadDurationWithUnrelatedUploadSize>
{ typedef UploadRenderReadStatistics StatsType; enum { HAS_RENDER_STATS = 1 }; enum { HAS_READ_STATS = 1 }; enum { HAS_UPLOAD_STATS = 1 }; enum { HAS_TOTAL_STATS = 1 }; enum { HAS_FIRST_RENDER_STATS = 0 }; enum { HAS_SECOND_RENDER_STATS = 0 }; enum { LOG_CONTRIBUTIONS = 1 }; enum { LOG_UNRELATED_UPLOAD_SIZE = 1 }; };

template <> struct SampleTypeTraits<RenderUploadRenderReadDuration>
{ typedef RenderUploadRenderReadStatistics StatsType; enum { HAS_RENDER_STATS = 0 }; enum { HAS_READ_STATS = 1 }; enum { HAS_UPLOAD_STATS = 1 }; enum { HAS_TOTAL_STATS = 1 }; enum { HAS_FIRST_RENDER_STATS = 1 }; enum { HAS_SECOND_RENDER_STATS = 1 }; enum { LOG_CONTRIBUTIONS = 1 }; enum { LOG_UNRELATED_UPLOAD_SIZE = 1 }; }; struct UploadSampleAnalyzeResult { float transferRateMedian; float transferRateAtRange; float transferRateAtInfinity; }; struct RenderSampleAnalyzeResult { float renderRateMedian; float renderRateAtRange; float renderRateAtInfinity; }; class UnmapFailureError : public std::exception { public: UnmapFailureError (void) : std::exception() {} }; static std::string getHumanReadableByteSize (int numBytes) { std::ostringstream buf; if (numBytes < 1024) buf << numBytes << " byte(s)"; else if (numBytes < 1024 * 1024) buf << de::floatToString(numBytes/1024.0f, 1) << " KiB"; else buf << de::floatToString(numBytes/1024.0f/1024.0f, 1) << " MiB"; return buf.str(); } static deUint64 medianTimeMemcpy (void* dst, const void* src, int numBytes) { // Time used by memcpy is assumed to be asymptotically linear // With large numBytes, the probability of context switch or other random // event is high. Apply memcpy in parts and report how much time would // memcpy have used with the median transfer rate. // Less than 1MiB, no need to do anything special if (numBytes < 1048576) { deUint64 startTime; deUint64 endTime; deYield(); startTime = deGetMicroseconds(); deMemcpy(dst, src, numBytes); endTime = deGetMicroseconds(); return endTime - startTime; } else { // Do memcpy in multiple parts const int numSections = 5; const int sectionAlign = 16; int sectionStarts[numSections+1]; int sectionLens[numSections]; deUint64 sectionTimes[numSections]; deUint64 medianTime; deUint64 bestTime = 0; for (int sectionNdx = 0; sectionNdx < numSections; ++sectionNdx) sectionStarts[sectionNdx] = deAlign32((numBytes * sectionNdx / numSections), sectionAlign); sectionStarts[numSections] = numBytes; for (int sectionNdx = 0; sectionNdx < numSections; ++sectionNdx) sectionLens[sectionNdx] = sectionStarts[sectionNdx+1] - sectionStarts[sectionNdx]; // Memcpy is usually called after mapbuffer range which may take // a lot of time. To prevent power management from kicking in during // copy, warm up more. 
{ deYield(); tcu::warmupCPU(); deYield(); } for (int sectionNdx = 0; sectionNdx < numSections; ++sectionNdx) { deUint64 startTime; deUint64 endTime; startTime = deGetMicroseconds(); deMemcpy((deUint8*)dst + sectionStarts[sectionNdx], (const deUint8*)src + sectionStarts[sectionNdx], sectionLens[sectionNdx]); endTime = deGetMicroseconds(); sectionTimes[sectionNdx] = endTime - startTime; if (!bestTime || sectionTimes[sectionNdx] < bestTime) bestTime = sectionTimes[sectionNdx]; // Detect if write takes 50% longer than it should, and warm up if that happened if (sectionNdx != numSections-1 && (float)sectionTimes[sectionNdx] > 1.5f * bestTime) { deYield(); tcu::warmupCPU(); deYield(); } } std::sort(sectionTimes, sectionTimes + numSections); if ((numSections % 2) == 0) medianTime = (sectionTimes[numSections / 2 - 1] + sectionTimes[numSections / 2]) / 2; else medianTime = sectionTimes[numSections / 2]; return medianTime*numSections; } } static float dummyCalculation (float initial, int workSize) { float a = initial; int b = 123; for (int ndx = 0; ndx < workSize; ++ndx) { a = deFloatCos(a + (float)b); b = (b + 63) % 107 + de::abs((int)(a*10.0f)); } return a + (float)b; } static void busyWait (int microseconds) { const deUint64 maxSingleWaitTime = 1000; // 1ms const deUint64 endTime = deGetMicroseconds() + microseconds; float dummy = *tcu::warmupCPUInternal::g_dummy.m_v; int workSize = 500; // exponentially increase work, cap to 1ms while (deGetMicroseconds() < endTime) { const deUint64 startTime = deGetMicroseconds(); deUint64 totalTime; dummy = dummyCalculation(dummy, workSize); totalTime = deGetMicroseconds() - startTime; if (totalTime >= maxSingleWaitTime) break; else workSize *= 2; } // "wait" while (deGetMicroseconds() < endTime) dummy = dummyCalculation(dummy, workSize); *tcu::warmupCPUInternal::g_dummy.m_v = dummy; } // Sample from given values using linear interpolation at a given position as if values were laid to range [0, 1] template
<typename T>
static float linearSample (const std::vector<T>
& values, float position) { DE_ASSERT(position >= 0.0f); DE_ASSERT(position <= 1.0f); const float floatNdx = ((int)values.size() - 1) * position; const int lowerNdx = (int)deFloatFloor(floatNdx); const int higherNdx = lowerNdx + 1; const float interpolationFactor = floatNdx - (float)lowerNdx; DE_ASSERT(lowerNdx >= 0 && lowerNdx < (int)values.size()); DE_ASSERT(higherNdx >= 0 && higherNdx < (int)values.size()); DE_ASSERT(interpolationFactor >= 0 && interpolationFactor < 1.0f); return tcu::mix((float)values[lowerNdx], (float)values[higherNdx], interpolationFactor); } template
<typename T>
SingleOperationStatistics calculateSingleOperationStatistics (const std::vector<T>
& samples, deUint64 T::SampleType::*target) { SingleOperationStatistics stats; std::vector
values(samples.size()); for (int ndx = 0; ndx < (int)samples.size(); ++ndx) values[ndx] = samples[ndx].duration.*target; std::sort(values.begin(), values.end()); stats.minTime = (float)values.front(); stats.maxTime = (float)values.back(); stats.medianTime = linearSample(values, 0.5f); stats.min2DecileTime = linearSample(values, 0.1f); stats.max9DecileTime = linearSample(values, 0.9f); return stats; } template
<typename StatisticsType, typename SampleType>
void calculateBasicStatistics (StatisticsType& stats, const LineParametersWithConfidence& fit, const std::vector<SampleType>
& samples, int SampleType::*predictor) { std::vector
values(samples.size()); for (int ndx = 0; ndx < (int)samples.size(); ++ndx) values[ndx] = samples[ndx].duration.fitResponseDuration; // median rate { std::vector
processingRates(samples.size()); for (int ndx = 0; ndx < (int)samples.size(); ++ndx) { const float timeInSeconds = values[ndx] / 1000.0f / 1000.0f; processingRates[ndx] = samples[ndx].*predictor / timeInSeconds; } std::sort(processingRates.begin(), processingRates.end()); stats.medianRate = linearSample(processingRates, 0.5f); } // results compared to the approximation { std::vector
timeDiffs(samples.size()); for (int ndx = 0; ndx < (int)samples.size(); ++ndx) { const float prediction = samples[ndx].*predictor * fit.coefficient + fit.offset; const float actual = (float)values[ndx]; timeDiffs[ndx] = actual - prediction; } std::sort(timeDiffs.begin(), timeDiffs.end()); stats.maxDiffTime = timeDiffs.back(); stats.maxDiff9DecileTime = linearSample(timeDiffs, 0.9f); stats.medianDiffTime = linearSample(timeDiffs, 0.5f); } // relative comparison to the approximation { std::vector
relativeDiffs(samples.size()); for (int ndx = 0; ndx < (int)samples.size(); ++ndx) { const float prediction = samples[ndx].*predictor * fit.coefficient + fit.offset; const float actual = (float)values[ndx]; // Ignore cases where we predict negative times, or if // ratio would be (nearly) infinite: ignore if predicted // time is less than 1 microsecond if (prediction < 1.0f) relativeDiffs[ndx] = 0.0f; else relativeDiffs[ndx] = (actual - prediction) / prediction; } std::sort(relativeDiffs.begin(), relativeDiffs.end()); stats.maxRelDiffTime = relativeDiffs.back(); stats.max9DecileRelDiffTime = linearSample(relativeDiffs, 0.9f); stats.medianRelDiffTime = linearSample(relativeDiffs, 0.5f); } // values calculated using sorted timings std::sort(values.begin(), values.end()); stats.result.minTime = (float)values.front(); stats.result.maxTime = (float)values.back(); stats.result.medianTime = linearSample(values, 0.5f); stats.result.min2DecileTime = linearSample(values, 0.1f); stats.result.max9DecileTime = linearSample(values, 0.9f); } template
<typename StatisticsType, typename SampleType>
void calculateBasicTransferStatistics (StatisticsType& stats, const LineParametersWithConfidence& fit, const std::vector<SampleType>
& samples) { calculateBasicStatistics(stats, fit, samples, &SampleType::writtenSize); } template
<typename StatisticsType, typename SampleType>
void calculateBasicRenderStatistics (StatisticsType& stats, const LineParametersWithConfidence& fit, const std::vector<SampleType>
& samples) { calculateBasicStatistics(stats, fit, samples, &SampleType::renderDataSize); } static SingleCallStatistics calculateSampleStatistics (const LineParametersWithConfidence& fit, const std::vector
>& samples) { SingleCallStatistics stats; calculateBasicTransferStatistics(stats, fit, samples); return stats; } static MapCallStatistics calculateSampleStatistics (const LineParametersWithConfidence& fit, const std::vector
>& samples) { MapCallStatistics stats; calculateBasicTransferStatistics(stats, fit, samples); stats.map = calculateSingleOperationStatistics(samples, &MapBufferRangeDuration::mapDuration); stats.unmap = calculateSingleOperationStatistics(samples, &MapBufferRangeDuration::unmapDuration); stats.write = calculateSingleOperationStatistics(samples, &MapBufferRangeDuration::writeDuration); stats.alloc = calculateSingleOperationStatistics(samples, &MapBufferRangeDuration::allocDuration); return stats; } static MapFlushCallStatistics calculateSampleStatistics (const LineParametersWithConfidence& fit, const std::vector
>& samples) { MapFlushCallStatistics stats; calculateBasicTransferStatistics(stats, fit, samples); stats.map = calculateSingleOperationStatistics(samples, &MapBufferRangeFlushDuration::mapDuration); stats.unmap = calculateSingleOperationStatistics(samples, &MapBufferRangeFlushDuration::unmapDuration); stats.write = calculateSingleOperationStatistics(samples, &MapBufferRangeFlushDuration::writeDuration); stats.flush = calculateSingleOperationStatistics(samples, &MapBufferRangeFlushDuration::flushDuration); stats.alloc = calculateSingleOperationStatistics(samples, &MapBufferRangeFlushDuration::allocDuration); return stats; } static MapCallStatistics calculateSampleStatistics (const LineParametersWithConfidence& fit, const std::vector
>& samples) { MapCallStatistics stats; calculateBasicTransferStatistics(stats, fit, samples); stats.map = calculateSingleOperationStatistics(samples, &MapBufferRangeDurationNoAlloc::mapDuration); stats.unmap = calculateSingleOperationStatistics(samples, &MapBufferRangeDurationNoAlloc::unmapDuration); stats.write = calculateSingleOperationStatistics(samples, &MapBufferRangeDurationNoAlloc::writeDuration); return stats; } static MapFlushCallStatistics calculateSampleStatistics (const LineParametersWithConfidence& fit, const std::vector
>& samples) { MapFlushCallStatistics stats; calculateBasicTransferStatistics(stats, fit, samples); stats.map = calculateSingleOperationStatistics(samples, &MapBufferRangeFlushDurationNoAlloc::mapDuration); stats.unmap = calculateSingleOperationStatistics(samples, &MapBufferRangeFlushDurationNoAlloc::unmapDuration); stats.write = calculateSingleOperationStatistics(samples, &MapBufferRangeFlushDurationNoAlloc::writeDuration); stats.flush = calculateSingleOperationStatistics(samples, &MapBufferRangeFlushDurationNoAlloc::flushDuration); return stats; } static RenderReadStatistics calculateSampleStatistics (const LineParametersWithConfidence& fit, const std::vector
>& samples) { RenderReadStatistics stats; calculateBasicRenderStatistics(stats, fit, samples); stats.render = calculateSingleOperationStatistics(samples, &RenderReadDuration::renderDuration); stats.read = calculateSingleOperationStatistics(samples, &RenderReadDuration::readDuration); stats.total = calculateSingleOperationStatistics(samples, &RenderReadDuration::totalDuration); return stats; } static RenderReadStatistics calculateSampleStatistics (const LineParametersWithConfidence& fit, const std::vector
>& samples) { RenderReadStatistics stats; calculateBasicRenderStatistics(stats, fit, samples); stats.render = calculateSingleOperationStatistics(samples, &UnrelatedUploadRenderReadDuration::renderDuration); stats.read = calculateSingleOperationStatistics(samples, &UnrelatedUploadRenderReadDuration::readDuration); stats.total = calculateSingleOperationStatistics(samples, &UnrelatedUploadRenderReadDuration::totalDuration); return stats; } static UploadRenderReadStatistics calculateSampleStatistics (const LineParametersWithConfidence& fit, const std::vector
>& samples) { UploadRenderReadStatistics stats; calculateBasicRenderStatistics(stats, fit, samples); stats.upload = calculateSingleOperationStatistics(samples, &UploadRenderReadDuration::uploadDuration); stats.render = calculateSingleOperationStatistics(samples, &UploadRenderReadDuration::renderDuration); stats.read = calculateSingleOperationStatistics(samples, &UploadRenderReadDuration::readDuration); stats.total = calculateSingleOperationStatistics(samples, &UploadRenderReadDuration::totalDuration); return stats; } static UploadRenderReadStatistics calculateSampleStatistics (const LineParametersWithConfidence& fit, const std::vector
>& samples) { UploadRenderReadStatistics stats; calculateBasicRenderStatistics(stats, fit, samples); stats.upload = calculateSingleOperationStatistics(samples, &UploadRenderReadDurationWithUnrelatedUploadSize::uploadDuration); stats.render = calculateSingleOperationStatistics(samples, &UploadRenderReadDurationWithUnrelatedUploadSize::renderDuration); stats.read = calculateSingleOperationStatistics(samples, &UploadRenderReadDurationWithUnrelatedUploadSize::readDuration); stats.total = calculateSingleOperationStatistics(samples, &UploadRenderReadDurationWithUnrelatedUploadSize::totalDuration); return stats; } static RenderUploadRenderReadStatistics calculateSampleStatistics (const LineParametersWithConfidence& fit, const std::vector
>& samples) { RenderUploadRenderReadStatistics stats; calculateBasicRenderStatistics(stats, fit, samples); stats.firstRender = calculateSingleOperationStatistics(samples, &RenderUploadRenderReadDuration::firstRenderDuration); stats.upload = calculateSingleOperationStatistics(samples, &RenderUploadRenderReadDuration::uploadDuration); stats.secondRender = calculateSingleOperationStatistics(samples, &RenderUploadRenderReadDuration::secondRenderDuration); stats.read = calculateSingleOperationStatistics(samples, &RenderUploadRenderReadDuration::readDuration); stats.total = calculateSingleOperationStatistics(samples, &RenderUploadRenderReadDuration::totalDuration); return stats; } template
<typename DurationType>
static LineParametersWithConfidence fitLineToSamples (const std::vector<UploadSampleResult<DurationType> >& samples, int beginNdx, int endNdx, int step, deUint64 DurationType::*target = &DurationType::fitResponseDuration)
{
	std::vector<tcu::Vec2>
samplePoints; for (int sampleNdx = beginNdx; sampleNdx < endNdx; sampleNdx += step) { tcu::Vec2 point; point.x() = (float)(samples[sampleNdx].writtenSize); point.y() = (float)(samples[sampleNdx].duration.*target); samplePoints.push_back(point); } return theilSenSiegelLinearRegression(samplePoints, 0.6f); } template
<typename DurationType>
static LineParametersWithConfidence fitLineToSamples (const std::vector<RenderSampleResult<DurationType> >& samples, int beginNdx, int endNdx, int step, deUint64 DurationType::*target = &DurationType::fitResponseDuration)
{
	std::vector<tcu::Vec2>
samplePoints; for (int sampleNdx = beginNdx; sampleNdx < endNdx; sampleNdx += step) { tcu::Vec2 point; point.x() = (float)(samples[sampleNdx].renderDataSize); point.y() = (float)(samples[sampleNdx].duration.*target); samplePoints.push_back(point); } return theilSenSiegelLinearRegression(samplePoints, 0.6f); } template
<typename T>
static LineParametersWithConfidence fitLineToSamples (const std::vector<T>
& samples, int beginNdx, int endNdx, deUint64 T::SampleType::*target = &T::SampleType::fitResponseDuration) { return fitLineToSamples(samples, beginNdx, endNdx, 1, target); } template
<typename T>
static LineParametersWithConfidence fitLineToSamples (const std::vector<T>
& samples, deUint64 T::SampleType::*target = &T::SampleType::fitResponseDuration) { return fitLineToSamples(samples, 0, (int)samples.size(), target); } static float getAreaBetweenLines (float xmin, float xmax, float lineAOffset, float lineACoefficient, float lineBOffset, float lineBCoefficient) { const float lineAMin = lineAOffset + lineACoefficient * xmin; const float lineAMax = lineAOffset + lineACoefficient * xmax; const float lineBMin = lineBOffset + lineBCoefficient * xmin; const float lineBMax = lineBOffset + lineBCoefficient * xmax; const bool aOverBAtBegin = (lineAMin > lineBMin); const bool aOverBAtEnd = (lineAMax > lineBMax); if (aOverBAtBegin == aOverBAtEnd) { // lines do not intersect const float midpoint = (xmin + xmax) / 2.0f; const float width = (xmax - xmin); const float lineAHeight = lineAOffset + lineACoefficient * midpoint; const float lineBHeight = lineBOffset + lineBCoefficient * midpoint; return width * de::abs(lineAHeight - lineBHeight); } else { // lines intersect const float approachCoeffient = de::abs(lineACoefficient - lineBCoefficient); const float epsilon = 0.0001f; const float leftHeight = de::abs(lineAMin - lineBMin); const float rightHeight = de::abs(lineAMax - lineBMax); if (approachCoeffient < epsilon) return 0.0f; return (0.5f * leftHeight * (leftHeight / approachCoeffient)) + (0.5f * rightHeight * (rightHeight / approachCoeffient)); } } template
<typename T>
static float calculateSampleFitLinearity (const std::vector<T>
& samples, int T::*predictor) { // Compare the fitted line of first half of the samples to the fitted line of // the second half of the samples. Calculate a AABB that fully contains every // sample's x component and both fit lines in this range. Calculate the ratio // of the area between the lines and the AABB. const float epsilon = 1.e-6f; const int midPoint = (int)samples.size() / 2; const LineParametersWithConfidence startApproximation = fitLineToSamples(samples, 0, midPoint, &T::SampleType::fitResponseDuration); const LineParametersWithConfidence endApproximation = fitLineToSamples(samples, midPoint, (int)samples.size(), &T::SampleType::fitResponseDuration); const float aabbMinX = (float)(samples.front().*predictor); const float aabbMinY = de::min(startApproximation.offset + startApproximation.coefficient*aabbMinX, endApproximation.offset + endApproximation.coefficient*aabbMinX); const float aabbMaxX = (float)(samples.back().*predictor); const float aabbMaxY = de::max(startApproximation.offset + startApproximation.coefficient*aabbMaxX, endApproximation.offset + endApproximation.coefficient*aabbMaxX); const float aabbArea = (aabbMaxX - aabbMinX) * (aabbMaxY - aabbMinY); const float areaBetweenLines = getAreaBetweenLines(aabbMinX, aabbMaxX, startApproximation.offset, startApproximation.coefficient, endApproximation.offset, endApproximation.coefficient); const float errorAreaRatio = (aabbArea < epsilon) ? (1.0f) : (areaBetweenLines / aabbArea); return de::clamp(1.0f - errorAreaRatio, 0.0f, 1.0f); } template
<typename SampleType>
static float calculateSampleFitLinearity (const std::vector<UploadSampleResult<SampleType> >& samples)
{
	return calculateSampleFitLinearity(samples, &UploadSampleResult<SampleType>::writtenSize);
}

template <typename SampleType>
static float calculateSampleFitLinearity (const std::vector<RenderSampleResult<SampleType> >& samples)
{
	return calculateSampleFitLinearity(samples, &RenderSampleResult<SampleType>::renderDataSize);
}

template <typename T>
static float calculateSampleTemporalStability (const std::vector<T>
& samples, int T::*predictor) { // Samples are sampled in the following order: 1) even samples (in random order) 2) odd samples (in random order) // Compare the fitted line of even samples to the fitted line of the odd samples. Calculate a AABB that fully // contains every sample's x component and both fit lines in this range. Calculate the ratio of the area between // the lines and the AABB. const float epsilon = 1.e-6f; const LineParametersWithConfidence evenApproximation = fitLineToSamples(samples, 0, (int)samples.size(), 2, &T::SampleType::fitResponseDuration); const LineParametersWithConfidence oddApproximation = fitLineToSamples(samples, 1, (int)samples.size(), 2, &T::SampleType::fitResponseDuration); const float aabbMinX = (float)(samples.front().*predictor); const float aabbMinY = de::min(evenApproximation.offset + evenApproximation.coefficient*aabbMinX, oddApproximation.offset + oddApproximation.coefficient*aabbMinX); const float aabbMaxX = (float)(samples.back().*predictor); const float aabbMaxY = de::max(evenApproximation.offset + evenApproximation.coefficient*aabbMaxX, oddApproximation.offset + oddApproximation.coefficient*aabbMaxX); const float aabbArea = (aabbMaxX - aabbMinX) * (aabbMaxY - aabbMinY); const float areaBetweenLines = getAreaBetweenLines(aabbMinX, aabbMaxX, evenApproximation.offset, evenApproximation.coefficient, oddApproximation.offset, oddApproximation.coefficient); const float errorAreaRatio = (aabbArea < epsilon) ? (1.0f) : (areaBetweenLines / aabbArea); return de::clamp(1.0f - errorAreaRatio, 0.0f, 1.0f); } template
<typename SampleType>
static float calculateSampleTemporalStability (const std::vector<UploadSampleResult<SampleType> >& samples)
{
	return calculateSampleTemporalStability(samples, &UploadSampleResult<SampleType>::writtenSize);
}

template <typename SampleType>
static float calculateSampleTemporalStability (const std::vector<RenderSampleResult<SampleType> >& samples)
{
	return calculateSampleTemporalStability(samples, &RenderSampleResult<SampleType>::renderDataSize);
}

template <typename SampleType>
static void bucketizeSamplesUniformly (const std::vector<UploadSampleResult<SampleType> >& samples, std::vector<UploadSampleResult<SampleType>
>* buckets, int numBuckets, int& minBufferSize, int& maxBufferSize) { minBufferSize = 0; maxBufferSize = 0; for (int sampleNdx = 0; sampleNdx < (int)samples.size(); ++sampleNdx) { DE_ASSERT(samples[sampleNdx].allocatedSize != 0); if (!minBufferSize || samples[sampleNdx].allocatedSize < minBufferSize) minBufferSize = samples[sampleNdx].allocatedSize; if (!maxBufferSize || samples[sampleNdx].allocatedSize > maxBufferSize) maxBufferSize = samples[sampleNdx].allocatedSize; } for (int sampleNdx = 0; sampleNdx < (int)samples.size(); ++sampleNdx) { const float bucketNdxFloat = (samples[sampleNdx].allocatedSize - minBufferSize) / (float)(maxBufferSize - minBufferSize) * numBuckets; const int bucketNdx = de::clamp((int)deFloatFloor(bucketNdxFloat), 0, numBuckets-1); buckets[bucketNdx].push_back(samples[sampleNdx]); } } template
static typename EnableIf
::HAS_MAP_STATS>::Type logMapRangeStats (tcu::TestLog& log, const typename SampleTypeTraits
::StatsType& stats) { log << tcu::TestLog::Float("MapRangeMin", "MapRange: Min time", "us", QP_KEY_TAG_TIME, stats.map.minTime) << tcu::TestLog::Float("MapRangeMax", "MapRange: Max time", "us", QP_KEY_TAG_TIME, stats.map.maxTime) << tcu::TestLog::Float("MapRangeMin90", "MapRange: 90%-Min time", "us", QP_KEY_TAG_TIME, stats.map.min2DecileTime) << tcu::TestLog::Float("MapRangeMax90", "MapRange: 90%-Max time", "us", QP_KEY_TAG_TIME, stats.map.max9DecileTime) << tcu::TestLog::Float("MapRangeMedian", "MapRange: Median time", "us", QP_KEY_TAG_TIME, stats.map.medianTime); } template
static typename EnableIf
::HAS_UNMAP_STATS>::Type logUnmapStats (tcu::TestLog& log, const typename SampleTypeTraits
::StatsType& stats) { log << tcu::TestLog::Float("UnmapMin", "Unmap: Min time", "us", QP_KEY_TAG_TIME, stats.unmap.minTime) << tcu::TestLog::Float("UnmapMax", "Unmap: Max time", "us", QP_KEY_TAG_TIME, stats.unmap.maxTime) << tcu::TestLog::Float("UnmapMin90", "Unmap: 90%-Min time", "us", QP_KEY_TAG_TIME, stats.unmap.min2DecileTime) << tcu::TestLog::Float("UnmapMax90", "Unmap: 90%-Max time", "us", QP_KEY_TAG_TIME, stats.unmap.max9DecileTime) << tcu::TestLog::Float("UnmapMedian", "Unmap: Median time", "us", QP_KEY_TAG_TIME, stats.unmap.medianTime); } template
static typename EnableIf
::HAS_WRITE_STATS>::Type logWriteStats (tcu::TestLog& log, const typename SampleTypeTraits
::StatsType& stats) { log << tcu::TestLog::Float("WriteMin", "Write: Min time", "us", QP_KEY_TAG_TIME, stats.write.minTime) << tcu::TestLog::Float("WriteMax", "Write: Max time", "us", QP_KEY_TAG_TIME, stats.write.maxTime) << tcu::TestLog::Float("WriteMin90", "Write: 90%-Min time", "us", QP_KEY_TAG_TIME, stats.write.min2DecileTime) << tcu::TestLog::Float("WriteMax90", "Write: 90%-Max time", "us", QP_KEY_TAG_TIME, stats.write.max9DecileTime) << tcu::TestLog::Float("WriteMedian", "Write: Median time", "us", QP_KEY_TAG_TIME, stats.write.medianTime); } template
static typename EnableIf
::HAS_FLUSH_STATS>::Type logFlushStats (tcu::TestLog& log, const typename SampleTypeTraits
::StatsType& stats) { log << tcu::TestLog::Float("FlushMin", "Flush: Min time", "us", QP_KEY_TAG_TIME, stats.flush.minTime) << tcu::TestLog::Float("FlushMax", "Flush: Max time", "us", QP_KEY_TAG_TIME, stats.flush.maxTime) << tcu::TestLog::Float("FlushMin90", "Flush: 90%-Min time", "us", QP_KEY_TAG_TIME, stats.flush.min2DecileTime) << tcu::TestLog::Float("FlushMax90", "Flush: 90%-Max time", "us", QP_KEY_TAG_TIME, stats.flush.max9DecileTime) << tcu::TestLog::Float("FlushMedian", "Flush: Median time", "us", QP_KEY_TAG_TIME, stats.flush.medianTime); } template
static typename EnableIf
::HAS_ALLOC_STATS>::Type logAllocStats (tcu::TestLog& log, const typename SampleTypeTraits
::StatsType& stats) { log << tcu::TestLog::Float("AllocMin", "Alloc: Min time", "us", QP_KEY_TAG_TIME, stats.alloc.minTime) << tcu::TestLog::Float("AllocMax", "Alloc: Max time", "us", QP_KEY_TAG_TIME, stats.alloc.maxTime) << tcu::TestLog::Float("AllocMin90", "Alloc: 90%-Min time", "us", QP_KEY_TAG_TIME, stats.alloc.min2DecileTime) << tcu::TestLog::Float("AllocMax90", "Alloc: 90%-Max time", "us", QP_KEY_TAG_TIME, stats.alloc.max9DecileTime) << tcu::TestLog::Float("AllocMedian", "Alloc: Median time", "us", QP_KEY_TAG_TIME, stats.alloc.medianTime); } template
static typename EnableIfNot
::HAS_MAP_STATS>::Type logMapRangeStats (tcu::TestLog& log, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_UNMAP_STATS>::Type logUnmapStats (tcu::TestLog& log, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_WRITE_STATS>::Type logWriteStats (tcu::TestLog& log, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_FLUSH_STATS>::Type logFlushStats (tcu::TestLog& log, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_ALLOC_STATS>::Type logAllocStats (tcu::TestLog& log, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(stats); } template
static typename EnableIf
::HAS_MAP_STATS>::Type logMapContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { const LineParametersWithConfidence contributionFitting = fitLineToSamples(samples, &SampleType::mapDuration); log << tcu::TestLog::Float("MapConstantCost", "Map: Approximated contant cost", "us", QP_KEY_TAG_TIME, contributionFitting.offset) << tcu::TestLog::Float("MapLinearCost", "Map: Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, contributionFitting.coefficient * 1024.0f * 1024.0f) << tcu::TestLog::Float("MapMedianCost", "Map: Median cost", "us", QP_KEY_TAG_TIME, stats.map.medianTime); } template
static typename EnableIf
::HAS_UNMAP_STATS>::Type logUnmapContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { const LineParametersWithConfidence contributionFitting = fitLineToSamples(samples, &SampleType::unmapDuration); log << tcu::TestLog::Float("UnmapConstantCost", "Unmap: Approximated contant cost", "us", QP_KEY_TAG_TIME, contributionFitting.offset) << tcu::TestLog::Float("UnmapLinearCost", "Unmap: Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, contributionFitting.coefficient * 1024.0f * 1024.0f) << tcu::TestLog::Float("UnmapMedianCost", "Unmap: Median cost", "us", QP_KEY_TAG_TIME, stats.unmap.medianTime); } template
static typename EnableIf
::HAS_WRITE_STATS>::Type logWriteContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { const LineParametersWithConfidence contributionFitting = fitLineToSamples(samples, &SampleType::writeDuration); log << tcu::TestLog::Float("WriteConstantCost", "Write: Approximated contant cost", "us", QP_KEY_TAG_TIME, contributionFitting.offset) << tcu::TestLog::Float("WriteLinearCost", "Write: Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, contributionFitting.coefficient * 1024.0f * 1024.0f) << tcu::TestLog::Float("WriteMedianCost", "Write: Median cost", "us", QP_KEY_TAG_TIME, stats.write.medianTime); } template
static typename EnableIf
::HAS_FLUSH_STATS>::Type logFlushContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { const LineParametersWithConfidence contributionFitting = fitLineToSamples(samples, &SampleType::flushDuration); log << tcu::TestLog::Float("FlushConstantCost", "Flush: Approximated contant cost", "us", QP_KEY_TAG_TIME, contributionFitting.offset) << tcu::TestLog::Float("FlushLinearCost", "Flush: Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, contributionFitting.coefficient * 1024.0f * 1024.0f) << tcu::TestLog::Float("FlushMedianCost", "Flush: Median cost", "us", QP_KEY_TAG_TIME, stats.flush.medianTime); } template
static typename EnableIf
::HAS_ALLOC_STATS>::Type logAllocContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { const LineParametersWithConfidence contributionFitting = fitLineToSamples(samples, &SampleType::allocDuration); log << tcu::TestLog::Float("AllocConstantCost", "Alloc: Approximated contant cost", "us", QP_KEY_TAG_TIME, contributionFitting.offset) << tcu::TestLog::Float("AllocLinearCost", "Alloc: Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, contributionFitting.coefficient * 1024.0f * 1024.0f) << tcu::TestLog::Float("AllocMedianCost", "Alloc: Median cost", "us", QP_KEY_TAG_TIME, stats.alloc.medianTime); } template
static typename EnableIf
::HAS_RENDER_STATS>::Type logRenderContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { const LineParametersWithConfidence contributionFitting = fitLineToSamples(samples, &SampleType::renderDuration); log << tcu::TestLog::Float("DrawCallConstantCost", "DrawCall: Approximated contant cost", "us", QP_KEY_TAG_TIME, contributionFitting.offset) << tcu::TestLog::Float("DrawCallLinearCost", "DrawCall: Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, contributionFitting.coefficient * 1024.0f * 1024.0f) << tcu::TestLog::Float("DrawCallMedianCost", "DrawCall: Median cost", "us", QP_KEY_TAG_TIME, stats.render.medianTime); } template
static typename EnableIf
::HAS_READ_STATS>::Type logReadContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { const LineParametersWithConfidence contributionFitting = fitLineToSamples(samples, &SampleType::readDuration); log << tcu::TestLog::Float("ReadConstantCost", "Read: Approximated contant cost", "us", QP_KEY_TAG_TIME, contributionFitting.offset) << tcu::TestLog::Float("ReadLinearCost", "Read: Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, contributionFitting.coefficient * 1024.0f * 1024.0f) << tcu::TestLog::Float("ReadMedianCost", "Read: Median cost", "us", QP_KEY_TAG_TIME, stats.read.medianTime); } template
static typename EnableIf
::HAS_UPLOAD_STATS>::Type logUploadContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { const LineParametersWithConfidence contributionFitting = fitLineToSamples(samples, &SampleType::uploadDuration); log << tcu::TestLog::Float("UploadConstantCost", "Upload: Approximated contant cost", "us", QP_KEY_TAG_TIME, contributionFitting.offset) << tcu::TestLog::Float("UploadLinearCost", "Upload: Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, contributionFitting.coefficient * 1024.0f * 1024.0f) << tcu::TestLog::Float("UploadMedianCost", "Upload: Median cost", "us", QP_KEY_TAG_TIME, stats.upload.medianTime); } template
static typename EnableIf
::HAS_TOTAL_STATS>::Type logTotalContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { const LineParametersWithConfidence contributionFitting = fitLineToSamples(samples, &SampleType::totalDuration); log << tcu::TestLog::Float("TotalConstantCost", "Total: Approximated contant cost", "us", QP_KEY_TAG_TIME, contributionFitting.offset) << tcu::TestLog::Float("TotalLinearCost", "Total: Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, contributionFitting.coefficient * 1024.0f * 1024.0f) << tcu::TestLog::Float("TotalMedianCost", "Total: Median cost", "us", QP_KEY_TAG_TIME, stats.total.medianTime); } template
static typename EnableIf
::HAS_FIRST_RENDER_STATS>::Type logFirstRenderContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { const LineParametersWithConfidence contributionFitting = fitLineToSamples(samples, &SampleType::firstRenderDuration); log << tcu::TestLog::Float("FirstDrawCallConstantCost", "First DrawCall: Approximated contant cost", "us", QP_KEY_TAG_TIME, contributionFitting.offset) << tcu::TestLog::Float("FirstDrawCallLinearCost", "First DrawCall: Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, contributionFitting.coefficient * 1024.0f * 1024.0f) << tcu::TestLog::Float("FirstDrawCallMedianCost", "First DrawCall: Median cost", "us", QP_KEY_TAG_TIME, stats.firstRender.medianTime); } template
static typename EnableIf
::HAS_SECOND_RENDER_STATS>::Type logSecondRenderContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { const LineParametersWithConfidence contributionFitting = fitLineToSamples(samples, &SampleType::secondRenderDuration); log << tcu::TestLog::Float("SecondDrawCallConstantCost", "Second DrawCall: Approximated contant cost", "us", QP_KEY_TAG_TIME, contributionFitting.offset) << tcu::TestLog::Float("SecondDrawCallLinearCost", "Second DrawCall: Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, contributionFitting.coefficient * 1024.0f * 1024.0f) << tcu::TestLog::Float("SecondDrawCallMedianCost", "Second DrawCall: Median cost", "us", QP_KEY_TAG_TIME, stats.secondRender.medianTime); } template
static typename EnableIfNot
::HAS_MAP_STATS>::Type logMapContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(samples); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_UNMAP_STATS>::Type logUnmapContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(samples); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_WRITE_STATS>::Type logWriteContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(samples); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_FLUSH_STATS>::Type logFlushContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(samples); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_ALLOC_STATS>::Type logAllocContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(samples); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_RENDER_STATS>::Type logRenderContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(samples); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_READ_STATS>::Type logReadContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(samples); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_UPLOAD_STATS>::Type logUploadContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(samples); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_TOTAL_STATS>::Type logTotalContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(samples); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_FIRST_RENDER_STATS>::Type logFirstRenderContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(samples); DE_UNREF(stats); } template
static typename EnableIfNot
::HAS_SECOND_RENDER_STATS>::Type logSecondRenderContribution (tcu::TestLog& log, const std::vector
>& samples, const typename SampleTypeTraits
::StatsType& stats) { DE_UNREF(log); DE_UNREF(samples); DE_UNREF(stats); } void logSampleList (tcu::TestLog& log, const LineParametersWithConfidence& theilSenFitting, const std::vector
>& samples) { log << tcu::TestLog::SampleList("Samples", "Samples") << tcu::TestLog::SampleInfo << tcu::TestLog::ValueInfo("WrittenSize", "Written size", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR) << tcu::TestLog::ValueInfo("BufferSize", "Buffer size", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR) << tcu::TestLog::ValueInfo("UploadTime", "Upload time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("FitResidual", "Fit residual", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::EndSampleInfo; for (int sampleNdx = 0; sampleNdx < (int)samples.size(); ++sampleNdx) { const float fitResidual = samples[sampleNdx].duration.fitResponseDuration - (theilSenFitting.offset + theilSenFitting.coefficient * samples[sampleNdx].writtenSize); log << tcu::TestLog::Sample << samples[sampleNdx].writtenSize << samples[sampleNdx].bufferSize << (int)samples[sampleNdx].duration.totalDuration << fitResidual << tcu::TestLog::EndSample; } log << tcu::TestLog::EndSampleList; } void logSampleList (tcu::TestLog& log, const LineParametersWithConfidence& theilSenFitting, const std::vector
>& samples) { log << tcu::TestLog::SampleList("Samples", "Samples") << tcu::TestLog::SampleInfo << tcu::TestLog::ValueInfo("WrittenSize", "Written size", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR) << tcu::TestLog::ValueInfo("BufferSize", "Buffer size", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR) << tcu::TestLog::ValueInfo("TotalTime", "Total time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("AllocTime", "Alloc time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("MapTime", "Map time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("UnmapTime", "Unmap time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("WriteTime", "Write time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("FitResidual", "Fit residual", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::EndSampleInfo; for (int sampleNdx = 0; sampleNdx < (int)samples.size(); ++sampleNdx) { const float fitResidual = samples[sampleNdx].duration.fitResponseDuration - (theilSenFitting.offset + theilSenFitting.coefficient * samples[sampleNdx].writtenSize); log << tcu::TestLog::Sample << samples[sampleNdx].writtenSize << samples[sampleNdx].bufferSize << (int)samples[sampleNdx].duration.totalDuration << (int)samples[sampleNdx].duration.allocDuration << (int)samples[sampleNdx].duration.mapDuration << (int)samples[sampleNdx].duration.unmapDuration << (int)samples[sampleNdx].duration.writeDuration << fitResidual << tcu::TestLog::EndSample; } log << tcu::TestLog::EndSampleList; } void logSampleList (tcu::TestLog& log, const LineParametersWithConfidence& theilSenFitting, const std::vector
>& samples) { log << tcu::TestLog::SampleList("Samples", "Samples") << tcu::TestLog::SampleInfo << tcu::TestLog::ValueInfo("WrittenSize", "Written size", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR) << tcu::TestLog::ValueInfo("BufferSize", "Buffer size", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR) << tcu::TestLog::ValueInfo("TotalTime", "Total time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("MapTime", "Map time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("UnmapTime", "Unmap time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("WriteTime", "Write time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("FitResidual", "Fit residual", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::EndSampleInfo; for (int sampleNdx = 0; sampleNdx < (int)samples.size(); ++sampleNdx) { const float fitResidual = samples[sampleNdx].duration.fitResponseDuration - (theilSenFitting.offset + theilSenFitting.coefficient * samples[sampleNdx].writtenSize); log << tcu::TestLog::Sample << samples[sampleNdx].writtenSize << samples[sampleNdx].bufferSize << (int)samples[sampleNdx].duration.totalDuration << (int)samples[sampleNdx].duration.mapDuration << (int)samples[sampleNdx].duration.unmapDuration << (int)samples[sampleNdx].duration.writeDuration << fitResidual << tcu::TestLog::EndSample; } log << tcu::TestLog::EndSampleList; } void logSampleList (tcu::TestLog& log, const LineParametersWithConfidence& theilSenFitting, const std::vector
>& samples) { log << tcu::TestLog::SampleList("Samples", "Samples") << tcu::TestLog::SampleInfo << tcu::TestLog::ValueInfo("WrittenSize", "Written size", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR) << tcu::TestLog::ValueInfo("BufferSize", "Buffer size", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR) << tcu::TestLog::ValueInfo("TotalTime", "Total time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("AllocTime", "Alloc time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("MapTime", "Map time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("UnmapTime", "Unmap time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("WriteTime", "Write time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("FlushTime", "Flush time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::ValueInfo("FitResidual", "Fit residual", "us", QP_SAMPLE_VALUE_TAG_RESPONSE) << tcu::TestLog::EndSampleInfo; for (int sampleNdx = 0; sampleNdx < (int)samples.size(); ++sampleNdx) { const float fitResidual = samples[sampleNdx].duration.fitResponseDuration - (theilSenFitting.offset + theilSenFitting.coefficient * samples[sampleNdx].writtenSize); log << tcu::TestLog::Sample << samples[sampleNdx].writtenSize << samples[sampleNdx].bufferSize << (int)samples[sampleNdx].duration.totalDuration << (int)samples[sampleNdx].duration.allocDuration << (int)samples[sampleNdx].duration.mapDuration << (int)samples[sampleNdx].duration.unmapDuration << (int)samples[sampleNdx].duration.writeDuration << (int)samples[sampleNdx].duration.flushDuration << fitResidual << tcu::TestLog::EndSample; } log << tcu::TestLog::EndSampleList; } void logSampleList (tcu::TestLog& log, const LineParametersWithConfidence& theilSenFitting, const std::vector
<UploadSampleResult<MapBufferRangeFlushDurationNoAlloc> >& samples)
{
    log << tcu::TestLog::SampleList("Samples", "Samples")
        << tcu::TestLog::SampleInfo
        << tcu::TestLog::ValueInfo("WrittenSize", "Written size", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("BufferSize", "Buffer size", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("TotalTime", "Total time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("MapTime", "Map time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("UnmapTime", "Unmap time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("WriteTime", "Write time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("FlushTime", "Flush time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("FitResidual", "Fit residual", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::EndSampleInfo;

    for (int sampleNdx = 0; sampleNdx < (int)samples.size(); ++sampleNdx)
    {
        const float fitResidual = samples[sampleNdx].duration.fitResponseDuration - (theilSenFitting.offset + theilSenFitting.coefficient * samples[sampleNdx].writtenSize);

        log << tcu::TestLog::Sample
            << samples[sampleNdx].writtenSize
            << samples[sampleNdx].bufferSize
            << (int)samples[sampleNdx].duration.totalDuration
            << (int)samples[sampleNdx].duration.mapDuration
            << (int)samples[sampleNdx].duration.unmapDuration
            << (int)samples[sampleNdx].duration.writeDuration
            << (int)samples[sampleNdx].duration.flushDuration
            << fitResidual
            << tcu::TestLog::EndSample;
    }

    log << tcu::TestLog::EndSampleList;
}

void logSampleList (tcu::TestLog& log, const LineParametersWithConfidence& theilSenFitting, const std::vector<RenderSampleResult<RenderReadDuration> >& samples)
{
    log << tcu::TestLog::SampleList("Samples", "Samples")
        << tcu::TestLog::SampleInfo
        << tcu::TestLog::ValueInfo("DataSize", "Data processed", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("VertexCount", "Number of vertices", "vertices", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("TotalTime", "Total time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("DrawCallTime", "Draw call time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("ReadTime", "ReadPixels time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("FitResidual", "Fit residual", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::EndSampleInfo;

    for (int sampleNdx = 0; sampleNdx < (int)samples.size(); ++sampleNdx)
    {
        const float fitResidual = samples[sampleNdx].duration.fitResponseDuration - (theilSenFitting.offset + theilSenFitting.coefficient * samples[sampleNdx].renderDataSize);

        log << tcu::TestLog::Sample
            << samples[sampleNdx].renderDataSize
            << samples[sampleNdx].numVertices
            << (int)samples[sampleNdx].duration.renderReadDuration
            << (int)samples[sampleNdx].duration.renderDuration
            << (int)samples[sampleNdx].duration.readDuration
            << fitResidual
            << tcu::TestLog::EndSample;
    }

    log << tcu::TestLog::EndSampleList;
}

void logSampleList (tcu::TestLog& log, const LineParametersWithConfidence& theilSenFitting, const std::vector<RenderSampleResult<UnrelatedUploadRenderReadDuration> >& samples)
{
    log << tcu::TestLog::SampleList("Samples", "Samples")
        << tcu::TestLog::SampleInfo
        << tcu::TestLog::ValueInfo("DataSize", "Data processed", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("VertexCount", "Number of vertices", "vertices", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("UnrelatedUploadSize", "Unrelated upload size", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("TotalTime", "Total time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("DrawCallTime", "Draw call time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("ReadTime", "ReadPixels time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("FitResidual", "Fit residual", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::EndSampleInfo;

    for (int sampleNdx = 0; sampleNdx < (int)samples.size(); ++sampleNdx)
    {
        const float fitResidual = samples[sampleNdx].duration.fitResponseDuration - (theilSenFitting.offset + theilSenFitting.coefficient * samples[sampleNdx].renderDataSize);

        log << tcu::TestLog::Sample
            << samples[sampleNdx].renderDataSize
            << samples[sampleNdx].numVertices
            << samples[sampleNdx].unrelatedDataSize
            << (int)samples[sampleNdx].duration.renderReadDuration
            << (int)samples[sampleNdx].duration.renderDuration
            << (int)samples[sampleNdx].duration.readDuration
            << fitResidual
            << tcu::TestLog::EndSample;
    }

    log << tcu::TestLog::EndSampleList;
}

void logSampleList (tcu::TestLog& log, const LineParametersWithConfidence& theilSenFitting, const std::vector<RenderSampleResult<UploadRenderReadDuration> >& samples)
{
    log << tcu::TestLog::SampleList("Samples", "Samples")
        << tcu::TestLog::SampleInfo
        << tcu::TestLog::ValueInfo("DataSize", "Data processed", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("UploadSize", "Data uploaded", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("VertexCount", "Number of vertices", "vertices", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("DrawReadTime", "Draw call and ReadPixels time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("TotalTime", "Total time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("Upload time", "Upload time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("DrawCallTime", "Draw call time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("ReadTime", "ReadPixels time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("FitResidual", "Fit residual", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::EndSampleInfo;

    for (int sampleNdx = 0; sampleNdx < (int)samples.size(); ++sampleNdx)
    {
        const float fitResidual = samples[sampleNdx].duration.fitResponseDuration - (theilSenFitting.offset + theilSenFitting.coefficient * samples[sampleNdx].renderDataSize);

        log << tcu::TestLog::Sample
            << samples[sampleNdx].renderDataSize
            << samples[sampleNdx].uploadedDataSize
            << samples[sampleNdx].numVertices
            << (int)samples[sampleNdx].duration.renderReadDuration
            << (int)samples[sampleNdx].duration.totalDuration
            << (int)samples[sampleNdx].duration.uploadDuration
            << (int)samples[sampleNdx].duration.renderDuration
            << (int)samples[sampleNdx].duration.readDuration
            << fitResidual
            << tcu::TestLog::EndSample;
    }

    log << tcu::TestLog::EndSampleList;
}

void logSampleList (tcu::TestLog& log, const LineParametersWithConfidence& theilSenFitting, const std::vector<RenderSampleResult<UploadRenderReadDurationWithUnrelatedUploadSize> >& samples)
{
    log << tcu::TestLog::SampleList("Samples", "Samples")
        << tcu::TestLog::SampleInfo
        << tcu::TestLog::ValueInfo("DataSize", "Data processed", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("UploadSize", "Data uploaded", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("VertexCount", "Number of vertices", "vertices", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("UnrelatedUploadSize", "Unrelated upload size", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("DrawReadTime", "Draw call and ReadPixels time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("TotalTime", "Total time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("Upload time", "Upload time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("DrawCallTime", "Draw call time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("ReadTime", "ReadPixels time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("FitResidual", "Fit residual", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::EndSampleInfo;

    for (int sampleNdx = 0; sampleNdx < (int)samples.size(); ++sampleNdx)
    {
        const float fitResidual = samples[sampleNdx].duration.fitResponseDuration - (theilSenFitting.offset + theilSenFitting.coefficient * samples[sampleNdx].renderDataSize);

        log << tcu::TestLog::Sample
            << samples[sampleNdx].renderDataSize
            << samples[sampleNdx].uploadedDataSize
            << samples[sampleNdx].numVertices
            << samples[sampleNdx].unrelatedDataSize
            << (int)samples[sampleNdx].duration.renderReadDuration
            << (int)samples[sampleNdx].duration.totalDuration
            << (int)samples[sampleNdx].duration.uploadDuration
            << (int)samples[sampleNdx].duration.renderDuration
            << (int)samples[sampleNdx].duration.readDuration
            << fitResidual
            << tcu::TestLog::EndSample;
    }

    log << tcu::TestLog::EndSampleList;
}

void logSampleList (tcu::TestLog& log, const LineParametersWithConfidence& theilSenFitting, const std::vector<RenderSampleResult<RenderUploadRenderReadDuration> >& samples)
{
    log << tcu::TestLog::SampleList("Samples", "Samples")
        << tcu::TestLog::SampleInfo
        << tcu::TestLog::ValueInfo("DataSize", "Data processed", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("UploadSize", "Data uploaded", "bytes", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("VertexCount", "Number of vertices", "vertices", QP_SAMPLE_VALUE_TAG_PREDICTOR)
        << tcu::TestLog::ValueInfo("DrawReadTime", "Second draw call and ReadPixels time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("TotalTime", "Total time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("FirstDrawCallTime", "First draw call time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("Upload time", "Upload time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("SecondDrawCallTime", "Second draw call time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("ReadTime", "ReadPixels time", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::ValueInfo("FitResidual", "Fit residual", "us", QP_SAMPLE_VALUE_TAG_RESPONSE)
        << tcu::TestLog::EndSampleInfo;

    for (int sampleNdx = 0; sampleNdx < (int)samples.size(); ++sampleNdx)
    {
        const float fitResidual = samples[sampleNdx].duration.fitResponseDuration - (theilSenFitting.offset + theilSenFitting.coefficient * samples[sampleNdx].renderDataSize);

        log << tcu::TestLog::Sample
            << samples[sampleNdx].renderDataSize
            << samples[sampleNdx].uploadedDataSize
            << samples[sampleNdx].numVertices
            << (int)samples[sampleNdx].duration.renderReadDuration
            << (int)samples[sampleNdx].duration.totalDuration
            << (int)samples[sampleNdx].duration.firstRenderDuration
            << (int)samples[sampleNdx].duration.uploadDuration
            << (int)samples[sampleNdx].duration.secondRenderDuration
            << (int)samples[sampleNdx].duration.readDuration
            << fitResidual
            << tcu::TestLog::EndSample;
    }

    log << tcu::TestLog::EndSampleList;
}
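// The analysis functions below fit a straight line time(size) = offset + coefficient * size to the
// samples (a robust Theil-Sen style fit, tolerant of outlier samples). The "FitResidual" column
// logged above is the measured fit response minus the time predicted by that line, and the transfer
// and processing rates reported later are derived as medianSize / predictedTime at the median size.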
template <typename DurationType>
static UploadSampleAnalyzeResult analyzeSampleResults (tcu::TestLog& log, const std::vector<UploadSampleResult<DurationType> >& samples, bool logBucketPerformance)
{
    // Assume data is linear with some outliers, fit a line
    const LineParametersWithConfidence theilSenFitting = fitLineToSamples(samples);
    const typename SampleTypeTraits<DurationType>::StatsType resultStats = calculateSampleStatistics(theilSenFitting, samples);
    float approximatedTransferRate;
    float approximatedTransferRateNoConstant;

    // Output raw samples
    {
        const tcu::ScopedLogSection section(log, "Samples", "Samples");
        logSampleList(log, theilSenFitting, samples);
    }

    // Calculate results for different ranges
    if (logBucketPerformance)
    {
        const int numBuckets = 4;
        int minBufferSize = 0;
        int maxBufferSize = 0;
        std::vector<UploadSampleResult<DurationType> > buckets[numBuckets];

        bucketizeSamplesUniformly(samples, &buckets[0], numBuckets, minBufferSize, maxBufferSize);

        for (int bucketNdx = 0; bucketNdx < numBuckets; ++bucketNdx)
        {
            if (buckets[bucketNdx].empty())
                continue;

            // Print a nice result summary
            const int bucketRangeMin = minBufferSize + (int)(( bucketNdx    / (float)numBuckets) * (maxBufferSize - minBufferSize));
            const int bucketRangeMax = minBufferSize + (int)(((bucketNdx+1) / (float)numBuckets) * (maxBufferSize - minBufferSize));
            const typename SampleTypeTraits<DurationType>::StatsType stats = calculateSampleStatistics(theilSenFitting, buckets[bucketNdx]);
            const tcu::ScopedLogSection section (log, "BufferSizeRange", std::string("Transfer performance with buffer size in range [").append(getHumanReadableByteSize(bucketRangeMin).append(", ").append(getHumanReadableByteSize(bucketRangeMax).append("]"))));

            logMapRangeStats<DurationType>(log, stats);
            logUnmapStats<DurationType>(log, stats);
            logWriteStats<DurationType>(log, stats);
            logFlushStats<DurationType>(log, stats);
            logAllocStats<DurationType>(log, stats);

            log << tcu::TestLog::Float("Min", "Total: Min time", "us", QP_KEY_TAG_TIME, stats.result.minTime)
                << tcu::TestLog::Float("Max", "Total: Max time", "us", QP_KEY_TAG_TIME, stats.result.maxTime)
                << tcu::TestLog::Float("Min90", "Total: 90%-Min time", "us", QP_KEY_TAG_TIME, stats.result.min2DecileTime)
                << tcu::TestLog::Float("Max90", "Total: 90%-Max time", "us", QP_KEY_TAG_TIME, stats.result.max9DecileTime)
                << tcu::TestLog::Float("Median", "Total: Median time", "us", QP_KEY_TAG_TIME, stats.result.medianTime)
                << tcu::TestLog::Float("MedianTransfer", "Median transfer rate", "MB / s", QP_KEY_TAG_PERFORMANCE, stats.medianRate / 1024.0f / 1024.0f)
                << tcu::TestLog::Float("MaxDiff", "Max difference to approximated", "us", QP_KEY_TAG_TIME, stats.maxDiffTime)
                << tcu::TestLog::Float("Max90Diff", "90%-Max difference to approximated", "us", QP_KEY_TAG_TIME, stats.maxDiff9DecileTime)
                << tcu::TestLog::Float("MedianDiff", "Median difference to approximated", "us", QP_KEY_TAG_TIME, stats.medianDiffTime)
                << tcu::TestLog::Float("MaxRelDiff", "Max relative difference to approximated", "%", QP_KEY_TAG_NONE, stats.maxRelDiffTime * 100.0f)
                << tcu::TestLog::Float("Max90RelDiff", "90%-Max relative difference to approximated", "%", QP_KEY_TAG_NONE, stats.max9DecileRelDiffTime * 100.0f)
                << tcu::TestLog::Float("MedianRelDiff", "Median relative difference to approximated", "%", QP_KEY_TAG_NONE, stats.medianRelDiffTime * 100.0f);
        }
    }

    // Contributions
    if (SampleTypeTraits<DurationType>::LOG_CONTRIBUTIONS)
    {
        const tcu::ScopedLogSection section(log, "Contribution", "Contributions");

        logMapContribution(log, samples, resultStats);
        logUnmapContribution(log, samples, resultStats);
        logWriteContribution(log, samples, resultStats);
        logFlushContribution(log, samples, resultStats);
        logAllocContribution(log, samples, resultStats);
    }

    // Print results
    {
        const tcu::ScopedLogSection section(log, "Results", "Results");

        const int medianBufferSize = (samples.front().bufferSize + samples.back().bufferSize) / 2;
        const float approximatedTransferTime = (theilSenFitting.offset + theilSenFitting.coefficient * medianBufferSize) / 1000.0f / 1000.0f;
        const float approximatedTransferTimeNoConstant = (theilSenFitting.coefficient * medianBufferSize) / 1000.0f / 1000.0f;
        const float sampleLinearity = calculateSampleFitLinearity(samples);
        const float sampleTemporalStability = calculateSampleTemporalStability(samples);

        approximatedTransferRateNoConstant = medianBufferSize / approximatedTransferTimeNoConstant;
        approximatedTransferRate = medianBufferSize / approximatedTransferTime;

        log << tcu::TestLog::Float("ResultLinearity", "Sample linearity", "%", QP_KEY_TAG_QUALITY, sampleLinearity * 100.0f)
            << tcu::TestLog::Float("SampleTemporalStability", "Sample temporal stability", "%", QP_KEY_TAG_QUALITY, sampleTemporalStability * 100.0f)
            << tcu::TestLog::Float("ApproximatedConstantCost", "Approximated constant cost", "us", QP_KEY_TAG_TIME, theilSenFitting.offset)
            << tcu::TestLog::Float("ApproximatedConstantCostConfidence60Lower", "Approximated constant cost 60% confidence lower limit", "us", QP_KEY_TAG_TIME, theilSenFitting.offsetConfidenceLower)
            << tcu::TestLog::Float("ApproximatedConstantCostConfidence60Upper", "Approximated constant cost 60% confidence upper limit", "us", QP_KEY_TAG_TIME, theilSenFitting.offsetConfidenceUpper)
            << tcu::TestLog::Float("ApproximatedLinearCost", "Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, theilSenFitting.coefficient * 1024.0f * 1024.0f)
            << tcu::TestLog::Float("ApproximatedLinearCostConfidence60Lower", "Approximated linear cost 60% confidence lower limit", "us / MB", QP_KEY_TAG_TIME, theilSenFitting.coefficientConfidenceLower * 1024.0f * 1024.0f)
            << tcu::TestLog::Float("ApproximatedLinearCostConfidence60Upper", "Approximated linear cost 60% confidence upper limit", "us / MB", QP_KEY_TAG_TIME, theilSenFitting.coefficientConfidenceUpper * 1024.0f * 1024.0f)
            << tcu::TestLog::Float("ApproximatedTransferRate", "Approximated transfer rate", "MB / s", QP_KEY_TAG_PERFORMANCE, approximatedTransferRate / 1024.0f / 1024.0f)
            << tcu::TestLog::Float("ApproximatedTransferRateNoConstant", "Approximated transfer rate without constant cost", "MB / s", QP_KEY_TAG_PERFORMANCE, approximatedTransferRateNoConstant / 1024.0f / 1024.0f)
            << tcu::TestLog::Float("SampleMedianTime", "Median sample time", "us", QP_KEY_TAG_TIME, resultStats.result.medianTime)
            << tcu::TestLog::Float("SampleMedianTransfer", "Median transfer rate", "MB / s", QP_KEY_TAG_PERFORMANCE, resultStats.medianRate / 1024.0f / 1024.0f);
    }

    // return approximated transfer rate
    {
        UploadSampleAnalyzeResult result;

        result.transferRateMedian = resultStats.medianRate;
        result.transferRateAtRange = approximatedTransferRate;
        result.transferRateAtInfinity = approximatedTransferRateNoConstant;

        return result;
    }
}
template <typename DurationType>
static RenderSampleAnalyzeResult analyzeSampleResults (tcu::TestLog& log, const std::vector<RenderSampleResult<DurationType> >& samples)
{
    // Assume data is linear with some outliers, fit a line
    const LineParametersWithConfidence theilSenFitting = fitLineToSamples(samples);
    const typename SampleTypeTraits<DurationType>::StatsType resultStats = calculateSampleStatistics(theilSenFitting, samples);
    float approximatedProcessingRate;
    float approximatedProcessingRateNoConstant;

    // output raw samples
    {
        const tcu::ScopedLogSection section(log, "Samples", "Samples");
        logSampleList(log, theilSenFitting, samples);
    }

    // Contributions
    if (SampleTypeTraits<DurationType>::LOG_CONTRIBUTIONS)
    {
        const tcu::ScopedLogSection section(log, "Contribution", "Contributions");

        logFirstRenderContribution(log, samples, resultStats);
        logUploadContribution(log, samples, resultStats);
        logRenderContribution(log, samples, resultStats);
        logSecondRenderContribution(log, samples, resultStats);
        logReadContribution(log, samples, resultStats);
        logTotalContribution(log, samples, resultStats);
    }

    // print results
    {
        const tcu::ScopedLogSection section(log, "Results", "Results");

        const int medianDataSize = (samples.front().renderDataSize + samples.back().renderDataSize) / 2;
        const float approximatedRenderTime = (theilSenFitting.offset + theilSenFitting.coefficient * medianDataSize) / 1000.0f / 1000.0f;
        const float approximatedRenderTimeNoConstant = (theilSenFitting.coefficient * medianDataSize) / 1000.0f / 1000.0f;
        const float sampleLinearity = calculateSampleFitLinearity(samples);
        const float sampleTemporalStability = calculateSampleTemporalStability(samples);

        approximatedProcessingRateNoConstant = medianDataSize / approximatedRenderTimeNoConstant;
        approximatedProcessingRate = medianDataSize / approximatedRenderTime;

        log << tcu::TestLog::Float("ResultLinearity", "Sample linearity", "%", QP_KEY_TAG_QUALITY, sampleLinearity * 100.0f)
            << tcu::TestLog::Float("SampleTemporalStability", "Sample temporal stability", "%", QP_KEY_TAG_QUALITY, sampleTemporalStability * 100.0f)
            << tcu::TestLog::Float("ApproximatedConstantCost", "Approximated constant cost", "us", QP_KEY_TAG_TIME, theilSenFitting.offset)
            << tcu::TestLog::Float("ApproximatedConstantCostConfidence60Lower", "Approximated constant cost 60% confidence lower limit", "us", QP_KEY_TAG_TIME, theilSenFitting.offsetConfidenceLower)
            << tcu::TestLog::Float("ApproximatedConstantCostConfidence60Upper", "Approximated constant cost 60% confidence upper limit", "us", QP_KEY_TAG_TIME, theilSenFitting.offsetConfidenceUpper)
            << tcu::TestLog::Float("ApproximatedLinearCost", "Approximated linear cost", "us / MB", QP_KEY_TAG_TIME, theilSenFitting.coefficient * 1024.0f * 1024.0f)
            << tcu::TestLog::Float("ApproximatedLinearCostConfidence60Lower", "Approximated linear cost 60% confidence lower limit", "us / MB", QP_KEY_TAG_TIME, theilSenFitting.coefficientConfidenceLower * 1024.0f * 1024.0f)
            << tcu::TestLog::Float("ApproximatedLinearCostConfidence60Upper", "Approximated linear cost 60% confidence upper limit", "us / MB", QP_KEY_TAG_TIME, theilSenFitting.coefficientConfidenceUpper * 1024.0f * 1024.0f)
            << tcu::TestLog::Float("ApproximatedProcessRate", "Approximated processing rate", "MB / s", QP_KEY_TAG_PERFORMANCE, approximatedProcessingRate / 1024.0f / 1024.0f)
            << tcu::TestLog::Float("ApproximatedProcessRateNoConstant", "Approximated processing rate without constant cost", "MB / s", QP_KEY_TAG_PERFORMANCE, approximatedProcessingRateNoConstant / 1024.0f / 1024.0f)
            << tcu::TestLog::Float("SampleMedianTime", "Median sample time", "us", QP_KEY_TAG_TIME, resultStats.result.medianTime)
            << tcu::TestLog::Float("SampleMedianProcess", "Median processing rate", "MB / s", QP_KEY_TAG_PERFORMANCE, resultStats.medianRate / 1024.0f / 1024.0f);
    }

    // return approximated render rate
    {
        RenderSampleAnalyzeResult result;

        result.renderRateMedian = resultStats.medianRate;
        result.renderRateAtRange = approximatedProcessingRate;
        result.renderRateAtInfinity = approximatedProcessingRateNoConstant;

        return result;
    }

    return RenderSampleAnalyzeResult(); // never reached
}
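// Example (illustrative values only): with numSamples == 7 the midpoint below is 4 and the order is
// first filled as { 0, 2, 4, 6, 1, 3, 5 }; each half is then shuffled independently, e.g.
// { 4, 0, 6, 2, 3, 5, 1 }. Every even sample index is therefore visited during the first half of the
// run and every odd one during the second half, which is what makes the dependence check possible.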
static void generateTwoPassRandomIterationOrder (std::vector<int>& iterationOrder, int numSamples)
{
    de::Random rnd (0xabc);
    const int midPoint = (numSamples + 1) / 2; //!< ceil(numSamples / 2)

    DE_ASSERT((int)iterationOrder.size() == numSamples);

    // Two "passes" over the range, randomize order within both passes.
    // This allows us to detect if iterations are not independent
    // (do first-run and later-run samples differ significantly?)

    for (int sampleNdx = 0; sampleNdx < midPoint; ++sampleNdx)
        iterationOrder[sampleNdx] = sampleNdx * 2;
    for (int sampleNdx = midPoint; sampleNdx < numSamples; ++sampleNdx)
        iterationOrder[sampleNdx] = (sampleNdx - midPoint) * 2 + 1;

    for (int ndx = 0; ndx < midPoint; ++ndx)
        std::swap(iterationOrder[ndx], iterationOrder[rnd.getInt(0, midPoint - 1)]);
    for (int ndx = midPoint; ndx < (int)iterationOrder.size(); ++ndx)
        std::swap(iterationOrder[ndx], iterationOrder[rnd.getInt(midPoint, (int)iterationOrder.size() - 1)]);
}
template <typename SampleType>
class BasicBufferCase : public TestCase
{
public:
    enum Flags
    {
        FLAG_ALLOCATE_LARGER_BUFFER = 0x01,
    };

                        BasicBufferCase     (Context& context, const char* name, const char* desc, int bufferSizeMin, int bufferSizeMax, int numSamples, int flags);
                        ~BasicBufferCase    (void);

    virtual void        init                (void);
    virtual void        deinit              (void);

protected:
    IterateResult       iterate             (void);

    virtual bool        runSample           (int iteration, UploadSampleResult<SampleType>& sample) = 0;
    virtual void        logAndSetTestResult (const std::vector<UploadSampleResult<SampleType> >& results) = 0;

    void                disableGLWarmup     (void);
    void                waitGLResults       (void);

    enum
    {
        DUMMY_RENDER_AREA_SIZE = 32
    };

    glu::ShaderProgram* m_dummyProgram;
    deInt32             m_dummyProgramPosLoc;
    deUint32            m_bufferID;

    const int           m_numSamples;
    const int           m_bufferSizeMin;
    const int           m_bufferSizeMax;
    const bool          m_allocateLargerBuffer;

private:
    int                                             m_iteration;
    std::vector<int>                                m_iterationOrder;
    std::vector<UploadSampleResult<SampleType> >    m_results;

    bool                m_useGL;
    int                 m_bufferRandomizerTimer;
};
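// The constructor below spreads the per-sample buffer sizes evenly over [bufferSizeMin, bufferSizeMax]
// and aligns each size to a 16-byte multiple. For example (illustrative values), with bufferSizeMin = 0,
// bufferSizeMax = 1024 and numSamples = 4 the resulting buffer sizes are 256, 512, 768 and 1024 bytes;
// with FLAG_ALLOCATE_LARGER_BUFFER the allocated size is 1.5x the written size, again 16-byte aligned.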
template <typename SampleType>
BasicBufferCase<SampleType>::BasicBufferCase (Context& context, const char* name, const char* desc, int bufferSizeMin, int bufferSizeMax, int numSamples, int flags)
    : TestCase                  (context, tcu::NODETYPE_PERFORMANCE, name, desc)
    , m_dummyProgram            (DE_NULL)
    , m_dummyProgramPosLoc      (-1)
    , m_bufferID                (0)
    , m_numSamples              (numSamples)
    , m_bufferSizeMin           (bufferSizeMin)
    , m_bufferSizeMax           (bufferSizeMax)
    , m_allocateLargerBuffer    ((flags & FLAG_ALLOCATE_LARGER_BUFFER) != 0)
    , m_iteration               (0)
    , m_iterationOrder          (numSamples)
    , m_results                 (numSamples)
    , m_useGL                   (true)
    , m_bufferRandomizerTimer   (0)
{
    // "randomize" iteration order. Deterministic, patternless
    generateTwoPassRandomIterationOrder(m_iterationOrder, m_numSamples);

    // choose buffer sizes
    for (int sampleNdx = 0; sampleNdx < m_numSamples; ++sampleNdx)
    {
        const int rawBufferSize       = (int)deFloatFloor(bufferSizeMin + (bufferSizeMax - bufferSizeMin) * ((float)(sampleNdx + 1) / m_numSamples));
        const int bufferSize          = deAlign32(rawBufferSize, 16);
        const int allocatedBufferSize = deAlign32((m_allocateLargerBuffer) ? ((int)(bufferSize * 1.5f)) : (bufferSize), 16);

        m_results[sampleNdx].bufferSize    = bufferSize;
        m_results[sampleNdx].allocatedSize = allocatedBufferSize;
        m_results[sampleNdx].writtenSize   = -1;
    }
}
template <typename SampleType>
BasicBufferCase<SampleType>::~BasicBufferCase (void)
{
    deinit();
}
template <typename SampleType>
void BasicBufferCase<SampleType>::init (void)
{
    const glw::Functions& gl = m_context.getRenderContext().getFunctions();

    if (!m_useGL)
        return;

    // \note Viewport size is not checked, it won't matter if the render target actually is smaller than DUMMY_RENDER_AREA_SIZE

    // dummy shader
    m_dummyProgram = new glu::ShaderProgram(m_context.getRenderContext(), glu::ProgramSources() << glu::VertexSource(s_dummyVertexShader) << glu::FragmentSource(s_dummyFragnentShader));
    if (!m_dummyProgram->isOk())
    {
        m_testCtx.getLog() << *m_dummyProgram;
        throw tcu::TestError("failed to build shader program");
    }

    m_dummyProgramPosLoc = gl.getAttribLocation(m_dummyProgram->getProgram(), "a_position");
    if (m_dummyProgramPosLoc == -1)
        throw tcu::TestError("a_position location was -1");
}
template <typename SampleType>
void BasicBufferCase<SampleType>::deinit (void)
{
    if (m_bufferID)
    {
        m_context.getRenderContext().getFunctions().deleteBuffers(1, &m_bufferID);
        m_bufferID = 0;
    }

    delete m_dummyProgram;
    m_dummyProgram = DE_NULL;
}
template <typename SampleType>
TestCase::IterateResult BasicBufferCase<SampleType>::iterate (void)
{
    const glw::Functions& gl = m_context.getRenderContext().getFunctions();
    static bool buffersWarmedUp = false;

    static const deUint32 usages[] =
    {
        GL_STREAM_DRAW, GL_STREAM_READ, GL_STREAM_COPY,
        GL_STATIC_DRAW, GL_STATIC_READ, GL_STATIC_COPY,
        GL_DYNAMIC_DRAW, GL_DYNAMIC_READ, GL_DYNAMIC_COPY,
    };

    // Allocate some random sized buffers and remove them to
    // make sure the first samples too have some buffers removed
    // just before their allocation. This is only needed by the
    // first test.

    if (m_useGL && !buffersWarmedUp)
    {
        const int                  numRandomBuffers = 6;
        const int                  numRepeats       = 10;
        const int                  maxBufferSize    = 16777216;
        const std::vector<deUint8> zeroData         (maxBufferSize, 0x00);
        de::Random                 rnd              (0x1234);
        deUint32                   bufferIDs[numRandomBuffers] = {0};

        gl.useProgram(m_dummyProgram->getProgram());
        gl.viewport(0, 0, DUMMY_RENDER_AREA_SIZE, DUMMY_RENDER_AREA_SIZE);
        gl.enableVertexAttribArray(m_dummyProgramPosLoc);

        for (int ndx = 0; ndx < numRepeats; ++ndx)
        {
            // Create buffer and maybe draw from it
            for (int randomBufferNdx = 0; randomBufferNdx < numRandomBuffers; ++randomBufferNdx)
            {
                const int      randomSize = deAlign32(rnd.getInt(1, maxBufferSize), 4*4);
                const deUint32 usage      = usages[rnd.getUint32() % (deUint32)DE_LENGTH_OF_ARRAY(usages)];

                gl.genBuffers(1, &bufferIDs[randomBufferNdx]);
                gl.bindBuffer(GL_ARRAY_BUFFER, bufferIDs[randomBufferNdx]);
                gl.bufferData(GL_ARRAY_BUFFER, randomSize, &zeroData[0], usage);

                if (rnd.getBool())
                {
                    gl.vertexAttribPointer(m_dummyProgramPosLoc, 4, GL_FLOAT, GL_FALSE, 0, DE_NULL);
                    gl.drawArrays(GL_POINTS, 0, 1);
                    gl.drawArrays(GL_POINTS, randomSize / (int)sizeof(float[4]) - 1, 1);
                }
            }

            for (int randomBufferNdx = 0; randomBufferNdx < numRandomBuffers; ++randomBufferNdx)
                gl.deleteBuffers(1, &bufferIDs[randomBufferNdx]);

            waitGLResults();
            GLU_EXPECT_NO_ERROR(gl.getError(), "Buffer gen");

            m_testCtx.touchWatchdog();
        }

        buffersWarmedUp = true;
        return CONTINUE;
    }
    else if (m_useGL && m_bufferRandomizerTimer++ % 8 == 0)
    {
        // Do some random buffer operations every now and then
        // to make sure the previous test iterations won't affect
        // following test runs.

        const int                  numRandomBuffers = 3;
        const int                  maxBufferSize    = 16777216;
        const std::vector<deUint8> zeroData         (maxBufferSize, 0x00);
        de::Random                 rnd              (0x1234 + 0xabc * m_bufferRandomizerTimer);

        // BufferData
        {
            deUint32 bufferIDs[numRandomBuffers] = {0};

            for (int randomBufferNdx = 0; randomBufferNdx < numRandomBuffers; ++randomBufferNdx)
            {
                const int      randomSize = deAlign32(rnd.getInt(1, maxBufferSize), 4*4);
                const deUint32 usage      = usages[rnd.getUint32() % (deUint32)DE_LENGTH_OF_ARRAY(usages)];

                gl.genBuffers(1, &bufferIDs[randomBufferNdx]);
                gl.bindBuffer(GL_ARRAY_BUFFER, bufferIDs[randomBufferNdx]);
                gl.bufferData(GL_ARRAY_BUFFER, randomSize, &zeroData[0], usage);
            }

            for (int randomBufferNdx = 0; randomBufferNdx < numRandomBuffers; ++randomBufferNdx)
                gl.deleteBuffers(1, &bufferIDs[randomBufferNdx]);
        }

        GLU_EXPECT_NO_ERROR(gl.getError(), "buffer ops");

        // Do some memory mappings
        {
            deUint32 bufferIDs[numRandomBuffers] = {0};

            for (int randomBufferNdx = 0; randomBufferNdx < numRandomBuffers; ++randomBufferNdx)
            {
                const int      randomSize = deAlign32(rnd.getInt(1, maxBufferSize), 4*4);
                const deUint32 usage      = usages[rnd.getUint32() % (deUint32)DE_LENGTH_OF_ARRAY(usages)];
                void*          ptr;

                gl.genBuffers(1, &bufferIDs[randomBufferNdx]);
                gl.bindBuffer(GL_ARRAY_BUFFER, bufferIDs[randomBufferNdx]);
                gl.bufferData(GL_ARRAY_BUFFER, randomSize, &zeroData[0], usage);

                gl.vertexAttribPointer(m_dummyProgramPosLoc, 4, GL_FLOAT, GL_FALSE, 0, DE_NULL);
                gl.drawArrays(GL_POINTS, 0, 1);
                gl.drawArrays(GL_POINTS, randomSize / (int)sizeof(float[4]) - 1, 1);

                if (rnd.getBool())
                    waitGLResults();

                ptr = gl.mapBufferRange(GL_ARRAY_BUFFER, 0, randomSize, GL_MAP_WRITE_BIT);
                if (ptr)
                {
                    medianTimeMemcpy(ptr, &zeroData[0], randomSize);
                    gl.unmapBuffer(GL_ARRAY_BUFFER);
                }
            }

            for (int randomBufferNdx = 0; randomBufferNdx < numRandomBuffers; ++randomBufferNdx)
                gl.deleteBuffers(1, &bufferIDs[randomBufferNdx]);

            waitGLResults();
        }

        GLU_EXPECT_NO_ERROR(gl.getError(), "buffer maps");
        return CONTINUE;
    }
    else
    {
        const int  currentIteration    = m_iteration;
        const int  sampleNdx           = m_iterationOrder[currentIteration];
        const bool sampleRunSuccessful = runSample(currentIteration, m_results[sampleNdx]);

        GLU_EXPECT_NO_ERROR(gl.getError(), "post runSample()");

        // Retry failed samples
        if (!sampleRunSuccessful)
            return CONTINUE;

        if (++m_iteration >= m_numSamples)
        {
            logAndSetTestResult(m_results);
            return STOP;
        }
        else
            return CONTINUE;
    }
}
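// Note that buffersWarmedUp above is a function-local static, so the large warm-up pass in iterate()
// runs only once per test process even across test case instances, while the smaller buffer-randomizing
// block runs on every call where m_bufferRandomizerTimer (incremented each time) is a multiple of eight.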
template <typename SampleType>
void BasicBufferCase<SampleType>::disableGLWarmup (void)
{
    m_useGL = false;
}
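// waitGLResults below forces completion of pending GL work by reading back a small area of the
// framebuffer: readPixels cannot return until every previously issued command affecting the read
// framebuffer has finished, so it doubles as a synchronization point between measurement phases.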
template <typename SampleType>
void BasicBufferCase<SampleType>::waitGLResults (void)
{
    tcu::Surface dummySurface(DUMMY_RENDER_AREA_SIZE, DUMMY_RENDER_AREA_SIZE);
    glu::readPixels(m_context.getRenderContext(), 0, 0, dummySurface.getAccess());
}
template <typename SampleType>
class BasicUploadCase : public BasicBufferCase<SampleType>
{
public:
    enum CaseType
    {
        CASE_NO_BUFFERS = 0,
        CASE_NEW_BUFFER,
        CASE_UNSPECIFIED_BUFFER,
        CASE_SPECIFIED_BUFFER,
        CASE_USED_BUFFER,
        CASE_USED_LARGER_BUFFER,

        CASE_LAST
    };

    enum CaseFlags
    {
        FLAG_DONT_LOG_BUFFER_INFO              = 0x01,
        FLAG_RESULT_BUFFER_UNSPECIFIED_CONTENT = 0x02,
    };

    enum ResultType
    {
        RESULT_MEDIAN_TRANSFER_RATE = 0,
        RESULT_ASYMPTOTIC_TRANSFER_RATE,
    };

                    BasicUploadCase     (Context& context, const char* name, const char* desc, int bufferSizeMin, int bufferSizeMax, int numSamples, deUint32 bufferUsage, CaseType caseType, ResultType resultType, int flags = 0);
                    ~BasicUploadCase    (void);

    virtual void    init                (void);
    virtual void    deinit              (void);

private:
    bool            runSample           (int iteration, UploadSampleResult<SampleType>& sample);
    void            createBuffer        (int bufferSize, int iteration);
    void            deleteBuffer        (int bufferSize);
    void            useBuffer           (int bufferSize);

    virtual void    testBufferUpload    (UploadSampleResult<SampleType>& result, int writeSize) = 0;
    void            logAndSetTestResult (const std::vector<UploadSampleResult<SampleType> >& results);

    deUint32        m_dummyBufferID;

protected:
    const CaseType          m_caseType;
    const ResultType        m_resultType;
    const deUint32          m_bufferUsage;
    const bool              m_logBufferInfo;
    const bool              m_bufferUnspecifiedContent;
    std::vector<deUint8>    m_zeroData;

    using BasicBufferCase<SampleType>::m_testCtx;
    using BasicBufferCase<SampleType>::m_context;

    using BasicBufferCase<SampleType>::DUMMY_RENDER_AREA_SIZE;
    using BasicBufferCase<SampleType>::m_dummyProgram;
    using BasicBufferCase<SampleType>::m_dummyProgramPosLoc;
    using BasicBufferCase<SampleType>::m_bufferID;
    using BasicBufferCase<SampleType>