
Commit

Format
justinchuby committed Nov 8, 2023
1 parent 5d53ba2 commit f2ffca0
Showing 30 changed files with 93 additions and 70 deletions.
@@ -278,7 +278,7 @@ class ThreadPoolProfiler {
int num_threads_;
#ifdef _MSC_VER
#pragma warning(push)
// C4324: structure was padded due to alignment specifier
// C4324: structure was padded due to alignment specifier
#pragma warning(disable : 4324)
#endif // _MSC_VER
struct ORT_ALIGN_TO_AVOID_FALSE_SHARING ChildThreadStat {
4 changes: 2 additions & 2 deletions onnxruntime/python/backend/backend.py
@@ -63,7 +63,7 @@ def is_opset_supported(cls, model):
error_message = (
"Skipping this test as only released onnx opsets are supported."
"To run this test set env variable ALLOW_RELEASED_ONNX_OPSET_ONLY to 0."
" Got Domain '{}' version '{}'.".format(domain, opset.version)
f" Got Domain '{domain}' version '{opset.version}'."
)
return False, error_message
except AttributeError:
@@ -74,7 +74,7 @@ def is_opset_supported(cls, model):
error_message = (
"Skipping this test as only released onnx opsets are supported."
"To run this test set env variable ALLOW_RELEASED_ONNX_OPSET_ONLY to 0."
" Got Domain '{}' version '{}'.".format(domain, opset.version)
f" Got Domain '{domain}' version '{opset.version}'."
)
return False, error_message
return True, ""
@@ -448,7 +448,7 @@ def add_hidden_states_graph_output(self, model: ModelProto, optimized_onnx_path,

assert self.clip_skip >= 0 and self.clip_skip < hidden_layers

node_output_name = "/text_model/encoder/layers.{}/Add_1_output_0".format(hidden_layers - 1 - self.clip_skip)
node_output_name = f"/text_model/encoder/layers.{hidden_layers - 1 - self.clip_skip}/Add_1_output_0"

# search the name in outputs of all node
found = False
4 changes: 2 additions & 2 deletions onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
@@ -1238,7 +1238,7 @@ TEST(MathOpTest, Sum_8_Test1) {
// This test runs fine on CPU Plugin
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});
#else
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: Expected output shape [{3,3,3}] did not match run output shape [{3,1,1}] for sum
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: Expected output shape [{3,3,3}] did not match run output shape [{3,1,1}] for sum

[cpplint] onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc:1241: Lines should be <= 120 characters long [whitespace/line_length] [2]
#endif
}

@@ -1264,7 +1264,7 @@ TEST(MathOpTest, Sum_8_Test1_double) {
// This test runs fine on CPU Plugin
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});
#else
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: Expected output shape [{3,3,3}] did not match run output shape [{3,1,1}] for sum
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: Expected output shape [{3,3,3}] did not match run output shape [{3,1,1}] for sum

[cpplint] onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc:1267: Lines should be <= 120 characters long [whitespace/line_length] [2]
#endif
}
TEST(MathOpTest, Sum_8_Test2) {
@@ -1086,7 +1086,7 @@ TEST(ReductionOpTest, ReduceMax_int32) {
#if defined(OPENVINO_CONFIG_GPU_FP32) || defined(OPENVINO_CONFIG_GPU_FP16)
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider}); // OpenVINO: Disabled temporarily
#else
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: axis must be 0
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: axis must be 0
#endif
}

@@ -1107,7 +1107,7 @@ TEST(ReductionOpTest, ReduceMax_int64) {
#if defined(OPENVINO_CONFIG_GPU_FP32) || defined(OPENVINO_CONFIG_GPU_FP16)
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider}); // OpenVINO: Disabled temporarily
#else
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: axis must be 0
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: axis must be 0
#endif
}

@@ -175,9 +175,9 @@ def fallback(self, log_level: _logger.LogLevel, *inputs, **kwargs):

# This warning will not be raised again if retry is not enabled
self._logger.warning(
"Fallback to PyTorch due to exception {} was triggered. "
f"Fallback to PyTorch due to exception {exception_type} was triggered. "
"Report this issue with a minimal repro at https://www.github.com/microsoft/onnxruntime. "
"See details below:\n\n{}".format(exception_type, exception_string)
f"See details below:\n\n{exception_string}"
)

self._raised_fallback_exception = True
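One detail worth noting in the rewrite above: only the pieces that interpolate values become f-strings, while the middle literal stays a plain string, and Python's implicit concatenation of adjacent literals joins all three into one message. A small sketch with made-up placeholder values (exception_type and exception_string here are plain strings chosen for illustration):

# Sketch of mixing f-string and plain string literals via implicit
# concatenation, mirroring the warning message above. Values are assumptions.
exception_type = "RuntimeError"
exception_string = "example failure details"

message = (
    f"Fallback to PyTorch due to exception {exception_type} was triggered. "
    "Report this issue with a minimal repro at https://www.github.com/microsoft/onnxruntime. "
    f"See details below:\n\n{exception_string}"
)

print(message)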
10 changes: 4 additions & 6 deletions tools/ci_build/build.py
@@ -1637,9 +1637,7 @@ def run_adb_shell(cmd):
# GCOV_PREFIX specifies the root directory
# for creating the runtime code coverage files.
if args.code_coverage:
adb_shell(
"cd {0} && GCOV_PREFIX={0} GCOV_PREFIX_STRIP={1} {2}".format(device_dir, cwd.count(os.sep) + 1, cmd)
)
adb_shell(f"cd {device_dir} && GCOV_PREFIX={device_dir} GCOV_PREFIX_STRIP={cwd.count(os.sep) + 1} {cmd}")
else:
adb_shell(f"cd {device_dir} && {cmd}")

@@ -1689,7 +1687,7 @@ def run_adb_shell(cmd):
)

if args.use_nnapi:
run_adb_shell("{0}/onnx_test_runner -e nnapi {0}/test".format(device_dir))
run_adb_shell(f"{device_dir}/onnx_test_runner -e nnapi {device_dir}/test")
else:
run_adb_shell(f"{device_dir}/onnx_test_runner {device_dir}/test")

@@ -1702,9 +1700,9 @@ def run_adb_shell(cmd):
adb_push("onnxruntime_customopregistration_test", device_dir, cwd=cwd)
adb_shell(f"chmod +x {device_dir}/onnxruntime_shared_lib_test")
adb_shell(f"chmod +x {device_dir}/onnxruntime_customopregistration_test")
run_adb_shell("LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{0} {0}/onnxruntime_shared_lib_test".format(device_dir))
run_adb_shell(f"LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{device_dir} {device_dir}/onnxruntime_shared_lib_test")
run_adb_shell(
"LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{0} {0}/onnxruntime_customopregistration_test".format(device_dir)
f"LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{device_dir} {device_dir}/onnxruntime_customopregistration_test"
)
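The build.py hunks above also cover the case where a positional placeholder is reused ({0} appearing twice): the f-string form simply repeats the variable. A hedged sketch of that equivalence, using a made-up device_dir path rather than the real test directory:

# Sketch of converting a format string that reuses a positional argument.
# device_dir is an example value, not the path used in CI.
device_dir = "/data/local/tmp/ort_test"

old_cmd = "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{0} {0}/onnxruntime_shared_lib_test".format(device_dir)
new_cmd = f"LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{device_dir} {device_dir}/onnxruntime_shared_lib_test"

# Both forms build the same command string; the f-string just repeats the name.
assert old_cmd == new_cmd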


@@ -54,7 +54,7 @@ def get_call_args_from_file(filename: str, function_or_declaration: str) -> typi
# TODO: handle automatically by merging lines
log.error(
"Call/Declaration is split over multiple lines. Please check manually."
"File:{} Line:{}".format(filename, line_num)
f"File:{filename} Line:{line_num}"
)
continue

3 changes: 2 additions & 1 deletion winml/lib/Api.Image/inc/ImageConversionHelpers.h
@@ -52,5 +52,6 @@ bool VideoFramesHaveSameDevice(const wm::IVideoFrame& video_frame_1, const wm::I
wgdx::Direct3D11::IDirect3DDevice GetDeviceFromDirect3DSurface(const wgdx::Direct3D11::IDirect3DSurface& d3dSurface);

constexpr std::array<DXGI_FORMAT, 3> supportedWinMLFormats = {
DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8X8_UNORM};
DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8X8_UNORM
};
} // namespace _winml::Imaging
5 changes: 1 addition & 4 deletions winml/lib/Api.Image/inc/TensorToVideoFrameConverter.h
@@ -40,10 +40,7 @@ class TensorToVideoFrameConverter : public ImageConverter {

private:
GUID _d3d11TextureGUID = {
0x14bf1054,
0x6ce7,
0x4c00,
{0xa1, 0x32, 0xb0, 0xf2, 0x11, 0x5D, 0xE0, 0x7f}
0x14bf1054, 0x6ce7, 0x4c00, {0xa1, 0x32, 0xb0, 0xf2, 0x11, 0x5D, 0xE0, 0x7f}
}; // {14BF1054-6CE7-4C00-A132-B0F2115DE07F}
GUID _handleGUID = {
0x700148fc, 0xc0cb, 0x4a7e, {0xa7, 0xc0, 0xe7, 0x43, 0xc1, 0x9, 0x9d, 0x62}
5 changes: 1 addition & 4 deletions winml/lib/Api.Image/inc/VideoFrameToTensorConverter.h
@@ -50,10 +50,7 @@ class VideoFrameToTensorConverter : public ImageConverter {

private:
GUID d3d11_texture_GUID_ = {
0x485e4bb3,
0x3fe8,
0x497b,
{0x85, 0x9e, 0xc7, 0x5, 0x18, 0xdb, 0x11, 0x2a}
0x485e4bb3, 0x3fe8, 0x497b, {0x85, 0x9e, 0xc7, 0x5, 0x18, 0xdb, 0x11, 0x2a}
}; // {485E4BB3-3FE8-497B-859E-C70518DB112A}
GUID handle_GUID_ = {
0xce43264e, 0x41f7, 0x4882, {0x9e, 0x20, 0xfa, 0xa5, 0x1e, 0x37, 0x64, 0xfc}
6 changes: 4 additions & 2 deletions winml/lib/Api.Ort/OnnxruntimeModel.cpp
@@ -81,7 +81,8 @@ HRESULT ModelInfo::RuntimeClassInitialize(_In_ OnnxruntimeEngineFactory* engine_
winml_adapter_api->ModelGetInputCount,
winml_adapter_api->ModelGetInputName,
winml_adapter_api->ModelGetInputDescription,
winml_adapter_api->ModelGetInputTypeInfo};
winml_adapter_api->ModelGetInputTypeInfo
};

// Create inputs
std::vector<OnnxruntimeValueInfoWrapper> inputs;
@@ -93,7 +94,8 @@ HRESULT ModelInfo::RuntimeClassInitialize(_In_ OnnxruntimeEngineFactory* engine_
winml_adapter_api->ModelGetOutputCount,
winml_adapter_api->ModelGetOutputName,
winml_adapter_api->ModelGetOutputDescription,
winml_adapter_api->ModelGetOutputTypeInfo};
winml_adapter_api->ModelGetOutputTypeInfo
};

std::vector<OnnxruntimeValueInfoWrapper> outputs;
RETURN_IF_FAILED(CreateFeatureDescriptors(engine_factory, &output_helpers, ort_model, outputs));
3 changes: 2 additions & 1 deletion winml/lib/Api/impl/TensorBase.h
@@ -217,7 +217,8 @@ struct TensorBase : TBase {
}

D3D12_HEAP_PROPERTIES heapProperties = {
D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0};
D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0
};
D3D12_RESOURCE_DESC resourceDesc = {
D3D12_RESOURCE_DIMENSION_BUFFER,
0,
6 changes: 4 additions & 2 deletions winml/test/adapter/AdapterDmlEpTest.cpp
@@ -116,7 +116,8 @@ std::array<float, tensor_size> tensor_values = {};
winrt::com_ptr<ID3D12Resource> CreateD3D12Resource(ID3D12Device& device) {
constexpr uint64_t buffer_size = tensor_size * sizeof(float);
constexpr D3D12_HEAP_PROPERTIES heap_properties = {
D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0};
D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0
};
constexpr D3D12_RESOURCE_DESC resource_desc = {
D3D12_RESOURCE_DIMENSION_BUFFER,
0,
@@ -365,6 +366,7 @@ const AdapterDmlEpTestApi& getapi() {
DmlCopyTensor,
CreateCustomRegistry,
ValueGetDeviceId,
SessionGetInputRequiredDeviceId};
SessionGetInputRequiredDeviceId
};
return api;
}
3 changes: 2 additions & 1 deletion winml/test/adapter/AdapterSessionTest.cpp
@@ -368,7 +368,8 @@ const AdapterSessionTestAPI& getapi() {
Profiling,
CopyInputAcrossDevices,
CopyInputAcrossDevices_DML,
GetNumberOfIntraOpThreads};
GetNumberOfIntraOpThreads
};

if (SkipGpuTests()) {
api.AppendExecutionProvider_DML = SkipTest;
9 changes: 6 additions & 3 deletions winml/test/api/LearningModelAPITest.cpp
@@ -247,9 +247,11 @@ static void CheckLearningModelPixelRange() {
// Normalized_0_1 and image output
L"Add_ImageNet1920WithImageMetadataBgr8_SRGB_0_1.onnx",
// Normalized_1_1 and image output
L"Add_ImageNet1920WithImageMetadataBgr8_SRGB_1_1.onnx"};
L"Add_ImageNet1920WithImageMetadataBgr8_SRGB_1_1.onnx"
};
std::vector<LearningModelPixelRange> pixelRanges = {
LearningModelPixelRange::ZeroTo255, LearningModelPixelRange::ZeroToOne, LearningModelPixelRange::MinusOneToOne};
LearningModelPixelRange::ZeroTo255, LearningModelPixelRange::ZeroToOne, LearningModelPixelRange::MinusOneToOne
};
for (uint32_t model_i = 0; model_i < modelPaths.size(); model_i++) {
LearningModel learningModel = nullptr;
WINML_EXPECT_NO_THROW(APITest::LoadModel(modelPaths[model_i], learningModel));
@@ -329,7 +331,8 @@ const LearningModelApiTestsApi& getapi() {
CloseModelCheckEval,
CloseModelNoNewSessions,
CheckMetadataCaseInsensitive,
CreateCorruptModel};
CreateCorruptModel
};

if (RuntimeParameterExists(L"noVideoFrameTests")) {
api.CloseModelCheckEval = SkipTest;
3 changes: 2 additions & 1 deletion winml/test/api/LearningModelBindingAPITest.cpp
@@ -669,7 +669,8 @@ const LearningModelBindingAPITestsApi& getapi() {
VerifyOutputAfterEvaluateAsyncCalledTwice,
VerifyOutputAfterImageBindCalledTwice,
SequenceLengthTensorFloat,
SequenceConstructTensorString};
SequenceConstructTensorString
};

if (SkipGpuTests()) {
api.GpuSqueezeNet = SkipTest;
9 changes: 6 additions & 3 deletions winml/test/api/LearningModelSessionAPITest.cpp
@@ -793,7 +793,8 @@ static void STFT(
auto n_dfts = static_cast<size_t>(1 + floor((signal_size - dft_size) / hop_size));
auto input_shape = std::vector<int64_t>{1, INT64(signal_size)};
auto output_shape = std::vector<int64_t>{
INT64(batch_size), INT64(n_dfts), is_onesided ? ((INT64(dft_size) >> 1) + 1) : INT64(dft_size), 2};
INT64(batch_size), INT64(n_dfts), is_onesided ? ((INT64(dft_size) >> 1) + 1) : INT64(dft_size), 2
};
auto dft_length = TensorInt64Bit::CreateFromArray({}, {INT64(dft_size)});

auto model =
@@ -1372,7 +1373,8 @@ static void ModelBuilding_GridSample_Internal(LearningModelDeviceKind kind) {
5.0000f,
5.0000f,
10.0000f,
10.0000f};
10.0000f
};
input_dims = {1, 1, 3, 2};
grid_dims = {1, 2, 4, 2};

@@ -2312,7 +2314,8 @@ const LearningModelSessionAPITestsApi& getapi() {
ModelBuilding_STFT,
ModelBuilding_MelSpectrogramOnThreeToneSignal,
ModelBuilding_MelWeightMatrix,
SetName};
SetName
};

if (SkipGpuTests()) {
api.CreateSessionDeviceDirectX = SkipTest;
3 changes: 2 additions & 1 deletion winml/test/api/RawApiHelpers.cpp
@@ -38,7 +38,8 @@ void RunOnDevice(ml::learning_model& model, ml::learning_model_device& device, I
auto channel_buffers_pointers = std::vector<float*>{
&input_data.at(0),
&input_data.at(0) + channel_buffers_sizes[0],
&input_data.at(0) + channel_buffers_sizes[0] + +channel_buffers_sizes[1]};
&input_data.at(0) + channel_buffers_sizes[0] + +channel_buffers_sizes[1]
};

WINML_EXPECT_HRESULT_SUCCEEDED(binding->bind_as_references<float>(
input_name,
3 changes: 2 additions & 1 deletion winml/test/api/RawApiTestsGpu.cpp
@@ -165,7 +165,8 @@ const RawApiTestsGpuApi& getapi() {
CreateDirectXMinPowerDevice,
Evaluate,
EvaluateNoInputCopy,
EvaluateManyBuffers};
EvaluateManyBuffers
};

if (SkipGpuTests()) {
api.CreateDirectXDevice = SkipTest;
12 changes: 8 additions & 4 deletions winml/test/concurrency/ConcurrencyTests.cpp
@@ -141,7 +141,8 @@ void EvalAsyncDifferentBindings() {
std::vector<EvaluationUnit> evaluation_units(num_units, EvaluationUnit());

std::vector<ImageFeatureValue> ifvs = {
FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")};
FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")
};

// same session, different binding
auto model = LearningModel::LoadFromFilePath(FileHelpers::GetModulePath() + L"model.onnx");
@@ -191,7 +192,8 @@ void MultiThreadMultiSessionOnDevice(const LearningModelDevice& device) {
auto path = FileHelpers::GetModulePath() + L"model.onnx";
auto model = LearningModel::LoadFromFilePath(path);
std::vector<ImageFeatureValue> ivfs = {
FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")};
FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")
};
std::vector<int> max_indices = {
281, // tabby, tabby cat
0 // tench, Tinca tinca
@@ -257,7 +259,8 @@ void MultiThreadSingleSessionOnDevice(const LearningModelDevice& device) {
LearningModelSession model_session = nullptr;
WINML_EXPECT_NO_THROW(model_session = LearningModelSession(model, device));
std::vector<ImageFeatureValue> ivfs = {
FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")};
FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")
};
std::vector<int> max_indices = {
281, // tabby, tabby cat
0 // tench, Tinca tinca
@@ -322,7 +325,8 @@ const ConcurrencyTestsApi& getapi() {
MultiThreadSingleSessionGpu,
EvalAsyncDifferentModels,
EvalAsyncDifferentSessions,
EvalAsyncDifferentBindings};
EvalAsyncDifferentBindings
};

if (SkipGpuTests()) {
api.MultiThreadMultiSessionGpu = SkipTest;
3 changes: 2 additions & 1 deletion winml/test/image/imageTestHelper.cpp
@@ -148,7 +148,8 @@ TensorFloat LoadInputImageFromGPU(SoftwareBitmap softwareBitmap, const std::wstr
// 3 is number of channels we use. R G B without alpha.
UINT64 bufferbytesize = 3 * sizeof(float) * softwareBitmap.PixelWidth() * softwareBitmap.PixelHeight();
D3D12_HEAP_PROPERTIES heapProperties = {
D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0};
D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0
};
D3D12_RESOURCE_DESC resourceDesc = {
D3D12_RESOURCE_DIMENSION_BUFFER,
0,
3 changes: 2 additions & 1 deletion winml/test/image/imagetests.cpp
@@ -939,7 +939,8 @@ TEST_F(ImageTests, ImageBindingAsGPUTensor) {
UINT64 buffer_byte_size =
static_cast<uint64_t>(software_bitmap.PixelWidth()) * software_bitmap.PixelHeight() * 3 * sizeof(float);
D3D12_HEAP_PROPERTIES heap_properties = {
D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0};
D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0
};
D3D12_RESOURCE_DESC resource_desc = {
D3D12_RESOURCE_DIMENSION_BUFFER,
0,
3 changes: 2 additions & 1 deletion winml/test/model/model_tests.cpp
@@ -232,7 +232,8 @@ static std::vector<ITestCase*> GetAllTestCases() {
ORT_TSTR("tf_resnet_v2_152"),
ORT_TSTR("vgg19"),
ORT_TSTR("yolov3"),
ORT_TSTR("zfnet512")};
ORT_TSTR("zfnet512")
};
allDisabledTests.insert(std::begin(x86DisabledTests), std::end(x86DisabledTests));
#endif
// Bad onnx test output caused by previously wrong SAME_UPPER/SAME_LOWER for ConvTranspose
10 changes: 4 additions & 6 deletions winml/test/model/skip_model_tests.h
@@ -161,10 +161,8 @@ std::unordered_map<std::string, std::pair<std::string, std::string>> disabledGpu
test name -> absolute difference sampleTolerance
*/
std::unordered_map<std::string, double> sampleTolerancePerTests({
{"fp16_inception_v1_opset7_GPU",0.005 },
{"fp16_inception_v1_opset8_GPU", 0.005},
{ "candy_opset9_GPU",
0.00150000 }, // Intel(R) UHD Graphics 630 (29.20.100.9020) AP machine has inaccurate GPU results for FNS Candy opset 9 https://microsoft.visualstudio.com/OS/_workitems/edit/30696168/
{ "fp16_tiny_yolov2_opset8_GPU",
0.109000 }, // Intel(R) UHD Graphics 630 (29.20.100.9020) AP machine has inaccurate GPU results for FNS Candy opset 9 https://microsoft.visualstudio.com/OS/_workitems/edit/30696168/
{"fp16_inception_v1_opset7_GPU", 0.005},
{"fp16_inception_v1_opset8_GPU", 0.005},
{ "candy_opset9_GPU", 0.00150000}, // Intel(R) UHD Graphics 630 (29.20.100.9020) AP machine has inaccurate GPU results for FNS Candy opset 9 https://microsoft.visualstudio.com/OS/_workitems/edit/30696168/

[cpplint] winml/test/model/skip_model_tests.h:166: Lines should be <= 120 characters long [whitespace/line_length] [2]; At least two spaces is best between code and comments [whitespace/comments] [2]
{ "fp16_tiny_yolov2_opset8_GPU", 0.109000}, // Intel(R) UHD Graphics 630 (29.20.100.9020) AP machine has inaccurate GPU results for FNS Candy opset 9 https://microsoft.visualstudio.com/OS/_workitems/edit/30696168/

[cpplint] winml/test/model/skip_model_tests.h:167: Lines should be <= 120 characters long [whitespace/line_length] [2]; At least two spaces is best between code and comments [whitespace/comments] [2]
});
3 changes: 2 additions & 1 deletion winml/test/scenario/cppwinrt/CustomNullOp.h
@@ -69,7 +69,8 @@ struct NullOperatorFactory : winrt::implements<NullOperatorFactory, IMLOperatorK
std::vector<MLOperatorEdgeDescription> allowedEdges{
CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Double),
CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float),
CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float16)};
CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float16)
};
typeConstraint.allowedTypes = allowedEdges.data();
typeConstraint.allowedTypeCount = static_cast<uint32_t>(allowedEdges.size());
