Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Many fixes to get unit tests passing on Windows. #7431

Merged
merged 13 commits into from
Feb 25, 2021
6 changes: 6 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -130,6 +130,12 @@ if(MSVC)
add_compile_options(/wd4180)
# DLL interface warning in c++
add_compile_options(/wd4251)
# destructor was implicitly defined as deleted
add_compile_options(/wd4624)
# unary minus operator applied to unsigned type, result still unsigned
add_compile_options(/wd4146)
# 'inline': used more than once
add_compile_options(/wd4141)
else(MSVC)
set(WARNING_FLAG -Wall)
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
Expand Down
15 changes: 11 additions & 4 deletions apps/cpp_rpc/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
set(TVM_RPC_SOURCES
cmake_policy(SET CMP0069 NEW) # suppress cmake warning about IPO

set(TVM_RPC_SOURCES
main.cc
rpc_env.cc
rpc_server.cc
Expand All @@ -11,7 +13,12 @@ endif()
# Set output to same directory as the other TVM libs
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
add_executable(tvm_rpc ${TVM_RPC_SOURCES})
set_property(TARGET tvm_rpc PROPERTY INTERPROCEDURAL_OPTIMIZATION_RELEASE TRUE)

include(CheckIPOSupported)
check_ipo_supported(RESULT result OUTPUT output)
if(result)
set_property(TARGET tvm_rpc PROPERTY INTERPROCEDURAL_OPTIMIZATION_RELEASE TRUE)
endif()

if(WIN32)
target_compile_definitions(tvm_rpc PUBLIC -DNOMINMAX)
Expand All @@ -35,5 +42,5 @@ target_include_directories(
PUBLIC DLPACK_PATH
PUBLIC DMLC_PATH
)
target_link_libraries(tvm_rpc tvm_runtime)

target_link_libraries(tvm_rpc tvm_runtime)
1 change: 1 addition & 0 deletions cmake/modules/LibInfo.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ function(add_lib_info src_file)
TVM_INFO_USE_ARM_COMPUTE_LIB="${USE_ARM_COMPUTE_LIB}"
TVM_INFO_USE_ARM_COMPUTE_LIB_GRAPH_RUNTIME="${USE_ARM_COMPUTE_LIB_GRAPH_RUNTIME}"
TVM_INFO_INDEX_DEFAULT_I64="${INDEX_DEFAULT_I64}"
TVM_CXX_COMPILER_PATH="${CMAKE_CXX_COMPILER}"
)

endfunction()
2 changes: 1 addition & 1 deletion cmake/utils/FindLLVM.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ macro(find_llvm use_llvm)
string(STRIP ${TVM_LLVM_VERSION} TVM_LLVM_VERSION)
# definitions
string(REGEX MATCHALL "(^| )-D[A-Za-z0-9_]*" __llvm_defs ${__llvm_cxxflags})
set(LLVM_DEFINTIIONS "")
set(LLVM_DEFINITIONS "")
foreach(__flag IN ITEMS ${__llvm_defs})
string(STRIP "${__flag}" __llvm_def)
list(APPEND LLVM_DEFINITIONS "${__llvm_def}")
Expand Down
1 change: 1 addition & 0 deletions conda/build-environment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -35,3 +35,4 @@ dependencies:
- bzip2
- make
- scipy
- pillow
4 changes: 3 additions & 1 deletion python/tvm/auto_scheduler/cost_model/xgb_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,11 +116,13 @@ def __init__(
if xgb is None:
xgb = __import__("xgboost")
except ImportError:
# add "from Node" to silence
# "During handling of the above exception, another exception occurred"
raise ImportError(
"XGBoost is required for XGBModel. "
"Please install its python package first. "
"Help: (https://xgboost.readthedocs.io/en/latest/) "
)
) from None

self.xgb_params = {
"max_depth": 10,
Expand Down
9 changes: 6 additions & 3 deletions python/tvm/contrib/cc.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ def create_shared(output, objects, options=None, cc="g++"):
):
_linux_compile(output, objects, options, cc, compile_shared=True)
elif sys.platform == "win32":
_windows_shared(output, objects, options)
_windows_compile(output, objects, options)
else:
raise ValueError("Unsupported platform")

Expand All @@ -71,6 +71,8 @@ def create_executable(output, objects, options=None, cc="g++"):
"""
if sys.platform == "darwin" or sys.platform.startswith("linux"):
_linux_compile(output, objects, options, cc)
elif sys.platform == "win32":
_windows_compile(output, objects, options)
else:
raise ValueError("Unsupported platform")

Expand Down Expand Up @@ -212,9 +214,9 @@ def _linux_compile(output, objects, options, compile_cmd="g++", compile_shared=F
raise RuntimeError(msg)


def _windows_shared(output, objects, options):
def _windows_compile(output, objects, options):
cmd = ["clang"]
cmd += ["-O2", "-flto=full", "-fuse-ld=lld-link"]
cmd += ["-O2"]

if output.endswith(".so") or output.endswith(".dll"):
cmd += ["-shared"]
Expand All @@ -240,6 +242,7 @@ def _windows_shared(output, objects, options):
)
if proc.returncode != 0:
msg = "Compilation error:\n"
msg += " ".join(cmd) + "\n"
msg += py_str(out)

raise RuntimeError(msg)
6 changes: 6 additions & 0 deletions python/tvm/contrib/nvcc.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,12 @@ def compile_cuda(code, target="ptx", arch=None, options=None, path_target=None):
cmd += ["-o", file_target]
cmd += [temp_code]

cxx_compiler_path = tvm.support.libinfo().get("TVM_CXX_COMPILER_PATH")
if cxx_compiler_path != "":
# This tells nvcc where to find the c++ compiler just in case it is not in the path.
# On Windows it is not in the path by default.
cmd += ["-ccbin", cxx_compiler_path]

proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

(out, _) = proc.communicate()
Expand Down
2 changes: 1 addition & 1 deletion src/auto_scheduler/search_policy/sketch_policy.cc
Original file line number Diff line number Diff line change
Expand Up @@ -519,7 +519,7 @@ Array<State> SketchPolicyNode::EvolutionarySearch(const Array<State>& init_popul
// auxiliary global variables
std::vector<float> pop_scores;
std::vector<double> pop_selection_probs;
float max_score = -1e-10;
float max_score = -1e-10f;
pop_scores.reserve(population);
pop_selection_probs.reserve(population);
std::uniform_real_distribution<> dis(0.0, 1.0);
Expand Down
7 changes: 6 additions & 1 deletion src/support/libinfo.cc
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,10 @@
#define TVM_INFO_INDEX_DEFAULT_I64 "NOT-FOUND"
#endif

#ifndef TVM_CXX_COMPILER_PATH
#define TVM_CXX_COMPILER_PATH ""
#endif

namespace tvm {

/*!
Expand Down Expand Up @@ -262,7 +266,8 @@ TVM_DLL Map<String, String> GetLibInfo() {
{"USE_TARGET_ONNX", TVM_INFO_USE_TARGET_ONNX},
{"USE_ARM_COMPUTE_LIB", TVM_INFO_USE_ARM_COMPUTE_LIB},
{"USE_ARM_COMPUTE_LIB_GRAPH_RUNTIME", TVM_INFO_USE_ARM_COMPUTE_LIB_GRAPH_RUNTIME},
{"INDEX_DEFAULT_I64", TVM_INFO_INDEX_DEFAULT_I64}};
{"INDEX_DEFAULT_I64", TVM_INFO_INDEX_DEFAULT_I64},
{"TVM_CXX_COMPILER_PATH", TVM_CXX_COMPILER_PATH}};
return result;
}

Expand Down
1 change: 1 addition & 0 deletions src/target/source/codegen_c_host.cc
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ void CodeGenCHost::Init(bool output_ssa, bool emit_asserts, std::string target_s
emit_asserts_ = emit_asserts;
declared_globals_.clear();
decl_stream << "// tvm target: " << target_str << "\n";
decl_stream << "#define TVM_EXPORTS\n";
decl_stream << "#include \"tvm/runtime/c_runtime_api.h\"\n";
decl_stream << "#include \"tvm/runtime/c_backend_api.h\"\n";
decl_stream << "#include <math.h>\n";
Expand Down
56 changes: 30 additions & 26 deletions src/target/source/codegen_cuda.cc
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,20 @@ std::string CodeGenCUDA::Finish() {
decl_stream << "#include <mma.h>\n";
}

decl_stream << "\n#ifdef _WIN32\n";
decl_stream << " using uint = unsigned int;\n";
decl_stream << " using uchar = unsigned char;\n";
decl_stream << " using ushort = unsigned short;\n";
decl_stream << " using int64_t = long long;\n";
decl_stream << " using uint64_t = unsigned long long;\n";
decl_stream << "#else\n";
decl_stream << " #define uint unsigned int\n";
decl_stream << " #define uchar unsigned char\n";
decl_stream << " #define ushort unsigned short\n";
decl_stream << " #define int64_t long\n";
decl_stream << " #define uint64_t ulong\n";
decl_stream << "#endif\n";

return CodeGenC::Finish();
}

Expand All @@ -99,7 +113,7 @@ void CodeGenCUDA::BindThreadIndex(const IterVar& iv) {
void CodeGenCUDA::PrintType(DataType t, std::ostream& os) { // NOLINT(*)
int lanes = t.lanes();
if (t.is_handle()) {
ICHECK_EQ(lanes, 1) << "do not yet support vector types";
ICHECK(t.is_scalar()) << "do not yet support vector types";
os << "void*";
return;
}
Expand All @@ -108,7 +122,7 @@ void CodeGenCUDA::PrintType(DataType t, std::ostream& os) { // NOLINT(*)
switch (t.bits()) {
case 16:
enable_fp16_ = true;
if (lanes == 1) {
if (t.is_scalar()) {
os << "half";
} else if (lanes <= 8) {
// Emit CUDA code to access fp16 vector elements.
Expand Down Expand Up @@ -136,7 +150,7 @@ void CodeGenCUDA::PrintType(DataType t, std::ostream& os) { // NOLINT(*)
fail = true;
break;
}
if (!fail && (lanes == 1 || t.bits() == 16)) return;
if (!fail && (t.is_scalar() || t.bits() == 16)) return;
if (!fail && (lanes >= 2 && lanes <= 4)) {
os << lanes;
return;
Expand All @@ -154,15 +168,11 @@ void CodeGenCUDA::PrintType(DataType t, std::ostream& os) { // NOLINT(*)
}
} else if (t.is_uint() || t.is_int()) {
if (t.is_uint()) {
if (t.lanes() != 1) {
os << "u";
} else {
os << "unsigned ";
}
os << "u";
}
switch (t.bits()) {
case 1: {
if (t.lanes() == 1) {
if (t.is_scalar()) {
os << "int";
return;
} else if (t.lanes() == 8) {
Expand All @@ -179,7 +189,7 @@ void CodeGenCUDA::PrintType(DataType t, std::ostream& os) { // NOLINT(*)
}
}
case 4: {
if (t.lanes() == 1) {
if (t.is_scalar()) {
os << "int";
return;
} else if (t.lanes() == 4) {
Expand Down Expand Up @@ -220,7 +230,7 @@ void CodeGenCUDA::PrintType(DataType t, std::ostream& os) { // NOLINT(*)
enable_int8_ = true;
os << "int4";
return;
} else if (!t.is_uint() && t.lanes() == 1) {
} else if (!t.is_uint() && t.is_scalar()) {
os << "signed char";
break;
} else {
Expand All @@ -235,22 +245,16 @@ void CodeGenCUDA::PrintType(DataType t, std::ostream& os) { // NOLINT(*)
os << "int";
break;
case 64: {
if (sizeof(long) != 8) { // NOLINT(*)
if (t.lanes() == 1) {
os << "long long";
break;
} else if (t.lanes() == 2) {
os << "longlong";
break;
} else {
// No longlong3, longlong4
LOG(FATAL) << "Cannot convert type " << t << " to CUDA type on a L32 platform";
break;
}
} else {
os << "long";
break;
if (t.is_scalar()) {
os << "int64_t";
} else if (t.lanes() == 2) {
os << "longlong2";
} else if (t.lanes() == 3) {
os << "longlong3";
} else if (t.lanes() == 4) {
os << "longlong4";
}
return;
}
default:
fail = true;
Expand Down
42 changes: 42 additions & 0 deletions tests/python/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import sys
import tvm

collect_ignore = []
if sys.platform.startswith("win"):
collect_ignore.append("frontend/caffe")
collect_ignore.append("frontend/caffe2")
collect_ignore.append("frontend/coreml")
collect_ignore.append("frontend/darknet")
collect_ignore.append("frontend/keras")
collect_ignore.append("frontend/mxnet")
collect_ignore.append("frontend/pytorch")
collect_ignore.append("frontend/tensorflow")
collect_ignore.append("frontend/tflite")
collect_ignore.append("frontend/onnx")
collect_ignore.append("driver/tvmc/test_autoscheduler.py")
collect_ignore.append("unittest/test_auto_scheduler_cost_model.py") # stack overflow
# collect_ignore.append("unittest/test_auto_scheduler_measure.py") # exception ignored
collect_ignore.append("unittest/test_auto_scheduler_search_policy.py") # stack overflow
# collect_ignore.append("unittest/test_auto_scheduler_measure.py") # exception ignored

collect_ignore.append("unittest/test_tir_intrin.py")

if tvm.support.libinfo().get("USE_MICRO", "OFF") != "ON":
collect_ignore.append("unittest/test_micro_transport.py")
13 changes: 7 additions & 6 deletions tests/python/unittest/test_auto_scheduler_cost_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,14 +68,15 @@ def test_xgb_model():
assert rmse <= 0.3

# test loading a record file
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, inputs, results)
model.update_from_file(fp.name)
tmpdir = tvm.contrib.utils.tempdir()
tmpfile = tmpdir.relpath("test1")
auto_scheduler.save_records(tmpfile, inputs, results)
model.update_from_file(tmpfile)

# test model serialization
with tempfile.NamedTemporaryFile() as fp:
model.save(fp.name)
model.load(fp.name)
tmpfile = tmpdir.relpath("test2")
model.save(tmpfile)
model.load(tmpfile)


if __name__ == "__main__":
Expand Down
4 changes: 3 additions & 1 deletion tests/python/unittest/test_crt.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,9 @@
import copy
import glob
import os
import pty
import pytest

pytest.importorskip("pty")
import sys
import subprocess
import textwrap
Expand Down
Loading