diff --git a/docs/ONNXAI.md b/docs/ONNXAI.md
index 5529733442..73eba1fea5 100644
--- a/docs/ONNXAI.md
+++ b/docs/ONNXAI.md
@@ -3,7 +3,7 @@
 # About
 
 ONNX-MLIR is an open-source project for compiling ONNX models into native code
-on x86, P and Z machines (and more). It is built on top of Multi-Level
+on x86, Power, s390x and other architectures. It is built on top of Multi-Level
 Intermediate Representation (MLIR) compiler infrastructure.
 
 # Slack channel
diff --git a/docs/doc_example/main.c b/docs/doc_example/main.c
index 83537c21c5..3a940d0aca 100644
--- a/docs/doc_example/main.c
+++ b/docs/doc_example/main.c
@@ -1,5 +1,6 @@
 #include <OnnxMlirRuntime.h>
 #include <stdio.h>
+#include <stdlib.h>
 
 OMTensorList *run_main_graph(OMTensorList *);
 
@@ -11,9 +12,16 @@ OMTensorList *create_input_list() {
 
   // Construct float arrays filled with 1s or 2s.
   float *x1Data = (float *)malloc(sizeof(float) * num_elements);
+  // Check if memory is allocated for generating the data.
+  if(!x1Data) return NULL;
   for (int i = 0; i < num_elements; i++)
     x1Data[i] = 1.0;
   float *x2Data = (float *)malloc(sizeof(float) * num_elements);
+  // Check if memory is allocated for generating the data.
+  if(!x2Data){
+    free(x1Data);
+    return NULL;
+  }
   for (int i = 0; i < num_elements; i++)
     x2Data[i] = 2.0;
@@ -32,7 +40,10 @@ int main() {
 
   // Generate input TensorList
   OMTensorList *input_list = create_input_list();
-
+  if(!input_list){
+    // Return 2 for failure to create inputs.
+    return 2;
+  }
   // Call the compiled onnx model function.
   OMTensorList *output_list = run_main_graph(input_list);
   if (!output_list) {
diff --git a/src/Compiler/CompilerUtils.cpp b/src/Compiler/CompilerUtils.cpp
index d166802e06..96191a975e 100644
--- a/src/Compiler/CompilerUtils.cpp
+++ b/src/Compiler/CompilerUtils.cpp
@@ -171,18 +171,21 @@ int Command::exec(std::string wdir) const {
 }
 
 void showCompilePhase(std::string msg) {
-  time_t rawTime;
+  time_t rawTime = 0;
   struct tm *timeInfo;
-  char buffer[80];
+  char buffer[80] = "";
   // Remember first time.
   static time_t firstRawTime;
   static bool hasFirstRawTime = false;
 
   // Get current date.
-  time(&rawTime);
-  timeInfo = localtime(&rawTime);
-  strftime(buffer, 80, "%c", timeInfo);
-  std::string currentTime(buffer);
+  std::string currentTime("");
+  if (time(&rawTime) == -1 || (timeInfo = localtime(&rawTime)) == NULL ||
+      (strftime(buffer, 80, "%c", timeInfo)) == 0) {
+    currentTime = "Error obtaining current time";
+  } else {
+    currentTime = buffer;
+  }
 
   // Compute time difference in seconds.
   int diff = 0;
diff --git a/src/Conversion/ONNXToKrnl/CMakeLists.txt b/src/Conversion/ONNXToKrnl/CMakeLists.txt
index f5faedf2a5..6a68f3cf2a 100644
--- a/src/Conversion/ONNXToKrnl/CMakeLists.txt
+++ b/src/Conversion/ONNXToKrnl/CMakeLists.txt
@@ -11,6 +11,7 @@ add_onnx_mlir_library(OMONNXToKrnl
   ControlFlow/If.cpp
   ControlFlow/Loop.cpp
   ControlFlow/Scan.cpp
+  ControlFlow/Yield.cpp
   ConvertONNXToKrnl.cpp
   ML/CategoryMapper.cpp
   Math/CumSum.cpp
diff --git a/src/Conversion/ONNXToKrnl/ControlFlow/If.cpp b/src/Conversion/ONNXToKrnl/ControlFlow/If.cpp
index a3b2677152..0edbb4bd62 100644
--- a/src/Conversion/ONNXToKrnl/ControlFlow/If.cpp
+++ b/src/Conversion/ONNXToKrnl/ControlFlow/If.cpp
@@ -54,14 +54,6 @@ struct ONNXIfOpLowering : public OpConversionPattern<ONNXIfOp> {
 
     rewriter.eraseBlock(&scfBranch.back());
     scfBranch.takeBody(graph);
-    rewriter.setInsertionPointToEnd(&scfBranch.back());
-
-    Operation *yieldOp = scfBranch.back().getTerminator();
-    llvm::SmallVector<Value> outputs;
-    if (failed(rewriter.getRemappedValues(yieldOp->getOperands(), outputs))) {
-      llvm_unreachable("failed to convert branch return values");
-    }
-    rewriter.replaceOpWithNewOp<scf::YieldOp>(yieldOp, outputs);
   }
 };
 
diff --git a/src/Conversion/ONNXToKrnl/ControlFlow/Yield.cpp b/src/Conversion/ONNXToKrnl/ControlFlow/Yield.cpp
new file mode 100644
index 0000000000..df6b03b5ce
--- /dev/null
+++ b/src/Conversion/ONNXToKrnl/ControlFlow/Yield.cpp
@@ -0,0 +1,57 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+//===--------------------- Yield.cpp - Lowering Yield Op ------------------===//
+//
+// Copyright 2019-2023 The IBM Research Authors.
+//
+// =============================================================================
+//
+// This file lowers the ONNX Yield Operator to Krnl dialect.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/SCF/IR/SCF.h"
+
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
+
+using namespace mlir;
+
+namespace onnx_mlir {
+
+struct ONNXYieldOpLowering : public OpConversionPattern<ONNXYieldOp> {
+  ONNXYieldOpLowering(TypeConverter &typeConverter, MLIRContext *ctx)
+      : OpConversionPattern(typeConverter, ctx) {}
+
+  LogicalResult matchAndRewrite(ONNXYieldOp yieldOp, ONNXYieldOpAdaptor adaptor,
+      ConversionPatternRewriter &rewriter) const final {
+    // Gather info.
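+    // Note: the yield operands still carry ONNX tensor types at this point;
+    // below, each one is materialized to its converted (memref) type before
+    // the terminator is rewritten into scf.yield.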
+    Operation *op = yieldOp.getOperation();
+    Location loc = ONNXLoc<ONNXYieldOp>(op);
+
+    MultiDialectBuilder<KrnlBuilder, IndexExprBuilderForKrnl> create(
+        rewriter, loc);
+
+    ValueRange inputs = yieldOp.getOperands();
+    llvm::SmallVector<Value> outputs;
+    for (Value input : inputs) {
+      Type inputType = input.getType();
+      Type outputType = typeConverter->convertType(inputType);
+      outputs.emplace_back(typeConverter->materializeTargetConversion(
+          rewriter, loc, outputType, input));
+    }
+
+    rewriter.replaceOpWithNewOp<scf::YieldOp>(yieldOp, outputs);
+
+    onnxToKrnlSimdReport(op);
+    return success();
+  }
+};
+
+void populateLoweringONNXYieldOpPattern(RewritePatternSet &patterns,
+    TypeConverter &typeConverter, MLIRContext *ctx) {
+  patterns.insert<ONNXYieldOpLowering>(typeConverter, ctx);
+}
+
+} // namespace onnx_mlir
diff --git a/src/Conversion/ONNXToKrnl/ConvertONNXToKrnl.cpp b/src/Conversion/ONNXToKrnl/ConvertONNXToKrnl.cpp
index 3b81bd5005..fd78bd2f4f 100644
--- a/src/Conversion/ONNXToKrnl/ConvertONNXToKrnl.cpp
+++ b/src/Conversion/ONNXToKrnl/ConvertONNXToKrnl.cpp
@@ -203,6 +203,7 @@ void populateONNXToKrnlConversionPattern(RewritePatternSet &patterns,
   populateLoweringONNXIfOpPattern(patterns, typeConverter, ctx);
   populateLoweringONNXLoopOpPattern(patterns, typeConverter, ctx);
   populateLoweringONNXScanOpPattern(patterns, typeConverter, ctx);
+  populateLoweringONNXYieldOpPattern(patterns, typeConverter, ctx);
   // Math
   populateLoweringONNXCumSumOpPattern(patterns, typeConverter, ctx);
   populateLoweringONNXDFTOpPattern(patterns, typeConverter, ctx);
diff --git a/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp b/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp
index 750b87a6b8..db72437837 100644
--- a/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp
+++ b/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp
@@ -317,6 +317,8 @@ void populateLoweringONNXLoopOpPattern(
     mlir::RewritePatternSet &, mlir::TypeConverter &, mlir::MLIRContext *);
 void populateLoweringONNXScanOpPattern(
     mlir::RewritePatternSet &, mlir::TypeConverter &, mlir::MLIRContext *);
+void populateLoweringONNXYieldOpPattern(
+    mlir::RewritePatternSet &, mlir::TypeConverter &, mlir::MLIRContext *);
 
 // `Math` directory methods:
 void populateLoweringONNXClipOpPattern(
diff --git a/test/mlir/conversion/onnx_to_krnl/ControlFlow/If.mlir b/test/mlir/conversion/onnx_to_krnl/ControlFlow/If.mlir
index 2e83a47a36..0891f3c5b0 100644
--- a/test/mlir/conversion/onnx_to_krnl/ControlFlow/If.mlir
+++ b/test/mlir/conversion/onnx_to_krnl/ControlFlow/If.mlir
@@ -8,15 +8,20 @@ func.func @test_if_simple(%arg0: tensor, %arg1: tensor, %arg2: tensor
   }) : (tensor) -> tensor
   return %0 : tensor
 
-// CHECK-LABEL: @test_if_simple
+// CHECK-LABEL: func.func @test_if_simple
 // CHECK-SAME: ([[PARAM_0_:%.+]]: memref, [[PARAM_1_:%.+]]: memref, [[PARAM_2_:%.+]]: memref) -> memref {
-// CHECK: [[LOAD_PARAM_0_MEM_:%.+]] = krnl.load [[PARAM_0_]][] : memref
-// CHECK: [[VAR_1_:%.+]] = scf.if [[LOAD_PARAM_0_MEM_]] -> (memref) {
-// CHECK: scf.yield [[PARAM_1_]] : memref
+// CHECK-DAG: [[VAR_0_:%.+]] = builtin.unrealized_conversion_cast [[PARAM_2_]] : memref to tensor
+// CHECK-DAG: [[VAR_1_:%.+]] = builtin.unrealized_conversion_cast [[PARAM_1_]] : memref to tensor
+// CHECK-DAG: [[LOAD_PARAM_0_MEM_:%.+]] = krnl.load [[PARAM_0_]][] : memref
+// CHECK-NOT: separator of consecutive DAGs
+// CHECK-DAG: [[VAR_3_:%.+]] = scf.if [[LOAD_PARAM_0_MEM_]] -> (memref) {
+// CHECK-DAG: [[VAR_4_:%.+]] = builtin.unrealized_conversion_cast [[VAR_1_]] : tensor to memref
+// CHECK: scf.yield [[VAR_4_]] : memref
 // CHECK: } else {
-// CHECK: scf.yield [[PARAM_2_]] : memref
+// CHECK: [[VAR_4_1_:%.+]] = builtin.unrealized_conversion_cast [[VAR_0_]] : tensor to memref
+// CHECK: scf.yield [[VAR_4_1_]] : memref
 // CHECK: }
-// CHECK: return [[VAR_1_]] : memref
+// CHECK: return [[VAR_3_]] : memref
 // CHECK: }
 }
 
diff --git a/test/mlir/conversion/onnx_to_krnl/ControlFlow/lit.local.cfg b/test/mlir/conversion/onnx_to_krnl/ControlFlow/lit.local.cfg
deleted file mode 100644
index 880874b71c..0000000000
--- a/test/mlir/conversion/onnx_to_krnl/ControlFlow/lit.local.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-# TODO: remove this file when the excluded test is fixed
-config.excludes.add("If_with_canonicalize.mlir")