
Commit 35e8f86

[MLIR][ONNX] Add OnnxToTorch support for Dropout and Elu op
Signed-off-by: Vivek Khandelwal <vivekkhandelwal1424@gmail.com>
Parent: 07d0645

File tree: 2 files changed, +133 −0 lines

lib/Conversion/TorchOnnxToTorch/DefaultDomainAtoF.cpp

Lines changed: 75 additions & 0 deletions
@@ -904,6 +904,62 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
                       binder.op, resultType, lhs, rhs);
                   return success();
                 });
+  patterns.onOp(
+      "Dropout", 12, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
+        Location loc = binder.getLoc();
+        Torch::ValueTensorType resultType;
+        int64_t numOperands = binder.op->getNumOperands();
+        SmallVector<Value> operands;
+        int64_t seed;
+        if (binder.tensorOperands(operands, numOperands) ||
+            binder.s64IntegerAttr(seed, "seed", 0) ||
+            binder.tensorResultTypeAtIndex(resultType, 0))
+          return failure();
+
+        // Only the default seed value (0) is supported.
+        if (seed != 0) {
+          return rewriter.notifyMatchFailure(binder.op,
+                                             "expected seed value to be 0");
+        }
+
+        Value ratio, trainingMode;
+        if (numOperands == 3) {
+          ratio = rewriter.create<Torch::AtenFloatImplicitOp>(loc, operands[1]);
+          Value trainingModeScalar =
+              rewriter.create<Torch::AtenIntImplicitOp>(loc, operands[2]);
+          Value cstOne = rewriter.create<Torch::ConstantIntOp>(
+              loc, rewriter.getI64IntegerAttr(1));
+          trainingMode = rewriter.create<Torch::AtenEqIntOp>(
+              loc, trainingModeScalar, cstOne);
+        } else if (numOperands == 2) {
+          ratio = rewriter.create<Torch::AtenFloatImplicitOp>(loc, operands[1]);
+          trainingMode = rewriter.create<Torch::ConstantBoolOp>(loc, false);
+        } else {
+          ratio = rewriter.create<Torch::ConstantFloatOp>(
+              loc, rewriter.getF64FloatAttr(0.5));
+          trainingMode = rewriter.create<Torch::ConstantBoolOp>(loc, false);
+        }
+
+        Value dropout = rewriter.create<Torch::AtenDropoutOp>(
+            loc, resultType, /*input=*/operands[0], ratio, trainingMode);
+
+        if (binder.op->getNumResults() == 1) {
+          rewriter.replaceOp(binder.op, dropout);
+          return success();
+        }
+        Torch::ValueTensorType maskType;
+        if (binder.tensorResultTypeAtIndex(maskType, 1))
+          return failure();
+        Value dtype = rewriter.create<Torch::ConstantIntOp>(
+            loc, rewriter.getI64IntegerAttr(
+                     (int64_t)torch_upstream::ScalarType::Bool));
+        Value none = rewriter.create<Torch::ConstantNoneOp>(loc);
+        Value mask = rewriter.create<Torch::AtenOnesLikeOp>(
+            loc, maskType, operands[0], dtype, /*layout=*/none,
+            /*device=*/none, /*pin_memory=*/none, /*memory_format=*/none);
+        rewriter.replaceOp(binder.op, {dropout, mask});
+        return success();
+      });
   patterns.onOp("Equal", 1,
                 [](OpBinder binder, ConversionPatternRewriter &rewriter) {
                   Torch::ValueTensorType resultType;
@@ -916,6 +972,25 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
                       binder.op, resultType, lhs, rhs);
                   return success();
                 });
+  patterns.onOp("Elu", 6,
+                [](OpBinder binder, ConversionPatternRewriter &rewriter) {
+                  Location loc = binder.getLoc();
+                  Torch::ValueTensorType resultType;
+                  Value input;
+                  float alpha;
+                  if (binder.tensorOperand(input) ||
+                      binder.f32FloatAttr(alpha, "alpha") ||
+                      binder.tensorResultType(resultType))
+                    return failure();
+                  Value cstAlpha = rewriter.create<Torch::ConstantFloatOp>(
+                      loc, rewriter.getF64FloatAttr(alpha));
+                  Value cstOne = rewriter.create<Torch::ConstantFloatOp>(
+                      loc, rewriter.getF64FloatAttr(1.0));
+                  rewriter.replaceOpWithNewOp<Torch::AtenEluOp>(
+                      binder.op, resultType, input, cstAlpha, /*scale=*/cstOne,
+                      /*input_scale=*/cstOne);
+                  return success();
+                });
   patterns.onOp("Erf", 13,
                 [](OpBinder binder, ConversionPatternRewriter &rewriter) {
                   Torch::ValueTensorType resultType;
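
For reference, a minimal sketch of the IR the three-operand Dropout path above is intended to produce. The SSA names (%data, %ratio, %train, etc.) are illustrative assumptions, not converter output; the mnemonics correspond to the torch dialect ops the pattern creates:

  // Source op: onnx.Dropout with ratio and training_mode operands.
  %result:2 = torch.operator "onnx.Dropout"(%data, %ratio, %train) : (!torch.vtensor<[3,4,5],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],i1>) -> (!torch.vtensor<[3,4,5],f32>, !torch.vtensor<[3,4,5],i1>)

  // Sketched lowering:
  %p = torch.aten.FloatImplicit %ratio : !torch.vtensor<[],f32> -> !torch.float
  %t = torch.aten.IntImplicit %train : !torch.vtensor<[],i1> -> !torch.int
  %int1 = torch.constant.int 1
  %mode = torch.aten.eq.int %t, %int1 : !torch.int, !torch.int -> !torch.bool
  %out = torch.aten.dropout %data, %p, %mode : !torch.vtensor<[3,4,5],f32>, !torch.float, !torch.bool -> !torch.vtensor<[3,4,5],f32>
  // Emitted only when the mask result is requested;
  // 11 is torch_upstream::ScalarType::Bool.
  %int11 = torch.constant.int 11
  %none = torch.constant.none
  %mask = torch.aten.ones_like %data, %int11, %none, %none, %none, %none : !torch.vtensor<[3,4,5],f32>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[3,4,5],i1>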

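A note on the Elu constants: ONNX Elu is defined only in terms of alpha, while aten.elu also takes scale and input_scale parameters. Informally (the Torch form below follows aten::elu(self, alpha, scale, input_scale)):

  ONNX:  Elu(x) = x                                           for x >= 0
         Elu(x) = alpha * (exp(x) - 1)                        for x <  0

  Torch: elu(x) = scale * x                                   for x >= 0
         elu(x) = scale * alpha * (exp(input_scale * x) - 1)  for x <  0

Passing 1.0 for both /*scale=*/ and /*input_scale=*/, as the pattern above does, makes the Torch op coincide with the ONNX definition, leaving alpha as the only value read from the op's attributes.
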
test/Conversion/TorchOnnxToTorch/simple_ops_a_to_f.mlir

Lines changed: 58 additions & 0 deletions
@@ -740,3 +740,61 @@ func.func @test_concat_3d_axis_negative_3(%arg0: !torch.vtensor<[2,2,2],f32>, %a
   %0 = torch.operator "onnx.Concat"(%arg0, %arg1) {torch.onnx.axis = -3 : si64} : (!torch.vtensor<[2,2,2],f32>, !torch.vtensor<[2,2,2],f32>) -> !torch.vtensor<[4,2,2],f32>
   return %0 : !torch.vtensor<[4,2,2],f32>
 }
+
+// CHECK-LABEL: @test_dropout
+func.func @test_dropout(%arg0: !torch.vtensor<[3],f32>) -> !torch.vtensor<[3],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 13 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  // CHECK: torch.aten.dropout %arg0, %float5.000000e-01, %false : !torch.vtensor<[3],f32>, !torch.float, !torch.bool -> !torch.vtensor<[3],f32>
+  %0 = torch.operator "onnx.Dropout"(%arg0) : (!torch.vtensor<[3],f32>) -> !torch.vtensor<[3],f32>
+  return %0 : !torch.vtensor<[3],f32>
+}
+
+// CHECK-LABEL: @test_dropout_default
+func.func @test_dropout_default(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 13 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  // CHECK: torch.aten.dropout %arg0, %float5.000000e-01, %false : !torch.vtensor<[3,4,5],f32>, !torch.float, !torch.bool -> !torch.vtensor<[3,4,5],f32>
+  %0 = torch.operator "onnx.Dropout"(%arg0) {torch.onnx.seed = 0 : si64} : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32>
+  return %0 : !torch.vtensor<[3,4,5],f32>
+}
+
+// CHECK-LABEL: @test_dropout_default_mask
+func.func @test_dropout_default_mask(%arg0: !torch.vtensor<[3,4,5],f32>) -> (!torch.vtensor<[3,4,5],f32>, !torch.vtensor<[3,4,5],i1>) attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 13 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  // CHECK: torch.aten.dropout %arg0, %float5.000000e-01, %false : !torch.vtensor<[3,4,5],f32>, !torch.float, !torch.bool -> !torch.vtensor<[3,4,5],f32>
+  // CHECK: torch.aten.ones_like %arg0, %int11, %none, %none, %none, %none : !torch.vtensor<[3,4,5],f32>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[3,4,5],i1>
+  %0:2 = torch.operator "onnx.Dropout"(%arg0) {torch.onnx.seed = 0 : si64} : (!torch.vtensor<[3,4,5],f32>) -> (!torch.vtensor<[3,4,5],f32>, !torch.vtensor<[3,4,5],i1>)
+  return %0#0, %0#1 : !torch.vtensor<[3,4,5],f32>, !torch.vtensor<[3,4,5],i1>
+}
+
+// CHECK-LABEL: @test_dropout_default_mask_ratio
+func.func @test_dropout_default_mask_ratio(%arg0: !torch.vtensor<[3,4,5],f32>, %arg1: !torch.vtensor<[],f32>) -> (!torch.vtensor<[3,4,5],f32>, !torch.vtensor<[3,4,5],i1>) attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 13 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  // CHECK: torch.aten.dropout %arg0, %0, %false : !torch.vtensor<[3,4,5],f32>, !torch.float, !torch.bool -> !torch.vtensor<[3,4,5],f32>
+  // CHECK: torch.aten.ones_like %arg0, %int11, %none, %none, %none, %none : !torch.vtensor<[3,4,5],f32>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[3,4,5],i1>
+  %0:2 = torch.operator "onnx.Dropout"(%arg0, %arg1) {torch.onnx.seed = 0 : si64} : (!torch.vtensor<[3,4,5],f32>, !torch.vtensor<[],f32>) -> (!torch.vtensor<[3,4,5],f32>, !torch.vtensor<[3,4,5],i1>)
+  return %0#0, %0#1 : !torch.vtensor<[3,4,5],f32>, !torch.vtensor<[3,4,5],i1>
+}
+
+// CHECK-LABEL: @test_dropout_default_ratio
+func.func @test_dropout_default_ratio(%arg0: !torch.vtensor<[3,4,5],f32>, %arg1: !torch.vtensor<[],f32>) -> !torch.vtensor<[3,4,5],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 13 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  // CHECK: torch.aten.dropout %arg0, %0, %false : !torch.vtensor<[3,4,5],f32>, !torch.float, !torch.bool -> !torch.vtensor<[3,4,5],f32>
+  %0 = torch.operator "onnx.Dropout"(%arg0, %arg1) {torch.onnx.seed = 0 : si64} : (!torch.vtensor<[3,4,5],f32>, !torch.vtensor<[],f32>) -> !torch.vtensor<[3,4,5],f32>
+  return %0 : !torch.vtensor<[3,4,5],f32>
+}
+
+// CHECK-LABEL: @test_training_dropout_zero_ratio
+func.func @test_training_dropout_zero_ratio(%arg0: !torch.vtensor<[3,4,5],f32>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],i1>) -> !torch.vtensor<[3,4,5],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 13 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  // CHECK: torch.aten.dropout %arg0, %0, %2 : !torch.vtensor<[3,4,5],f32>, !torch.float, !torch.bool -> !torch.vtensor<[3,4,5],f32>
+  %0 = torch.operator "onnx.Dropout"(%arg0, %arg1, %arg2) {torch.onnx.seed = 0 : si64} : (!torch.vtensor<[3,4,5],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],i1>) -> !torch.vtensor<[3,4,5],f32>
+  return %0 : !torch.vtensor<[3,4,5],f32>
+}
+
+// CHECK-LABEL: @test_elu_default
+func.func @test_elu_default(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32> attributes {torch.onnx_meta.ir_version = 3 : si64, torch.onnx_meta.opset_version = 6 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  // CHECK: torch.aten.elu %arg0, %float0.000000e00, %float1.000000e00, %float1.000000e00 : !torch.vtensor<[3,4,5],f32>, !torch.float, !torch.float, !torch.float -> !torch.vtensor<[3,4,5],f32>
+  %0 = torch.operator "onnx.Elu"(%arg0) : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32>
+  return %0 : !torch.vtensor<[3,4,5],f32>
+}
+
+// CHECK-LABEL: @test_elu_example
+func.func @test_elu_example(%arg0: !torch.vtensor<[3],f32>) -> !torch.vtensor<[3],f32> attributes {torch.onnx_meta.ir_version = 3 : si64, torch.onnx_meta.opset_version = 6 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  // CHECK: torch.aten.elu %arg0, %float2.000000e00, %float1.000000e00, %float1.000000e00 : !torch.vtensor<[3],f32>, !torch.float, !torch.float, !torch.float -> !torch.vtensor<[3],f32>
+  %0 = torch.operator "onnx.Elu"(%arg0) {torch.onnx.alpha = 2.000000e+00 : f32} : (!torch.vtensor<[3],f32>) -> !torch.vtensor<[3],f32>
+  return %0 : !torch.vtensor<[3],f32>
+}
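
These cases are meant to be driven through lit and FileCheck. Assuming this test file keeps the suite's usual RUN line (the exact flags here are an assumption, not part of this diff), that looks like:

  // RUN: torch-mlir-opt <%s -convert-torch-onnx-to-torch -split-input-file -verify-diagnostics | FileCheck %s

Each CHECK line then matches the torch.aten.dropout, torch.aten.ones_like, or torch.aten.elu op emitted for the corresponding torch.operator call.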
