[AMP OP&Test] Add bf16/fp16 support for elementwise_max (PaddlePaddle#51151)

* add fp16 support for elementwise_max

* add bf16 support for elementwise_max

* add fp16/bf16 broadcast support for the op

* fix elementwise_max bf16 unit-test numeric delta

* append fp16/bf16 unit tests

* add fp16/bf16 unit tests

* change bf16 unit-test delta

* fix some issues

* add prim support for fp16
piDack authored Mar 14, 2023
1 parent b4f49aa commit 143ecee
Showing 1 changed file with 221 additions and 62 deletions.
283 changes: 221 additions & 62 deletions python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
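
For context: the file under test exercises paddle.maximum, which this commit extends to float16 and, on suitable GPUs, bfloat16. A minimal sketch of the eager-mode call the new tests cover, assuming a GPU build of Paddle with fp16 kernels (values and shapes here are illustrative, not taken from the diff):

import numpy as np
import paddle

# fp16 elementwise max with broadcasting, as in the new broadcast_4 test
x = paddle.to_tensor(np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float16))
y = paddle.to_tensor(np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float16))
out = paddle.maximum(x, y)   # y broadcasts along axis 2; dtype stays float16
print(out.dtype, out.shape)  # paddle.float16 [2, 3, 4, 5]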
@@ -22,18 +22,23 @@


 class TestElementwiseOp(OpTest):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        self.enable_cinn = False
+    def init_data(self):
         # If x and y have the same value, the max() is not differentiable.
         # So we generate test data by the following method
         # to avoid them being too close to each other.
-        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
         sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
-        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(
+            "float64"
+        )
+
+    def setUp(self):
+        self.init_data()
+        self.op_type = "elementwise_max"
+        self.prim_op_type = "prim"
+        self.enable_cinn = False
+        self.python_api = paddle.maximum
+        self.inputs = {'X': self.x, 'Y': self.y}
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}

     def test_check_output(self):
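
The differentiability comment above is the key to how every init_data in this file builds its data: np.maximum has no unique gradient where x == y, and OpTest's finite-difference gradient check is unstable near ties, so y is always offset from x by at least 0.1. A standalone numpy sketch of the invariant (illustrative, not part of the test file):

import numpy as np

x = np.random.uniform(0.1, 1, [13, 17])
sgn = np.random.choice([-1, 1], [13, 17])
y = x + sgn * np.random.uniform(0.1, 1, [13, 17])
# |y - x| is drawn from [0.1, 1), so no element of y ties with x and
# np.maximum(x, y) is differentiable at every sampled point
assert np.abs(y - x).min() >= 0.1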
@@ -90,40 +95,49 @@ def test_check_grad_ingore_y(self):
         )


+class TestElementwiseFP16Op(TestElementwiseOp):
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
+        sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float16)
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(
+            np.float16
+        )
+
+
 class TestElementwiseMaxOp_ZeroDim1(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        self.enable_cinn = False
-        x = np.random.uniform(0.1, 1, []).astype("float64")
-        y = np.random.uniform(0.1, 1, []).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float64")
+        self.y = np.random.uniform(0.1, 1, []).astype("float64")
+
+
+class TestElementwiseMaxFP16Op_ZeroDim1(TestElementwiseOp):
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float16")
+        self.y = np.random.uniform(0.1, 1, []).astype("float16")


 class TestElementwiseMaxOp_ZeroDim2(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        self.enable_cinn = False
-        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
-        y = np.random.uniform(0.1, 1, []).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
+        self.y = np.random.uniform(0.1, 1, []).astype("float64")
+
+
+class TestElementwiseMaxFP16Op_ZeroDim2(TestElementwiseOp):
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
+        self.y = np.random.uniform(0.1, 1, []).astype("float16")


 class TestElementwiseMaxOp_ZeroDim3(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        self.enable_cinn = False
-        x = np.random.uniform(0.1, 1, []).astype("float64")
-        y = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float64")
+        self.y = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
+
+
+class TestElementwiseMaxFP16Op_ZeroDim3(TestElementwiseOp):
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float16")
+        self.y = np.random.uniform(0.1, 1, [13, 17]).astype("float16")


 @unittest.skipIf(
@@ -135,23 +149,30 @@ def setUp(self):
     "run test when gpu is availble and the minimum cudnn version is 8.1.0 and gpu's compute capability is at least 8.0.",
 )
 class TestElementwiseBF16Op(OpTest):
+    def init_data(self):
+        # If x and y have the same value, the max() is not differentiable.
+        # So we generate test data by the following method
+        # to avoid them being too close to each other.
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
+        sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float32)
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(
+            np.float32
+        )
+
     def setUp(self):
+        self.init_data()
         self.op_type = "elementwise_max"
         self.python_api = paddle.maximum
         self.prim_op_type = "prim"
         self.enable_cinn = False
         self.dtype = np.uint16
-        # If x and y have the same value, the max() is not differentiable.
-        # So we generate test data by the following method
-        # to avoid them being too close to each other.
-        x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
-        sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float32)
-        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
         self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
+            'X': convert_float_to_uint16(self.x),
+            'Y': convert_float_to_uint16(self.y),
         }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+        self.outputs = {
+            'Out': convert_float_to_uint16(np.maximum(self.x, self.y))
+        }

     def test_check_output(self):
         if hasattr(self, 'attrs'):
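
OpTest has no native bfloat16 dtype, so the class above stores uint16 bit patterns produced by convert_float_to_uint16 and sets self.dtype = np.uint16. A rough numpy equivalent of that conversion, assuming the usual bfloat16-as-truncated-float32 representation (a sketch, not Paddle's exact helper):

import numpy as np

def float_to_bf16_bits(x):
    # bfloat16 shares float32's sign and exponent bits, so keeping the
    # high 16 bits of each float32 yields the bf16 pattern as uint16
    return (np.asarray(x, np.float32).view(np.uint32) >> 16).astype(np.uint16)

def bf16_bits_to_float(u):
    # round-trip: put the 16 bits back into the high half of a float32
    return (u.astype(np.uint32) << 16).view(np.float32)

x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
# truncating to a 7-bit mantissa loses at most ~2**-7 relative precision
assert np.allclose(bf16_bits_to_float(float_to_bf16_bits(x)), x, rtol=2**-7)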
@@ -173,32 +194,79 @@ def test_check_grad_ingore_y(self):
         self.check_grad(['X'], 'Out', no_grad_set=set('Y'))


+class TestElementwiseMaxBF16Op_ZeroDim1(TestElementwiseBF16Op):
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float32")
+        self.y = np.random.uniform(0.1, 1, []).astype("float32")
+
+    def test_check_grad_normal(self):
+        if hasattr(self, 'attrs'):
+            self.check_grad(
+                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=False
+            )
+        else:
+            self.check_grad(
+                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=True
+            )
+
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', numeric_grad_delta=0.05, no_grad_set=set("X")
+        )
+
+    def test_check_grad_ingore_y(self):
+        self.check_grad(
+            ['X'], 'Out', numeric_grad_delta=0.05, no_grad_set=set('Y')
+        )
+
+
+class TestElementwiseMaxBF16Op_scalar(TestElementwiseBF16Op):
+    def init_data(self):
+        self.x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
+        self.y = np.array([0.5]).astype("float32")
+        self.__class__.no_need_check_grad = True
+
+
 @skip_check_grad_ci(
     reason="[skip shape check] Use y_shape(1) to test broadcast."
 )
 class TestElementwiseMaxOp_scalar(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        self.enable_cinn = False
-        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float64")
-        y = np.array([0.5]).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float64")
+        self.y = np.array([0.5]).astype("float64")
+
+
+class TestElementwiseMaxFP16Op_scalar(TestElementwiseMaxOp_scalar):
+    def init_data(self):
+        self.x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float16")
+        self.y = np.array([0.5]).astype("float16")
+
+
 class TestElementwiseMaxOp_Vector(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        self.enable_cinn = False
-        x = np.random.random((100,)).astype("float64")
+    def init_data(self):
+        self.x = np.random.random((100,)).astype("float64")
         sgn = np.random.choice([-1, 1], (100,)).astype("float64")
-        y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype(
+            "float64"
+        )
+
+
+class TestElementwiseMaxFP16Op_Vector(TestElementwiseOp):
+    def init_data(self):
+        self.x = np.random.random((100,)).astype("float16")
+        sgn = np.random.choice([-1, 1], (100,)).astype("float16")
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype(
+            "float16"
+        )
+
+
+class TestElementwiseMaxBF16Op_Vector(TestElementwiseBF16Op):
+    def init_data(self):
+        self.x = np.random.random((100,)).astype("float32")
+        sgn = np.random.choice([-1, 1], (100,)).astype("float32")
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype(
+            "float32"
+        )


 class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
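
The numeric_grad_delta=0.05 overrides in the bf16 zero-dim test above loosen the finite-difference step (OpTest's default is an order of magnitude smaller): with only a 7-bit mantissa, values rounded to bfloat16 make small-step numeric gradients noisy. A back-of-the-envelope illustration, assuming bf16 modeled as truncated float32 and f as a stand-in op:

import numpy as np

def bf16(v):
    # truncate float32 to bfloat16 precision (see the round-trip sketch above)
    a = np.asarray(v, np.float32)
    return ((a.view(np.uint32) >> 16) << 16).view(np.float32)

f = lambda x: np.maximum(x, 0.3)  # stand-in for the op under test
x0 = np.float32(0.7)              # true gradient of f at x0 is 1.0

for h in (0.005, 0.05):           # small step vs. the test's larger delta
    # centered difference computed through bf16-rounded evaluations
    g = (bf16(f(x0 + h)) - bf16(f(x0 - h))) / (2 * h)
    print(h, g)                   # the larger step damps the rounding noise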
@@ -221,6 +289,26 @@ def setUp(self):
         }


+class TestElementwiseMaxFP16Op_broadcast_0(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
+        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
+        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+
+        self.attrs = {'axis': 0}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
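
In these broadcast tests the legacy 'axis' attr tells the elementwise op where Y's dimensions line up inside X's shape, and the expected output is built by reshaping Y the same way, as the reshape(100, 1, 1) above does. A small numpy sketch of the equivalence (illustrative; eager-mode paddle.maximum instead follows standard trailing-dimension broadcasting):

import numpy as np

x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
y = np.random.uniform(1, 2, (100,)).astype(np.float16)

# 'axis': 0 aligns y's one dimension with x's first dimension, i.e. the
# same result as broadcasting y reshaped to (100, 1, 1) against x
expected = np.maximum(x, y.reshape(100, 1, 1))
assert expected.shape == (100, 5, 2)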
@@ -241,6 +329,26 @@ def setUp(self):
         }


+class TestElementwiseMaxFP16Op_broadcast_1(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
+        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
+        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+
+        self.attrs = {'axis': 1}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -260,6 +368,25 @@ def setUp(self):
         }


+class TestElementwiseMaxFP16Op_broadcast_2(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
+        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
+        y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -280,6 +407,26 @@ def setUp(self):
         }


+class TestElementwiseMaxFP16Op_broadcast_3(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
+        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float16)
+        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+
+        self.attrs = {'axis': 1}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -293,5 +440,17 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


+class TestElementwiseFP16Op_broadcast_4(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
+        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float16)
+        y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float16)
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
 if __name__ == '__main__':
     unittest.main()
