Fix excuted executed (PaddlePaddle#61153)
* Fix

* Fix
co63oc committed Jan 26, 2024
1 parent 707c915 commit e030ca5
Showing 10 changed files with 28 additions and 28 deletions.
2 changes: 1 addition & 1 deletion python/paddle/static/nn/static_pylayer.py
@@ -196,7 +196,7 @@ def _rename_var_recursively_(cur_block, var_old_to_new):
op._rename_output(old_var_name, new_var_name)

# NOTE(MarioLulab): block attr type with the name of "blocks" or "sub_block" indicates
- # the block might be excuted. We should rename the var name in these blocks recursively
+ # the block might be executed. We should rename the var name in these blocks recursively
block_attr_names = ["blocks", "sub_block"]

for op in cur_block.ops:
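The NOTE in this hunk is about recursing into sub-blocks when renaming variables. A self-contained toy sketch of that pattern (Op and Block here are hypothetical stand-ins, not Paddle's classes):

```python
# Toy model of the recursion the NOTE describes; Op/Block are stand-ins.
class Op:
    def __init__(self, inputs, outputs, sub_blocks=()):
        self.inputs = list(inputs)
        self.outputs = list(outputs)
        self.sub_blocks = list(sub_blocks)  # e.g. control-flow bodies

class Block:
    def __init__(self, ops):
        self.ops = ops

def rename_vars_recursively(block, var_old_to_new):
    for op in block.ops:
        op.inputs = [var_old_to_new.get(v, v) for v in op.inputs]
        op.outputs = [var_old_to_new.get(v, v) for v in op.outputs]
        # A sub-block might be executed too, so rename inside it as well.
        for sub in op.sub_blocks:
            rename_vars_recursively(sub, var_old_to_new)

inner = Block([Op(["x"], ["y"])])
outer = Block([Op(["a"], ["x"], sub_blocks=[inner])])
rename_vars_recursively(outer, {"x": "x_new"})
assert inner.ops[0].inputs == ["x_new"]
```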
14 changes: 7 additions & 7 deletions python/paddle/static/quantization/post_training_quantization.py
@@ -1116,10 +1116,10 @@ def _init_sampling_act_histogram(self):
if var_name not in self._sampling_act_histogram:
min_val = self._sampling_act_abs_min_max[var_name][0]
max_val = self._sampling_act_abs_min_max[var_name][1]
- hist, hist_edeges = np.histogram(
+ hist, hist_edges = np.histogram(
[], bins=self._histogram_bins, range=(min_val, max_val)
)
- self._sampling_act_histogram[var_name] = [hist, hist_edeges]
+ self._sampling_act_histogram[var_name] = [hist, hist_edges]

def _calculate_kl_hist_threshold(self):
'''
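For reference, `np.histogram` called on an empty sample with a fixed range, as in `_init_sampling_act_histogram` above, yields zero counts but final bin edges, so later activation batches can be accumulated into a stable set of bins:

```python
import numpy as np

# Empty data + fixed range: zero counts, but usable bin edges.
hist, hist_edges = np.histogram([], bins=4, range=(0.0, 1.0))
print(hist)        # [0 0 0 0]
print(hist_edges)  # [0.   0.25 0.5  0.75 1.  ]
```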
@@ -1155,16 +1155,16 @@ def _calculate_kl_hist_threshold(self):
var_name not in self._sampling_act_histogram
):
continue
- hist, hist_edeges = self._sampling_act_histogram[var_name]
+ hist, hist_edges = self._sampling_act_histogram[var_name]
if self._algo == "KL":
- bin_width = hist_edeges[1] - hist_edeges[0]
+ bin_width = hist_edges[1] - hist_edges[0]
self._quantized_var_threshold[var_name] = cal_kl_threshold(
hist, bin_width, self._activation_bits
)
elif self._algo == "hist":
self._quantized_var_threshold[
var_name
- ] = self._get_hist_scaling_factor(hist, hist_edeges)
+ ] = self._get_hist_scaling_factor(hist, hist_edges)

def _update_program(self):
'''
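A hedged sketch of how the KL branch consumes such a histogram (random data and bin count are made up; `cal_kl_threshold` is the helper called above, its import path assumed, so the call is left commented):

```python
import numpy as np

# Build an activation histogram and its bin width, as the KL branch does.
# from paddle.static.quantization import cal_kl_threshold  # path assumed
acts = np.abs(np.random.randn(10000)).astype('float32')
hist, hist_edges = np.histogram(acts, bins=2048, range=(0, acts.max()))
bin_width = hist_edges[1] - hist_edges[0]
# threshold = cal_kl_threshold(hist, bin_width, 8)  # activation_bits = 8
```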
@@ -1995,7 +1995,7 @@ def _mul_channel_wise_dequantization(self, quantized_weight_data, scales):

def _calculate_threshold(self, input, threshold_rate, histogram_bins=5000):
input_abs = np.abs(input)
- hist, hist_edeges = np.histogram(
+ hist, hist_edges = np.histogram(
input_abs, bins=histogram_bins, range=(0, np.max(input_abs))
)
hist = hist / float(sum(hist))
@@ -2006,5 +2006,5 @@ def _calculate_threshold(self, input, threshold_rate, histogram_bins=5000):
if hist_sum >= 1.0 - threshold_rate:
hist_index = i + 1
break
- bin_width = hist_edeges[1] - hist_edeges[0]
+ bin_width = hist_edges[1] - hist_edges[0]
return hist_index * bin_width
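The same `_calculate_threshold` logic as a standalone function (a sketch for illustration, not library code): pick the first bin edge whose cumulative mass reaches `1 - threshold_rate`.

```python
import numpy as np

def calculate_threshold(data, threshold_rate, histogram_bins=5000):
    data_abs = np.abs(data)
    hist, hist_edges = np.histogram(
        data_abs, bins=histogram_bins, range=(0, np.max(data_abs))
    )
    hist = hist / float(hist.sum())
    # First index where the cumulative mass reaches 1 - threshold_rate.
    hist_index = int(np.searchsorted(np.cumsum(hist), 1.0 - threshold_rate)) + 1
    bin_width = hist_edges[1] - hist_edges[0]
    return hist_index * bin_width

print(calculate_threshold(np.random.randn(10000), threshold_rate=0.01))
```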
@@ -84,7 +84,7 @@ def __init__(
self._gru_ops = ['fusion_gru', 'multi_gru']
self._lstm_ops = ['fusion_lstm']
self._weight_thresholds = {}
- # Collect the Input and Output sclaes from Fake quant models
+ # Collect the Input and Output scales from Fake quant models
self._var_quant_scales = {}
self._max_range = {}
self._s8_max = 127
6 changes: 3 additions & 3 deletions python/paddle/static/quantization/quantization_pass.py
@@ -165,7 +165,7 @@ def __init__(
preprocess method works or not. The function's input is non-quantized
activation and function returns processed activation to be quantized.
If None, the activation will be quantized directly. Default is None.
- optimizer_func(function): Fuction return a optimizer. When 'is_test' is
+ optimizer_func(function): Function return a optimizer. When 'is_test' is
False and user want to use self-defined quantization function and
preprocess function, this function must be set. Default is None.
executor(base.Executor): If user want to use self-defined quantization
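As an illustration of the `optimizer_func(function)` argument documented in this hunk, a callable returning an optimizer might look like this (the zero-argument call signature is an assumption based on the docstring wording):

```python
import paddle

def optimizer_func():
    # Any paddle optimizer works; SGD is just an example.
    return paddle.optimizer.SGD(learning_rate=0.001)
```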
@@ -414,7 +414,7 @@ def _has_weight(op):
if not self._is_test:
self._create_global_step(graph)
ops = graph.all_op_nodes()
- # Do the preproccess of quantization, such as skipping some ops
+ # Do the preprocess of quantization, such as skipping some ops
# for not being quantized.
for op in ops:
if (
@@ -3597,7 +3597,7 @@ def _var_name_order(self, graph):

def _insert_quant_dequant(self, graph, var_node, op):
"""
- Insert per tensort quantize_linear and dequantize_linear node between var_node and op
+ Insert per tensor quantize_linear and dequantize_linear node between var_node and op
"""
insert_quant_pass = InsertQuantizeLinear(
self._place,
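For intuition, what a per-tensor quantize_linear/dequantize_linear pair computes, sketched in numpy (simplified to the symmetric, zero-point-0 case):

```python
import numpy as np

def quantize_linear(x, scale, bits=8):
    qmax = 2 ** (bits - 1) - 1  # 127 for int8
    return np.clip(np.round(x / scale), -qmax, qmax)

def dequantize_linear(q, scale):
    return q * scale

x = np.array([-1.0, -0.5, 0.0, 0.26, 1.0], dtype=np.float32)
scale = np.abs(x).max() / 127  # one scale for the whole tensor
print(dequantize_linear(quantize_linear(x, scale), scale))
```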
2 changes: 1 addition & 1 deletion python/paddle/tensor/attribute.py
@@ -212,7 +212,7 @@ def is_floating_point(x):


def is_integer(x):
"""Return whether x is a tensor of integeral data type.
"""Return whether x is a tensor of integral data type.
Args:
x (Tensor): The input tensor.
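A quick dygraph check of the documented behavior:

```python
import paddle

x = paddle.to_tensor([1, 2, 3], dtype='int32')
print(paddle.is_integer(x))                    # True
print(paddle.is_integer(x.astype('float32')))  # False
```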
8 changes: 4 additions & 4 deletions python/paddle/tensor/einsum.py
@@ -646,10 +646,10 @@ def plan_einsum(operands, g_view, g_shape, g_supports, g_count, n_bcast):
# I... are aligned and not to be combined immediately
# J... are not aligned and not to be combined immediately
# K... are aligned and should be immediately combined
- # At this point the non-trivial broadcast dimensinos in K are already reduced
+ # At this point the non-trivial broadcast dimensions in K are already reduced
# and removed. That means all K dimensions are aligned and their sizes are not 1.
# We then inspect the layout of I,J,K plus the above observation to make
- # specializatoin decisions. The current strategy is set as follows:
+ # specialization decisions. The current strategy is set as follows:
# (1) if I... J... K... are all empty, it's multiplying a scalar
# (2) if K... are empty, better use a broadcast
# (3) if I... J... empty and K... not empty, a vector-vector multiply (or a dot)
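The first cases in this strategy map onto familiar einsum calls, for example (a sketch using the public `paddle.einsum` API that this planner backs):

```python
import paddle

a = paddle.rand([4])
b = paddle.rand([4])
scalar = paddle.einsum('i,i->', a, b)    # case (3): a vector-vector dot
outer = paddle.einsum('i,j->ij', a, b)   # no combined (K) dims: a broadcast/outer product
```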
@@ -748,7 +748,7 @@ def parse_fake_shape(equation, operands, labels):
def fake_shape(ori_label, label, op):
"""
1. ori_label is the original labels, not aligned by '....'
- 2. if the '...' is evalulated to empty list, there is no '.' in label
+ 2. if the '...' is evaluated to empty list, there is no '.' in label
"""
assert len(op.shape) == len(label), (
"length of shape and length of label must be the same, but received %d != %d"
@@ -802,7 +802,7 @@ def einsum_v2(equation, *operands):
"""
einsum v2 implementation.
1. Implement C++ EinsumOp.
- 2. V2 create the EinsumOp to calculate, so just a little verifty work in python.
+ 2. V2 create the EinsumOp to calculate, so just a little verify work in python.
3. V2 use opt_einsum.contract_path to optimize the multivariable einsum.
"""
n_op = len(operands)
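Point 3 of the docstring refers to opt_einsum; its path planner can be exercised directly on numpy operands (a sketch of the call the docstring names):

```python
import numpy as np
import opt_einsum

a, b, c = (np.random.rand(8, 8) for _ in range(3))
path, info = opt_einsum.contract_path('ij,jk,kl->il', a, b, c)
print(path)  # e.g. [(0, 1), (0, 1)]: the pairwise contraction order
```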
6 changes: 3 additions & 3 deletions python/paddle/tensor/math.py
@@ -1668,7 +1668,7 @@ def nan_to_num_(x, nan=0.0, posinf=None, neginf=None, name=None):
Please refer to :ref:`api_paddle_nan_to_num`.
"""
# NOTE(tiancaishaonvjituizi): it seems that paddle handles the dtype of python float number
- # incorrectly, so we have to explicitly contruct tensors here
+ # incorrectly, so we have to explicitly construct tensors here
posinf_value = paddle.full_like(x, float("+inf"))
neginf_value = paddle.full_like(x, float("-inf"))
nan = paddle.full_like(x, nan)
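The public counterpart of this in-place helper shows the effect of the explicitly constructed +/-inf tensors (replacement values described in the comment are the float32 defaults):

```python
import paddle

x = paddle.to_tensor([float('nan'), float('inf'), -float('inf'), 1.0])
# nan -> 0.0; +/-inf -> the dtype's largest/lowest finite value by default.
print(paddle.nan_to_num(x))
```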
@@ -2716,7 +2716,7 @@ def _check_input(x):
if len(x.shape) < 2:
raise ValueError(
"The input of inverse is expected to be a Tensor whose number "
"of dimensions is no less than 2. But reviced: %d, "
"of dimensions is no less than 2. But received: %d, "
"x's shape: %s." % (len(x.shape), x.shape)
)
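The check rejects anything below 2-D; a minimal legal call for contrast:

```python
import paddle

print(paddle.inverse(paddle.eye(3)))            # OK: a 3x3 matrix
# paddle.inverse(paddle.to_tensor([1.0, 2.0]))  # ValueError: ndim < 2
```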

@@ -6419,7 +6419,7 @@ def take(x, index, mode='raise', name=None):
Args:
x (Tensor): An N-D Tensor, its data type should be int32, int64, float32, float64.
index (Tensor): An N-D Tensor, its data type should be int32, int64.
- mode (str, optional): Specifies how out-of-bounds index will behave. the candicates are ``'raise'``, ``'wrap'`` and ``'clip'``.
+ mode (str, optional): Specifies how out-of-bounds index will behave. the candidates are ``'raise'``, ``'wrap'`` and ``'clip'``.
- ``'raise'``: raise an error (default);
- ``'wrap'``: wrap around;
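How the three modes treat an out-of-range index (expected outputs sketched in comments; 'clip' behavior mirrors numpy.take):

```python
import paddle

x = paddle.to_tensor([10, 20, 30, 40])
idx = paddle.to_tensor([0, 5])
print(paddle.take(x, idx, mode='wrap'))  # 5 % 4 == 1 -> [10, 20]
print(paddle.take(x, idx, mode='clip'))  # 5 clips to 3 -> [10, 40]
# paddle.take(x, idx, mode='raise')      # raises: index 5 out of range
```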
8 changes: 4 additions & 4 deletions python/paddle/tensor/random.py
@@ -225,7 +225,7 @@ def standard_gamma(x, name=None):
out_i \sim Gamma (alpha = x_i, beta = 1.0)
Args:
- x(Tensor): A tensor with rate parameter of standrad gamma Distribution. The data type
+ x(Tensor): A tensor with rate parameter of standard gamma Distribution. The data type
should be bfloat16, float16, float32, float64.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
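A minimal draw matching the formula above, out_i ~ Gamma(alpha=x_i, beta=1), assuming the develop-branch API shown in this hunk:

```python
import paddle

x = paddle.to_tensor([0.5, 2.0, 4.0])
sample = paddle.standard_gamma(x)  # one draw per element, same shape as x
```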
@@ -779,7 +779,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):

out = out * std + mean
if not in_dynamic_or_pir_mode():
- out.stop_grediant = True
+ out.stop_gradient = True
return out
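The `out * std + mean` line is the usual affine rescaling of a standard normal draw; written by hand it would read (a sketch):

```python
import paddle

# N(mean=1, std=2) from a standard normal, as the code above does internally.
out = paddle.standard_normal([2, 3]) * 2.0 + 1.0
```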


@@ -788,7 +788,7 @@ def normal_(x, mean=0.0, std=1.0, name=None):
"""
This is the inplace version of api ``normal``, which returns a Tensor filled
with random values sampled from a normal distribution. The output Tensor will
- be inplaced with input ``x``. Please refer to :ref:`api_tensor_noraml`.
+ be inplaced with input ``x``. Please refer to :ref:`api_tensor_normal`.
Args:
x(Tensor): The input tensor to be filled with random values.
@@ -799,7 +799,7 @@
std (float|Tensor, optional): The standard deviation of the output Tensor's normal distribution.
If ``std`` is float, all elements of the output Tensor shared the same standard deviation.
If ``std`` is a Tensor(data type supports float32, float64), it has per-element standard deviations.
- Defaule is 1.0
+ Default is 1.0
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
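Dygraph usage of the in-place variant documented here (assuming, as Paddle does for inplace APIs, that `normal_` is patched onto Tensor):

```python
import paddle

x = paddle.empty([2, 3])
x.normal_(mean=0.0, std=1.0)  # fills x in place with N(0, 1) samples
```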
2 changes: 1 addition & 1 deletion python/paddle/tensor/search.py
@@ -1239,7 +1239,7 @@ def top_p_sampling(x, ps, threshold=None, seed=None, name=None):
Args:
x(Tensor): A N-D Tensor with type float32, float16 and bfloat16.
ps(Tensor): A 1-D Tensor with type float32, float16 and bfloat16.
- it is the cumulative probalitity threshold to limit low probality input.
+ it is the cumulative probability threshold to limit low probability input.
threshold(Tensor): A 1-D Tensor with type float32, float16 and bfloat16.
it is the absolute probability threshold to limit input, it will take effect simultaneously with `ps`, if not set, the default value is 0.f.
seed(int, optional): the random seed,
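What the cumulative-probability (`ps`) cutoff means, sketched in numpy: keep the smallest set of highest-probability tokens whose mass reaches `ps`, then sample within it.

```python
import numpy as np

probs = np.array([0.5, 0.3, 0.15, 0.05])
order = np.argsort(-probs)
csum = np.cumsum(probs[order])
keep = csum - probs[order] < 0.8   # ps = 0.8 -> keeps the top two tokens
print(order[keep])                 # candidate token ids after the cutoff
```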
6 changes: 3 additions & 3 deletions python/paddle/utils/cpp_extension/extension_utils.py
@@ -331,7 +331,7 @@ def clean_object_if_change_cflags(so_path, extension):
"""
If already compiling source before, we should check whether cflags
have changed and delete the built object to re-compile the source
- even though source file content keeps unchanaged.
+ even though source file content keeps unchanged.
"""

def serialize(path, version_info):
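One way to detect "flags changed, source didn't" is to fingerprint the compile configuration alongside the built object (a hypothetical sketch, not this module's actual scheme):

```python
import hashlib
import json

def flags_fingerprint(cflags):
    # Stable hash of the flag list; a mismatch with the stored value means
    # the cached object must be rebuilt even if the sources are untouched.
    return hashlib.md5(json.dumps(sorted(cflags)).encode()).hexdigest()

print(flags_fingerprint(['-O3', '-DPADDLE_WITH_CUDA']))
```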
@@ -938,7 +938,7 @@ def get_build_directory(verbose=False):

def parse_op_info(op_name):
"""
- Parse input names and outpus detail information from registered custom op
+ Parse input names and outputs detail information from registered custom op
from OpInfoMap.
"""
if op_name not in OpProtoHolder.instance().op_proto_map:
@@ -1356,7 +1356,7 @@ def _jit_compile(file_path, verbose=False):

def parse_op_name_from(sources):
"""
- Parse registerring custom op name from sources.
+ Parse registering custom op name from sources.
"""

def regex(content):
