Fix typo (avaliable -> available) (PaddlePaddle#61734)
* Fix

* ci
co63oc committed Feb 20, 2024
1 parent 43a2a60 commit 42dfe09
Showing 8 changed files with 27 additions and 27 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/the_one_ps.proto
@@ -88,7 +88,7 @@ message ServerServiceParameter {
   optional string client_class = 2 [ default = "BrpcPsClient" ];
   optional string service_class = 3 [ default = "BrpcPsService" ];
   optional uint32 start_server_port = 4
-      [ default = 0 ]; // will find a avaliable port from it
+      [ default = 0 ]; // will find a available port from it
   optional uint32 server_thread_num = 5 [ default = 12 ];
 }

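The comment fixed in this hunk describes the `start_server_port` semantics: starting from the configured value, the server probes for a free port, and a default of 0 lets the OS pick one. As a rough, generic illustration of that behavior, not Paddle's server code (`find_available_port` is a hypothetical helper):

    import socket

    def find_available_port(start_port: int) -> int:
        """Probe ports upward from start_port and return the first that binds."""
        port = start_port
        while port < 65536:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                try:
                    s.bind(("", port))
                    # Binding to 0 makes the OS assign an ephemeral port, so
                    # report the port actually bound rather than the request.
                    return s.getsockname()[1]
                except OSError:
                    port += 1
        raise RuntimeError("no available port found")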
4 changes: 2 additions & 2 deletions paddle/fluid/memory/allocation/naive_best_fit_allocator.cc
@@ -302,7 +302,7 @@ void *Alloc<platform::CUDAPlace>(const platform::CUDAPlace &place,
     size_t avail, total;
     platform::GpuMemoryUsage(&avail, &total);
     PADDLE_THROW(platform::errors::ResourceExhausted(
-        "Cannot allocate %s in GPU %d, avaliable %s, total %s, GpuMinChunkSize "
+        "Cannot allocate %s in GPU %d, available %s, total %s, GpuMinChunkSize "
         "%s, GpuMaxChunkSize %s, GPU memory used: %s.",
         string::HumanReadableSize(size),
         place.device,
@@ -503,7 +503,7 @@ void *Alloc<platform::CustomPlace>(const platform::CustomPlace &place,
     size_t avail, total;
     phi::DeviceManager::MemoryStats(place, &total, &avail);
     PADDLE_THROW(platform::errors::ResourceExhausted(
-        "Cannot allocate %s in %s:%d, avaliable %s, total %s, used "
+        "Cannot allocate %s in %s:%d, available %s, total %s, used "
         "%s. ",
         string::HumanReadableSize(size),
         place.GetDeviceType(),
4 changes: 2 additions & 2 deletions paddle/fluid/platform/device/ipu/ipu_device.h
@@ -20,10 +20,10 @@ namespace paddle {
 namespace platform {
 namespace ipu {
 
-// get the number of all avaliable IPUs
+// get the number of all available IPUs
 int GetNumDevices();
 
-// get the device id of all avaliable IPUs
+// get the device id of all available IPUs
 std::vector<int> GetDeviceIds();
 
 }  // namespace ipu
2 changes: 1 addition & 1 deletion paddle/phi/kernels/autotune/auto_tune_base.h
@@ -76,7 +76,7 @@ class AutoTuneBase {
     } else {
       bool use_autotune = AutoTuneStatus::Instance().UseAutoTune();
       if (use_autotune) {
-        // All avaliable kernels have ran while picking the best kernel,
+        // All available kernels have ran while picking the best kernel,
         // so there may be no need for another kernel run.
         auto best_idx = PickBestKernel(ctx, args...);
         cache.Set(key, best_idx);
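The comment fixed here documents the autotune flow: when tuning is enabled, picking the best kernel already executes every candidate, so the winner's index can be cached without an extra launch. A minimal Python sketch of that cache-then-reuse pattern, with hypothetical names rather than Paddle's `AutoTuneBase` API:

    import time

    def pick_best_kernel(kernels, *args):
        """Run every candidate once, timing each, and return the fastest index."""
        timings = []
        for kernel in kernels:
            start = time.perf_counter()
            kernel(*args)
            timings.append(time.perf_counter() - start)
        return min(range(len(kernels)), key=timings.__getitem__)

    def run(cache, key, kernels, use_autotune, *args):
        if key in cache:
            kernels[cache[key]](*args)  # cache hit: reuse the tuned choice
        elif use_autotune:
            # Picking the best kernel already ran all candidates, so no
            # further kernel run is needed for this call.
            cache[key] = pick_best_kernel(kernels, *args)
        else:
            kernels[0](*args)  # autotuning off: fall back to the default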
4 changes: 2 additions & 2 deletions paddle/phi/kernels/funcs/blas/blaslt_impl.cu.h
@@ -557,7 +557,7 @@ struct CublasLtBase {
         &returned_results));
     PADDLE_ENFORCE_GT(returned_results,
                       0,
-                      phi::errors::Unavailable("No GEMM algorithm avaliable."));
+                      phi::errors::Unavailable("No GEMM algorithm available."));
     int best_algo_idx = -1;
     if (returned_results == 1 || FLAGS_cublaslt_exhaustive_search_times <= 0) {
       best_algo_idx = 0;
@@ -748,7 +748,7 @@ struct CublasLtBase<int8_t, int32_t, MatmulDescriptor> {
         &returned_results));
     PADDLE_ENFORCE_GT(returned_results,
                       0,
-                      phi::errors::Unavailable("No GEMM algorithm avaliable."));
+                      phi::errors::Unavailable("No GEMM algorithm available."));
     int best_algo_idx = -1;
     if (returned_results == 1 || FLAGS_cublaslt_exhaustive_search_times <= 0) {
       best_algo_idx = 0;
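For context, the branch around this `PADDLE_ENFORCE_GT` requires the cuBLASLt heuristic to return at least one candidate algorithm, then takes the top suggestion directly unless exhaustive search is enabled. A schematic of that selection logic in Python, where `benchmark` is a hypothetical timing callback:

    def select_best_algo(candidates, exhaustive_search_times, benchmark):
        """Pick an index into the heuristic's candidate list."""
        assert len(candidates) > 0, "No GEMM algorithm available."
        # One candidate, or exhaustive search disabled: trust the heuristic.
        if len(candidates) == 1 or exhaustive_search_times <= 0:
            return 0
        # Otherwise time each candidate and keep the fastest.
        timings = [benchmark(algo) for algo in candidates]
        return min(range(len(candidates)), key=timings.__getitem__)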
2 changes: 1 addition & 1 deletion python/paddle/base/compiler.py
@@ -269,7 +269,7 @@ def _compile_data_parallel(self, places, use_device, scope=None):
             ), "DGC only used under CUDA environment."
             assert (
                 self._build_strategy.num_trainers * len(places) > 1
-            ), "DGC is not avaliable for single card training."
+            ), "DGC is not available for single card training."
             assert (
                 self._build_strategy.reduce_strategy
                 == BuildStrategy.ReduceStrategy.AllReduce
18 changes: 9 additions & 9 deletions python/paddle/base/framework.py
@@ -8087,12 +8087,12 @@ def _get_paddle_place(place):
         return core.Place()
 
     # GPU
-    avaliable_gpu_place = re.match(r'gpu:\d+', place)
-    if place == "gpu_pinned" or place == "gpu" or avaliable_gpu_place:
+    available_gpu_place = re.match(r'gpu:\d+', place)
+    if place == "gpu_pinned" or place == "gpu" or available_gpu_place:
         if not core.is_compiled_with_cuda():
             raise ValueError(
                 "The device should not be {}, since PaddlePaddle is "
-                "not compiled with CUDA".format(avaliable_gpu_place.group())
+                "not compiled with CUDA".format(available_gpu_place.group())
             )
         if place == "gpu_pinned":
             return core.CUDAPinnedPlace()
@@ -8105,25 +8105,25 @@ def _get_paddle_place(place):
         return core.CUDAPlace(device_id)
 
     # XPU
-    avaliable_xpu_place = re.match(r'xpu:\d+', place)
-    if avaliable_xpu_place:
+    available_xpu_place = re.match(r'xpu:\d+', place)
+    if available_xpu_place:
         if not core.is_compiled_with_xpu():
             raise ValueError(
                 "The device should not be {}, since PaddlePaddle is "
-                "not compiled with XPU".format(avaliable_xpu_place.group())
+                "not compiled with XPU".format(available_xpu_place.group())
             )
         place_info_list = place.split(':', 1)
         device_id = place_info_list[1]
         device_id = int(device_id)
         return core.XPUPlace(device_id)
 
     # IPU
-    avaliable_ipu_place = re.match(r'ipu:\d+', place)
-    if avaliable_ipu_place:
+    available_ipu_place = re.match(r'ipu:\d+', place)
+    if available_ipu_place:
         if not core.is_compiled_with_ipu():
             raise ValueError(
                 "The device should not be {}, since PaddlePaddle is "
-                "not compiled with IPU".format(avaliable_ipu_place.group())
+                "not compiled with IPU".format(available_ipu_place.group())
             )
         place_info_list = place.split(':', 1)
         device_id = place_info_list[1]
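Both hunks in this file rename variables inside the same device-string parsing pattern: an `re.match` guard followed by a split on ':' to extract the device id. A standalone sketch of that pattern (`parse_place` is a hypothetical name, not Paddle's API):

    import re

    def parse_place(place: str):
        """Parse 'gpu:0', 'xpu:1', 'ipu:2', ... into (device_type, device_id)."""
        for device_type in ("gpu", "xpu", "ipu"):
            if re.match(rf'{device_type}:\d+', place):
                device_id = int(place.split(':', 1)[1])
                return device_type, device_id
        raise ValueError(f"Unsupported place string: {place}")

    print(parse_place("gpu:0"))   # ('gpu', 0)
    print(parse_place("xpu:12"))  # ('xpu', 12)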
18 changes: 9 additions & 9 deletions python/paddle/device/__init__.py
@@ -220,29 +220,29 @@ def _convert_to_place(device):
             )
         place = core.IPUPlace()
     else:
-        avaliable_gpu_device = re.match(r'gpu:\d+', lower_device)
-        avaliable_xpu_device = re.match(r'xpu:\d+', lower_device)
-        if avaliable_gpu_device:
+        available_gpu_device = re.match(r'gpu:\d+', lower_device)
+        available_xpu_device = re.match(r'xpu:\d+', lower_device)
+        if available_gpu_device:
             if not core.is_compiled_with_cuda():
                 raise ValueError(
                     "The device should not be {}, since PaddlePaddle is "
-                    "not compiled with CUDA".format(avaliable_gpu_device)
+                    "not compiled with CUDA".format(available_gpu_device)
                 )
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]
             device_id = int(device_id)
             place = core.CUDAPlace(device_id)
-        if avaliable_xpu_device:
+        if available_xpu_device:
             if not core.is_compiled_with_xpu():
                 raise ValueError(
                     "The device should not be {}, since PaddlePaddle is "
-                    "not compiled with XPU".format(avaliable_xpu_device)
+                    "not compiled with XPU".format(available_xpu_device)
                 )
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]
             device_id = int(device_id)
             place = core.XPUPlace(device_id)
-        if not avaliable_gpu_device and not avaliable_xpu_device:
+        if not available_gpu_device and not available_xpu_device:
             device_info_list = device.split(':', 1)
             device_type = device_info_list[0]
             if device_type in core.get_all_custom_device_type():
@@ -299,8 +299,8 @@ def get_device():
     This function can get the current global device of the program is running.
     It's a string which is like 'cpu', 'gpu:x', 'xpu:x' and 'npu:x'. if the global device is not
-    set, it will return a string which is 'gpu:x' when cuda is avaliable or it
-    will return a string which is 'cpu' when cuda is not avaliable.
+    set, it will return a string which is 'gpu:x' when cuda is available or it
+    will return a string which is 'cpu' when cuda is not available.
 
     Examples:
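The docstring fixed above belongs to `paddle.device.get_device()`. A short usage sketch; the printed device strings are illustrative and depend on how Paddle was compiled:

    import paddle

    # Returns the current global device, e.g. 'gpu:0' when CUDA is
    # available, otherwise 'cpu'.
    print(paddle.device.get_device())

    # Pin the global device explicitly, then confirm the change.
    paddle.device.set_device('cpu')
    print(paddle.device.get_device())  # prints 'cpu'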
