Fix a couple typos (huggingface#1451)
* Mixtral quantized instruct.

* Fix a couple typos.
LaurentMazare authored Dec 17, 2023
1 parent c630622 commit 1e86717
Showing 24 changed files with 44 additions and 42 deletions.
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -63,7 +63,7 @@ This documents the main changes to the `candle` crate.
[760](https://github.com/huggingface/candle/pull/760).
- Add the Segment-Anything Model (SAM) as an example
[773](https://github.com/huggingface/candle/pull/773).
-- TinyViT backbone for the segemnt anything example
+- TinyViT backbone for the segment anything example
[787](https://github.com/huggingface/candle/pull/787).
- Shape with holes support
[770](https://github.com/huggingface/candle/pull/770).
6 changes: 4 additions & 2 deletions README.md
@@ -144,8 +144,10 @@ And then head over to
## Useful External Resources
- [`candle-tutorial`](https://github.com/ToluClassics/candle-tutorial): A
very detailed tutorial showing how to convert a PyTorch model to Candle.
-- [`candle-lora`](https://github.com/EricLBuehler/candle-lora): Efficient and ergonomic LoRA implemenation for Candle. `candle-lora` has
-out-of-the-box LoRA support for many models from Candle, which can be found [here](https://github.com/EricLBuehler/candle-lora/tree/master/candle-lora-transformers/examples).
+- [`candle-lora`](https://github.com/EricLBuehler/candle-lora): Efficient and
+ergonomic LoRA implementation for Candle. `candle-lora` has
+out-of-the-box LoRA support for many models from Candle, which can be found
+[here](https://github.com/EricLBuehler/candle-lora/tree/master/candle-lora-transformers/examples).
- [`optimisers`](https://github.com/KGrewal1/optimisers): A collection of optimisers
including SGD with momentum, AdaGrad, AdaDelta, AdaMax, NAdam, RAdam, and RMSprop.
- [`candle-vllm`](https://github.com/EricLBuehler/candle-vllm): Efficient platform for inference and
File renamed without changes.
2 changes: 1 addition & 1 deletion candle-core/src/indexer.rs
@@ -64,7 +64,7 @@ impl Tensor {
#[derive(Debug)]
/// Generic structure used to index a slice of the tensor
pub enum TensorIndexer {
-/// This selects the elemnts for which an index has some specific value.
+/// This selects the elements for which an index has some specific value.
Select(usize),
/// This is a regular slice, purely indexing a chunk of the tensor
Narrow(Bound<usize>, Bound<usize>),
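As an aside on the hunk above: the `Select` and `Narrow` variants surface through candle's `IndexOp` trait (its export is visible in another hunk below). A minimal sketch, assuming the published crate name `candle_core` and standard indexing semantics:

```rust
use candle_core::{Device, IndexOp, Result, Tensor};

fn main() -> Result<()> {
    let t = Tensor::arange(0f32, 12f32, &Device::Cpu)?.reshape((3, 4))?;
    // `Select(1)`: picks the elements where the first index is 1, dropping that dim.
    let row = t.i(1)?;
    assert_eq!(row.dims(), &[4]);
    // `Narrow(..)`: a regular slice keeping a chunk of the tensor.
    let cols = t.i((.., 1..3))?;
    assert_eq!(cols.dims(), &[3, 2]);
    Ok(())
}
```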
4 changes: 2 additions & 2 deletions candle-core/src/quantized/avx.rs
@@ -353,7 +353,7 @@ pub(crate) fn vec_dot_q3k_q8k(n: usize, xs: &[BlockQ3K], ys: &[BlockQ8K]) -> Res
q3 = q3.add(32);

// Prepare low and high bits
-// We hardcode the shifts here to avoid loading them into a seperate register
+// We hardcode the shifts here to avoid loading them into a separate register
let q3l_0 = _mm256_and_si256(q3bits, m3);
let q3h_0 = if j == 0 {
_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 0)), 0)
@@ -586,7 +586,7 @@ pub(crate) fn vec_dot_q5k_q8k(n: usize, xs: &[BlockQ5K], ys: &[BlockQ8K]) -> Res
let q5bits = _mm256_loadu_si256(q5 as *const __m256i);
q5 = q5.add(32);

-//Similar to q3k we hardcode the shifts here to avoid loading them into a seperate register
+//Similar to q3k we hardcode the shifts here to avoid loading them into a separate register
let q5l_0 = _mm256_and_si256(q5bits, m4);
let q5l_0_shift_input = _mm256_and_si256(hbits, hmask);
let q5l_0_right_shift = match j {
2 changes: 1 addition & 1 deletion candle-core/src/quantized/gguf_file.rs
@@ -463,7 +463,7 @@ impl Content {
) -> Result<QTensor> {
let tensor_info = match self.tensor_infos.get(name) {
Some(tensor_info) => tensor_info,
-None => crate::bail!("cannot find tensor-infor for {name}"),
+None => crate::bail!("cannot find tensor info for {name}"),
};
tensor_info.read(reader, self.tensor_data_offset)
}
4 changes: 2 additions & 2 deletions candle-core/src/tensor.rs
@@ -1,4 +1,4 @@
-//! Tensors are N-dimenional matrixes of elements using a single data type.
+//! Tensors are N-dimensional matrixes of elements using a single data type.
#![allow(clippy::redundant_closure_call)]
use crate::backend::{BackendDevice, BackendStorage};
use crate::op::{
@@ -669,7 +669,7 @@ impl Tensor {
}

/// Split a tensor into the specified number of chunks, this may return less chunks than
-/// specificed.
+/// specified.
pub fn chunk<D: Dim>(&self, chunks: usize, dim: D) -> Result<Vec<Self>> {
let dim = dim.to_index(self.shape(), "chunk")?;
let size = self.dim(dim)?;
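A small usage sketch of the fewer-chunks-than-requested case the corrected doc comment warns about, assuming the usual PyTorch-style semantics (each element along the dim becomes its own chunk when the dim is shorter than the requested count) and the crate name `candle_core`:

```rust
use candle_core::{Device, Result, Tensor};

fn main() -> Result<()> {
    let t = Tensor::arange(0f32, 3f32, &Device::Cpu)?;
    // Asking for 5 chunks along a length-3 dimension can only yield 3 chunks.
    let chunks = t.chunk(5, 0)?;
    assert_eq!(chunks.len(), 3);
    Ok(())
}
```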
@@ -78,7 +78,7 @@ def step(self, action):
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
-# for Qbert somtimes we stay in lives == 0 condtion for a few frames
+# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so its important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
2 changes: 1 addition & 1 deletion candle-nn/src/var_builder.rs
@@ -40,7 +40,7 @@ struct TensorData<B: Backend> {
/// A trait that defines how tensor data is retrieved.
///
/// Typically this would use disk storage in some specific format, or random initialization.
-/// Note that there is a speciliazed version of this trait (`SimpleBackend`) that can be used most
+/// Note that there is a specialized version of this trait (`SimpleBackend`) that can be used most
/// of the time. The main restriction is that it doesn't allow for specific args (besides
/// initialization hints).
pub trait Backend: Send + Sync {
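To illustrate the `Backend`/`SimpleBackend` split the corrected comment describes, a hedged sketch using the zero-initialized builder (`VarBuilder::zeros`, `pp`, and `get` exist in candle-nn; the layer and tensor names here are made up for illustration):

```rust
use candle_core::{DType, Device};
use candle_nn::VarBuilder;

fn main() -> candle_core::Result<()> {
    // A simple backend: every `get` returns a zero tensor instead of reading disk.
    let vb = VarBuilder::zeros(DType::F32, &Device::Cpu);
    // Retrieval goes through the backend: a shape hint plus a dotted name.
    let w = vb.pp("layer1").get((2, 3), "weight")?;
    assert_eq!(w.dims(), &[2, 3]);
    Ok(())
}
```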
3 changes: 2 additions & 1 deletion candle-pyo3/py_src/candle/__init__.py
@@ -4,7 +4,8 @@
from .candle import *
except ImportError as e:
# If we are in development mode, or we did not bundle the DLLs, we try to locate them here
-# PyO3 wont give us any infomration about what DLLs are missing, so we can only try to load the DLLs and re-import the module
+# PyO3 wont give us any information about what DLLs are missing, so we can only try to load
+# the DLLs and re-import the module
logging.warning("DLLs were not bundled with this package. Trying to locate them...")
import os
import platform
4 changes: 2 additions & 2 deletions candle-pyo3/py_src/candle/nn/container.py
@@ -363,7 +363,7 @@ def extend(self, modules: Iterable[Module]) -> "ModuleList":
self.add_module(str(offset + i), module)
return self

-# remove forward alltogether to fallback on Module's _forward_unimplemented
+# remove forward altogether to fallback on Module's _forward_unimplemented


class ModuleDict(Module):
@@ -480,4 +480,4 @@ def update(self, modules: Mapping[str, Module]) -> None:
# that's too cumbersome to type correctly with overloads, so we add an ignore here
self[m[0]] = m[1] # type: ignore[assignment]

-# remove forward alltogether to fallback on Module's _forward_unimplemented
+# remove forward altogether to fallback on Module's _forward_unimplemented
17 changes: 8 additions & 9 deletions candle-pyo3/src/lib.rs
@@ -212,7 +212,7 @@ trait MapDType {
enum Indexer {
Index(usize),
Slice(usize, usize),
-Elipsis,
+Ellipsis,
Expand,
IndexSelect(Tensor),
}
@@ -568,7 +568,7 @@ impl PyTensor {
"Ellipsis ('...') can only be used at the start of an indexing operation",
));
}
-Ok((Indexer::Elipsis, dims.len() - (index_argument_count - 1)))
+Ok((Indexer::Ellipsis, dims.len() - (index_argument_count - 1)))
} else if py_indexer.is_none() {
// Handle None e.g. tensor[None, 0]
Ok((Indexer::Expand, current_dim))
@@ -616,8 +616,9 @@ impl PyTensor {
current_dim += 1;
out
}
-Indexer::Elipsis => {
-// Elipsis is a special case, it means that all remaining dimensions should be selected => advance the current_dim to the last dimension we have indexers for
+Indexer::Ellipsis => {
+// Ellipsis is a special case, it means that all remaining dimensions should be
+// selected => advance the current_dim to the last dimension we have indexers for
current_dim += dims.len() - (indexers.len() - 1);
x
}
@@ -960,11 +961,11 @@ impl PyTensor {
extraction_result: PyResult<T>,
err_msg: &'static str,
) -> PyResult<()> {
-if let Ok(sucessfull_extraction) = extraction_result {
+if let Ok(successful_extraction) = extraction_result {
if opt.is_some() {
return Err(PyValueError::new_err(err_msg));
}
-*opt = Some(sucessfull_extraction);
+*opt = Some(successful_extraction);
}
Ok(())
}
@@ -1045,9 +1046,7 @@ impl PyTensor {
.map_err(wrap_err)?,
(Some(device), None) => self.0.to_device(&device.as_device()?).map_err(wrap_err)?,
(None, Some(dtype)) => self.0.to_dtype(dtype.0).map_err(wrap_err)?,
-(None, None) => {
-return Err(PyTypeError::new_err("No valide dtype or device specified"))
-}
+(None, None) => return Err(PyTypeError::new_err("No valid dtype or device specified")),
};

Ok(PyTensor(result))
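The ellipsis bookkeeping in the lib.rs hunks above boils down to a single subtraction; a standalone sketch of that arithmetic (a hypothetical helper, not part of candle-pyo3):

```rust
// Given the tensor rank and the number of indexers (ellipsis included),
// `...` absorbs every dimension that has no explicit indexer of its own.
fn dims_covered_by_ellipsis(rank: usize, indexer_count: usize) -> usize {
    rank - (indexer_count - 1)
}

fn main() {
    // t[..., 0] on a rank-4 tensor: the ellipsis covers dims 0..3, so the
    // trailing index lands on dimension 3, matching `current_dim` above.
    assert_eq!(dims_covered_by_ellipsis(4, 2), 3);
}
```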
2 changes: 1 addition & 1 deletion candle-pyo3/stub.py
@@ -156,7 +156,7 @@ def process_additional_function(fn):
string += function(obj, indent)

elif inspect.isgetsetdescriptor(obj):
-# TODO it would be interesing to add the setter maybe ?
+# TODO it would be interesting to add the setter maybe ?
string += f"{indent}@property\n"
string += function(obj, indent, text_signature="(self)")

4 changes: 2 additions & 2 deletions candle-pyo3/tests/bindings/test_module.py
@@ -74,7 +74,7 @@ def __init__(self):
a.load_state_dict(statedict)


-def test_module_throws_on_shape_missmatch():
+def test_module_throws_on_shape_mismatch():
class A(Module):
def __init__(self):
super().__init__()
@@ -121,7 +121,7 @@ def __init__(self):
assert a.t.ggml_dtype == "Q4_0"


-def test_module_dequantizes_tensors_automaticaly():
+def test_module_dequantizes_tensors_automatically():
class A(Module):
def __init__(self):
super().__init__()
6 changes: 3 additions & 3 deletions candle-pyo3/tests/native/test_tensor.py
@@ -84,7 +84,7 @@ def assert_bool(t: Tensor, expected: bool):
assert bool(t.values()) == expected


-def test_tensor_supports_equality_opperations_with_scalars():
+def test_tensor_supports_equality_operations_with_scalars():
t = Tensor(42.0)

assert_bool(t == 42.0, True)
@@ -106,7 +106,7 @@ def test_tensor_supports_equality_opperations_with_scalars():
assert_bool(t <= 42.0, True)


-def test_tensor_supports_equality_opperations_with_tensors():
+def test_tensor_supports_equality_operations_with_tensors():
t = Tensor(42.0)
same = Tensor(42.0)
other = Tensor(43.0)
@@ -130,7 +130,7 @@ def test_tensor_supports_equality_opperations_with_tensors():
assert_bool(t <= other, True)


-def test_tensor_equality_opperations_can_broadcast():
+def test_tensor_equality_operations_can_broadcast():
# Create a decoder attention mask as a test case
# e.g.
# [[1,0,0]
@@ -182,7 +182,7 @@ impl MaskDecoder {
sparse_prompt_embeddings: &Tensor,
dense_prompt_embeddings: &Tensor,
) -> Result<(Tensor, Tensor)> {
-// Concatenate ouput tokens.
+// Concatenate output tokens.
let output_tokens = Tensor::cat(
&[self.iou_token.embeddings(), self.mask_tokens.embeddings()],
0,
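For reference, the `Tensor::cat` pattern used in the hunk above, in a minimal sketch (crate name `candle_core` assumed; the shapes are invented stand-ins for the IoU and mask token embeddings):

```rust
use candle_core::{DType, Device, Result, Tensor};

fn main() -> Result<()> {
    let iou = Tensor::zeros((1, 4), DType::F32, &Device::Cpu)?;
    let mask = Tensor::ones((3, 4), DType::F32, &Device::Cpu)?;
    // Concatenating along dim 0 stacks the token embeddings row-wise,
    // as in the output-token concat in MaskDecoder.
    let tokens = Tensor::cat(&[&iou, &mask], 0)?;
    assert_eq!(tokens.dims(), &[4, 4]);
    Ok(())
}
```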
@@ -2,11 +2,11 @@ use candle::{DType, IndexOp, Result, Tensor, D};
use candle_nn::VarBuilder;

#[derive(Debug)]
-struct PostionEmbeddingRandom {
+struct PositionEmbeddingRandom {
positional_encoding_gaussian_matrix: Tensor,
}

-impl PostionEmbeddingRandom {
+impl PositionEmbeddingRandom {
fn new(num_pos_feats: usize, vb: VarBuilder) -> Result<Self> {
let positional_encoding_gaussian_matrix =
vb.get((2, num_pos_feats), "positional_encoding_gaussian_matrix")?;
@@ -52,7 +52,7 @@ impl PostionEmbeddingRandom {

#[derive(Debug)]
pub struct PromptEncoder {
-pe_layer: PostionEmbeddingRandom,
+pe_layer: PositionEmbeddingRandom,
point_embeddings: Vec<candle_nn::Embedding>,
not_a_point_embed: candle_nn::Embedding,
mask_downscaling_conv1: candle_nn::Conv2d,
@@ -76,7 +76,7 @@ impl PromptEncoder {
vb: VarBuilder,
) -> Result<Self> {
let num_points_embeddings = 4;
-let pe_layer = PostionEmbeddingRandom::new(embed_dim / 2, vb.pp("pe_layer"))?;
+let pe_layer = PositionEmbeddingRandom::new(embed_dim / 2, vb.pp("pe_layer"))?;
let not_a_point_embed = candle_nn::embedding(1, embed_dim, vb.pp("not_a_point_embed"))?;
let no_mask_embed = candle_nn::embedding(1, embed_dim, vb.pp("no_mask_embed"))?;
let cfg = candle_nn::Conv2dConfig {
@@ -1,6 +1,6 @@
//! Ancestral sampling with Euler method steps.
//!
-//! Reference implemenation in Rust:
+//! Reference implementation in Rust:
//!
//! https://github.com/pykeio/diffusers/blob/250b9ad1898af41e76a74c0d8d4292652823338a/src/schedulers/euler_ancestral_discrete.rs
//!
@@ -135,7 +135,7 @@ impl EulerAncestralDiscreteScheduler {
);
sigmas_int.push(0.0);

-// standard deviation of the inital noise distribution
+// standard deviation of the initial noise distribution
// f64 does not implement Ord such that there is no `max`, so we need to use this workaround
let init_noise_sigma = *sigmas_int
.iter()
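The workaround the scheduler comment refers to, sketched standalone: `f64` is only `PartialOrd` (NaN breaks total ordering), so `Iterator::max` does not compile and a fold over `f64::max` is the usual substitute.

```rust
fn main() {
    let sigmas = [0.3_f64, 1.7, 0.0];
    // `sigmas.iter().max()` is rejected for f64; fold with f64::max instead.
    let init_noise_sigma = sigmas.iter().copied().fold(f64::NEG_INFINITY, f64::max);
    assert_eq!(init_noise_sigma, 1.7);
}
```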
2 changes: 1 addition & 1 deletion candle-wasm-examples/llama2-c/src/app.rs
@@ -108,7 +108,7 @@ impl Component for App {
fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
match msg {
Msg::SetModel(md) => {
-self.status = "weights loaded succesfully!".to_string();
+self.status = "weights loaded successfully!".to_string();
self.loaded = true;
console_log!("loaded weights");
self.worker.send(WorkerInput::ModelData(md));
2 changes: 1 addition & 1 deletion candle-wasm-examples/llama2-c/src/worker.rs
@@ -24,7 +24,7 @@ macro_rules! console_log {
}

// Communication to the worker happens through bincode, the model weights and configs are fetched
-// on the main thread and transfered via the following structure.
+// on the main thread and transferred via the following structure.
#[derive(Serialize, Deserialize)]
pub struct ModelData {
pub tokenizer: Vec<u8>,
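A hedged round-trip sketch of the bincode transfer this comment describes (the same pattern recurs in the whisper and yolo workers below; the struct is truncated to the one field the diff shows, and the bincode 1.x API is assumed):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
pub struct ModelData {
    pub tokenizer: Vec<u8>,
    // ...remaining fields elided in the diff above
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let md = ModelData { tokenizer: vec![1, 2, 3] };
    // Main thread: encode the payload before posting it to the worker...
    let bytes = bincode::serialize(&md)?;
    // ...worker: decode it on arrival.
    let back: ModelData = bincode::deserialize(&bytes)?;
    assert_eq!(back.tokenizer, vec![1, 2, 3]);
    Ok(())
}
```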
2 changes: 1 addition & 1 deletion candle-wasm-examples/whisper/src/app.rs
@@ -145,7 +145,7 @@ impl Component for App {
fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
match msg {
Msg::SetDecoder(md) => {
-self.status = "weights loaded succesfully!".to_string();
+self.status = "weights loaded successfully!".to_string();
self.loaded = true;
console_log!("loaded weights");
self.worker.send(WorkerInput::ModelData(md));
2 changes: 1 addition & 1 deletion candle-wasm-examples/whisper/src/worker.rs
@@ -414,7 +414,7 @@ pub enum Task {
}

// Communication to the worker happens through bincode, the model weights and configs are fetched
-// on the main thread and transfered via the following structure.
+// on the main thread and transferred via the following structure.
#[derive(Serialize, Deserialize)]
pub struct ModelData {
pub weights: Vec<u8>,
2 changes: 1 addition & 1 deletion candle-wasm-examples/yolo/src/app.rs
@@ -146,7 +146,7 @@ impl Component for App {
fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
match msg {
Msg::SetModel(md) => {
-self.status = "weights loaded succesfully!".to_string();
+self.status = "weights loaded successfully!".to_string();
self.loaded = true;
console_log!("loaded weights");
self.worker.send(WorkerInput::ModelData(md));
2 changes: 1 addition & 1 deletion candle-wasm-examples/yolo/src/worker.rs
@@ -21,7 +21,7 @@ macro_rules! console_log {
}

// Communication to the worker happens through bincode, the model weights and configs are fetched
-// on the main thread and transfered via the following structure.
+// on the main thread and transferred via the following structure.
#[derive(Serialize, Deserialize)]
pub struct ModelData {
pub weights: Vec<u8>,
