
Commit 03e719a

Adding more details on how to load things.
- Loading with memmap
- Loading a sharded tensor
- Moved some snippets to `candle-examples/src/lib.rs`. This is because managing book-specific dependencies is a pain (rust-lang/mdBook#706).
- This causes a non-aligned inclusion (rust-lang/mdBook#1856), which we have to ignore fmt to remove.

mdbook might need some more love :)
1 parent 37d6392 · commit 03e719a

File tree: 4 files changed (+143, -12 lines)


candle-book/src/inference/hub.md

Lines changed: 35 additions & 11 deletions
@@ -25,6 +25,8 @@ let weights = candle::safetensors::load(weights, &Device::Cpu);
 
 We now have access to all the [tensors](https://huggingface.co/bert-base-uncased?show_tensors=true) within the file.
 
+You can check all the names of the tensors [here](https://huggingface.co/bert-base-uncased?show_tensors=true).
+
 
 ## Using async
 
@@ -35,17 +37,9 @@ cargo add hf-hub --features tokio
 ```
 
 ```rust,ignore
-# extern crate candle;
-# extern crate hf_hub;
-use hf_hub::api::tokio::Api;
-use candle::Device;
-
-let api = Api::new().unwrap();
-let repo = api.model("bert-base-uncased".to_string());
-
-let weights = repo.get("model.safetensors").await.unwrap();
-
-let weights = candle::safetensors::load(weights, &Device::Cpu);
+# This is tested directly in the examples crate because it needs external dependencies, unfortunately:
+# See [this](https://github.com/rust-lang/mdBook/issues/706)
+{{#include ../../../candle-examples/src/lib.rs:book_hub_1}}
 ```
 
 
@@ -78,3 +72,33 @@ let output = linear.forward(&input_ids);
 ```
 
 For a full reference, you can check out the full [bert](https://github.com/LaurentMazare/candle/tree/main/candle-examples/examples/bert) example.
+
+## Memory mapping
+
+For more efficient loading, instead of reading the file, you could use [`memmap2`](https://docs.rs/memmap2/latest/memmap2/).
+
+**Note**: Be careful with memory mapping; it seems to cause issues on [Windows and WSL](https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/5893),
+and it will definitely be slower on a network-mounted disk, because it will issue more read calls.
+
+```rust,ignore
+{{#include ../../../candle-examples/src/lib.rs:book_hub_2}}
+```
+
+**Note**: This operation is **unsafe**. [See the safety notice](https://docs.rs/memmap2/latest/memmap2/struct.Mmap.html#safety).
+In practice, model files should never be modified, and the mmaps should be mostly read-only anyway, so the caveat most likely does not apply, but always keep it in mind.
+
+
+## Tensor Parallel Sharding
+
+When using multiple GPUs with tensor parallelism to get good latency, you can load only the part of the tensor you need.
+
+For that, you need to use [`safetensors`](https://crates.io/crates/safetensors) directly.
+
+```bash
+cargo add safetensors
+```
+
+
+```rust,ignore
+{{#include ../../../candle-examples/src/lib.rs:book_hub_3}}
+```
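The `{{#include}}` directives above pull in the `book_hub_*` anchors from `candle-examples/src/lib.rs`, shown at the bottom of this commit. Because the `book_hub_1` snippet awaits the download, a reader copying it into their own project needs an async runtime around it. Below is a minimal sketch of such a driver, assuming a binary that depends on `tokio` (with the `macros` and `rt-multi-thread` features) and `anyhow`, and assuming the relevant error types convert into `anyhow::Error`:

```rust
use candle::Device;
use hf_hub::api::tokio::Api;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Download `model.safetensors` from the Hub (cached after the first run).
    let api = Api::new()?;
    let repo = api.model("bert-base-uncased".to_string());
    let weights_filename = repo.get("model.safetensors").await?;

    // Load every tensor in the file onto the CPU.
    let weights = candle::safetensors::load(weights_filename, &Device::Cpu)?;
    println!("loaded {} tensors", weights.len());
    Ok(())
}
```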

candle-core/src/safetensors.rs

Lines changed: 5 additions & 1 deletion
@@ -242,7 +242,11 @@ fn convert_back(tensor: &Tensor) -> Result<Vec<u8>> {
 
 pub fn load<P: AsRef<Path>>(filename: P, device: &Device) -> Result<HashMap<String, Tensor>> {
     let data = std::fs::read(filename.as_ref())?;
-    let st = safetensors::SafeTensors::deserialize(&data)?;
+    load_buffer(&data[..], device)
+}
+
+pub fn load_buffer(data: &[u8], device: &Device) -> Result<HashMap<String, Tensor>> {
+    let st = safetensors::SafeTensors::deserialize(data)?;
     st.tensors()
         .into_iter()
         .map(|(name, view)| Ok((name, view.load(device)?)))
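Splitting `load` this way means any `&[u8]` can be deserialized, not just a freshly read file, which is what the memory-mapping section of the book relies on. Here is a minimal sketch of calling the new `load_buffer` on a memory-mapped file, assuming a locally available `model.safetensors` path plus the `memmap2` and `anyhow` crates (it mirrors the `book_hub_2` test further down):

```rust
use candle::Device;
use memmap2::Mmap;
use std::fs;

fn main() -> anyhow::Result<()> {
    // Map the file instead of reading it into a heap buffer.
    let file = fs::File::open("model.safetensors")?;
    // Safety: the file must not be modified while it is mapped.
    let mmap = unsafe { Mmap::map(&file)? };

    // `load_buffer` accepts any `&[u8]`, so the mapped bytes work directly.
    let weights = candle::safetensors::load_buffer(&mmap[..], &Device::Cpu)?;
    println!("loaded {} tensors", weights.len());
    Ok(())
}
```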

candle-examples/Cargo.toml

Lines changed: 4 additions & 0 deletions
@@ -26,6 +26,7 @@ half = { workspace = true, optional = true }
 [dev-dependencies]
 anyhow = { workspace = true }
 byteorder = { workspace = true }
+hf-hub = { workspace = true, features=["tokio"]}
 clap = { workspace = true }
 hf-hub = { workspace = true }
 memmap2 = { workspace = true }
@@ -35,6 +36,9 @@ tracing = { workspace = true }
 tracing-chrome = { workspace = true }
 tracing-subscriber = { workspace = true }
 wav = { workspace = true }
+# Necessary to disambiguate with tokio in wasm examples which are 1.28.1
+tokio = "1.29.1"
+memmap2.workspace = true
 
 [build-dependencies]
 anyhow = { workspace = true }

candle-examples/src/lib.rs

Lines changed: 99 additions & 0 deletions
@@ -11,3 +11,102 @@ pub fn device(cpu: bool) -> Result<Device> {
         Ok(device)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    // NOTE: Waiting on https://github.com/rust-lang/mdBook/pull/1856
+    #[rustfmt::skip]
+    #[tokio::test]
+    async fn book_hub_1() {
+// ANCHOR: book_hub_1
+use candle::Device;
+use hf_hub::api::tokio::Api;
+
+let api = Api::new().unwrap();
+let repo = api.model("bert-base-uncased".to_string());
+
+let weights_filename = repo.get("model.safetensors").await.unwrap();
+
+let weights = candle::safetensors::load(weights_filename, &Device::Cpu).unwrap();
+// ANCHOR_END: book_hub_1
+        assert_eq!(weights.len(), 206);
+    }
+
+    #[rustfmt::skip]
+    #[test]
+    fn book_hub_2() {
+// ANCHOR: book_hub_2
+use candle::Device;
+use hf_hub::api::sync::Api;
+use memmap2::Mmap;
+use std::fs;
+
+let api = Api::new().unwrap();
+let repo = api.model("bert-base-uncased".to_string());
+let weights_filename = repo.get("model.safetensors").unwrap();
+
+let file = fs::File::open(weights_filename).unwrap();
+let mmap = unsafe { Mmap::map(&file).unwrap() };
+let weights = candle::safetensors::load_buffer(&mmap[..], &Device::Cpu).unwrap();
+// ANCHOR_END: book_hub_2
+        assert_eq!(weights.len(), 206);
+    }
+
+    #[rustfmt::skip]
+    #[test]
+    fn book_hub_3() {
+// ANCHOR: book_hub_3
+use candle::{DType, Device, Tensor};
+use hf_hub::api::sync::Api;
+use memmap2::Mmap;
+use safetensors::slice::IndexOp;
+use safetensors::SafeTensors;
+use std::fs;
+
+let api = Api::new().unwrap();
+let repo = api.model("bert-base-uncased".to_string());
+let weights_filename = repo.get("model.safetensors").unwrap();
+
+let file = fs::File::open(weights_filename).unwrap();
+let mmap = unsafe { Mmap::map(&file).unwrap() };
+
+// Use safetensors directly
+let tensors = SafeTensors::deserialize(&mmap[..]).unwrap();
+let view = tensors
+    .tensor("bert.encoder.layer.0.attention.self.query.weight")
+    .unwrap();
+
+// We're going to load the shard with rank 1, within a world_size of 4
+// We're going to split along dimension 0 doing VIEW[start..stop, :]
+let rank = 1;
+let world_size = 4;
+let dim = 0;
+let dtype = view.dtype();
+let mut tp_shape = view.shape().to_vec();
+let size = tp_shape[0];
+
+if size % world_size != 0 {
+    panic!("The dimension is not divisible by `world_size`");
+}
+let block_size = size / world_size;
+let start = rank * block_size;
+let stop = (rank + 1) * block_size;
+
+// Everything is expressed in tensor dimensions;
+// byte offsets are handled automatically by safetensors.
+
+let iterator = view.slice(start..stop).unwrap();
+
+tp_shape[dim] = block_size;
+
+// Convert the safetensors Dtype to a candle DType
+let dtype: DType = dtype.try_into().unwrap();
+
+// TODO: Implement from_buffer_iterator so we can skip the extra CPU alloc.
+let raw: Vec<u8> = iterator.into_iter().flatten().cloned().collect();
+let tp_tensor = Tensor::from_raw_buffer(&raw, dtype, &tp_shape, &Device::Cpu).unwrap();
+// ANCHOR_END: book_hub_3
+        assert_eq!(view.shape(), &[768, 768]);
+        assert_eq!(tp_tensor.dims(), &[192, 768]);
+    }
+}
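To make the asserted shapes concrete: the query weight is `[768, 768]`, so with `world_size = 4` each rank owns `768 / 4 = 192` rows, and rank 1 takes rows `192..384`, which gives the `[192, 768]` shard checked above. A small self-contained sketch of that per-rank arithmetic follows; the `shard_range` helper is hypothetical, introduced only for illustration:

```rust
/// Hypothetical helper: compute the row range a given rank owns when a
/// dimension of length `size` is split evenly across `world_size` shards.
fn shard_range(rank: usize, world_size: usize, size: usize) -> (usize, usize) {
    assert!(size % world_size == 0, "dimension not divisible by world_size");
    let block_size = size / world_size;
    (rank * block_size, (rank + 1) * block_size)
}

fn main() {
    // Same numbers as book_hub_3: a [768, 768] weight, 4 shards, rank 1.
    let (start, stop) = shard_range(1, 4, 768);
    assert_eq!((start, stop), (192, 384));
    // The resulting shard therefore has shape [192, 768].
    println!("rank 1 owns rows {start}..{stop}");
}
```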
