Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

MSM improvements #372

Merged
merged 14 commits into dev from develop/dima/msm_improvements on
Feb 15, 2024
Prev Previous commit
Next Next commit
Merge branch 'dev' into develop/dima/msm_improvements
  • Loading branch information
DmytroTym committed Feb 15, 2024
commit 3fc3ea66f2e7addb414332fa0496d8ceb8d499e7
27 changes: 10 additions & 17 deletions wrappers/rust/icicle-core/src/msm/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,28 +31,21 @@ where
C::ScalarField: ArkConvertible<ArkEquivalent = <C::ArkSWConfig as ArkCurveConfig>::ScalarField>,
C::BaseField: ArkConvertible<ArkEquivalent = <C::ArkSWConfig as ArkCurveConfig>::BaseField>,
{
let test_sizes = [4, 8, 16, 32, 64, 128, 256, 1000, 1 << 18];
let mut msm_results = HostOrDeviceSlice::cuda_malloc(1).unwrap();
for test_size in test_sizes {
let points = generate_random_affine_points_with_zeroes(test_size, 2);
let scalars = <C::ScalarField as FieldImpl>::Config::generate_random(test_size);
let points_ark: Vec<_> = points
.iter()
.map(|x| x.to_ark())
.collect();
let scalars_ark: Vec<_> = scalars
.iter()
.map(|x| x.to_ark())
.collect();
// if we simply transmute arkworks types, we'll get scalars or points in Montgomery format
// (just beware the possible extra flag in affine point types, can't transmute ark Affine because of that)
let scalars_mont = unsafe { &*(&scalars_ark[..] as *const _ as *const [C::ScalarField]) };
let device_count = get_device_count().unwrap();
(0..device_count) // TODO: this is proto-loadbalancer
.into_par_iter()
.for_each(move |device_id| {
//TODO: currently supported multi-GPU workflow:
// 1) User starts child host thread from parent host thread
// 2) Calls set_device once with selected device_id (0 is default device .. < device_count)
// 3) Perform all operations (without changing device on the thread)
// 4) If necessary - export results to parent host thread

set_device(device_id).unwrap();
let test_sizes = [4, 8, 16, 32, 64, 128, 256, 1000, 1 << 18];
let mut msm_results = HostOrDeviceSlice::cuda_malloc(1).unwrap();
for test_size in test_sizes {
let points = C::generate_random_affine_points(test_size);
let points = generate_random_affine_points_with_zeroes(test_size, 2);
let scalars = <C::ScalarField as FieldImpl>::Config::generate_random(test_size);
let points_ark: Vec<_> = points
.iter()
Expand Down
Loading
You are viewing a condensed version of this merge commit. You can view the full changes here.