For example, if you have a pile of 2D `f64` points and want to add 1 to the x coordinate and multiply the y coordinate by 3.5, a layout like `points: &mut [(f64, f64)]` means adjacent memory locations have different operations performed on them. If they're instead stored like `xs: &mut [f64]` and `ys: &mut [f64]`, then adjacent memory locations have the same operation performed on them, and it is much easier to vectorise. This is especially true if both coordinates are used in a single operation (e.g. `x + y`, as when computing the length of a series of x/y points). For instance, in [the following code](http://is.gd/5p9g6s) (switch to [Release] and click [Asm]), `soa` vectorises (the inner loop uses `addpd` and `mulpd` instructions), but `aos` doesn't (the inner loop consists of a long series of `addsd`s and `mulsd`s):

```rust
#![crate_type = "lib"]

pub fn soa(xs: &mut [f64], ys: &mut [f64]) {
    // SoA: each slice is homogeneous, so adjacent memory gets the same
    // operation and the loop auto-vectorises.
    for (x, y) in xs.iter_mut().zip(ys.iter_mut()) {
        *x += 1.0;
        *y *= 3.5;
    }
}

pub fn aos(points: &mut [(f64, f64)]) {
    // AoS: adjacent memory alternates between x and y, which need
    // different operations, so the loop stays scalar.
    for point in points.iter_mut() {
        point.0 += 1.0;
        point.1 *= 3.5;
    }
}
```
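If the data already lives in AoS form, it can be split into an SoA layout before the hot loop. Below is a minimal sketch (not from the original code) of that conversion using `Iterator::unzip`; it assumes the `soa` and `aos` functions above are in scope, and the `to_soa` helper name is made up for illustration:

```rust
// Hypothetical helper: split an AoS slice of points into two SoA Vecs.
fn to_soa(points: &[(f64, f64)]) -> (Vec<f64>, Vec<f64>) {
    points.iter().copied().unzip()
}

fn main() {
    let mut points = vec![(1.0, 2.0), (3.0, 4.0), (5.0, 6.0)];

    // AoS: operate on the tuples directly (scalar inner loop).
    aos(&mut points);

    // SoA: split into separate coordinate arrays, then operate
    // (vectorisable inner loop).
    let (mut xs, mut ys) = to_soa(&points);
    soa(&mut xs, &mut ys);

    println!("xs = {:?}, ys = {:?}", xs, ys);
}
```

The conversion itself costs a pass over the data, so it only pays off when the SoA layout is reused across many operations rather than rebuilt for each one.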