Generics

Struct methods with generics


#![allow(unused)]

/// A point whose two coordinates share a single type `T`.
struct Point<T> {
    x: T,
    y: T,
}

impl<T> Point<T> {
    /// Borrow the `x` coordinate; available for every `T`.
    fn x(&self) -> &T {
        &self.x
    }
}

// Methods only available on f32's
impl Point<f32> {
    /// Euclidean distance from the origin (0.0, 0.0).
    fn distance_from_origin(&self) -> f32 {
        (self.x.powi(2) + self.y.powi(2)).sqrt()
    }
}

fn main() {}

Inference working with mixed types

The compiler infers that p3 is a Point<i32, char>

/// A point whose coordinates may have two different types.
struct Point<X1, Y1> {
    x: X1,
    y: Y1,
}

impl<X1, Y1> Point<X1, Y1> {
    /// Consumes both points and pairs `self`'s x with `other`'s y,
    /// mixing the two generic parameter sets into a `Point<X1, Y2>`.
    fn mixup<X2, Y2>(self, other: Point<X2, Y2>) -> Point<X1, Y2> {
        let Point { x, .. } = self;
        let Point { y, .. } = other;
        Point { x, y }
    }
}

fn main() {
    // The compiler infers Point<i32, f64> and Point<&str, char> from the
    // literal values; mixup then produces a Point<i32, char>.
    let p1 = Point { x: 5, y: 10.4 };
    let p2 = Point { x: "Hello", y: 'c' };
    let p3 = p1.mixup(p2);

    println!("p3.x = {}, p3.y = {}", p3.x, p3.y);
}

Monomorphization

This is where generic types are replaced with concrete types at compile time.

e.g. if `Option<T>` is used with an `i32` and an `f64`:


#![allow(unused)]
fn main() {
    // Each use of Some with a distinct concrete type stamps out its own
    // monomorphized copy: Option<i32> here, Option<f64> below.
    let integer = Some(5);
    let float = Some(5.0);
    // Option<T> implements Debug but not Display, so `{}` does not compile;
    // print with the `{:?}` debug formatter instead.
    println!("Wow: {:?} {:?}", integer, float);
}

At compile time the code will expand to:

// What the compiler effectively generates for Option<i32>: a dedicated,
// non-generic enum. (The snake_case names are illustrative only; the real
// compiler-internal names differ.)
enum Option_i32 {
    Some(i32),
    None,
}

// The separate monomorphized copy generated for Option<f64>.
enum Option_f64 {
    Some(f64),
    None,
}

fn main() {
    // Each call site now targets a concrete enum directly — no generic
    // dispatch happens at runtime.
    let integer = Option_i32::Some(5);
    let float = Option_f64::Some(5.0);
}

This removes any runtime cost for using generics, but at the price of longer compile times and a larger binary (one copy of the code per concrete type).