struct MyModel {
    l1: Conv2d,
    l2: Conv2d,
    l3: Linear,
    l4: Linear,
}

impl MyModel {
    fn new(mem: &mut Memory) -> MyModel {
        let l1 = Conv2d::new(mem, 5, 1, 10, 1);
        let l2 = Conv2d::new(mem, 5, 10, 20, 1);
        let l3 = Linear::new(mem, 320, 64);
        let l4 = Linear::new(mem, 64, 10);
        Self { l1, l2, l3, l4 }
    }
}
impl Compute for MyModel {
    fn forward(&self, mem: &Memory, input: &Tensor) -> Tensor {
        let mut o = self.l1.forward(mem, input);
        o = o.max_pool2d_default(2);
        o = self.l2.forward(mem, &o);
        o = o.max_pool2d_default(2);
        o = o.flat_view();
        o = self.l3.forward(mem, &o);
        o = o.relu();
        o = self.l4.forward(mem, &o);
        o
    }
}
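The 320 input width of l3 follows from the shape arithmetic of the two convolution/pooling stages on a 28x28 MNIST image: 28 -> 24 -> 12 -> 8 -> 4, with 20 output channels, so 20 * 4 * 4 = 320. The following is a minimal sketch of that arithmetic using plain tch calls (zero-filled dummy tensors, no Memory wrapper; shapes chosen only to match the layers above):

use tch::{Device, Kind, Tensor};

fn main() {
    // One 28x28 single-channel image, batch size 1.
    let x = Tensor::zeros(&[1, 1, 28, 28], (Kind::Float, Device::Cpu));
    // Dummy kernels with the same shapes as l1 and l2 above.
    let k1 = Tensor::zeros(&[10, 1, 5, 5], (Kind::Float, Device::Cpu));
    let k2 = Tensor::zeros(&[20, 10, 5, 5], (Kind::Float, Device::Cpu));
    // 28 -> conv 5x5 -> 24 -> pool 2 -> 12 -> conv 5x5 -> 8 -> pool 2 -> 4
    let o = x
        .conv2d(&k1, None::<Tensor>, &[1], &[0], &[1], 1)
        .max_pool2d_default(2)
        .conv2d(&k2, None::<Tensor>, &[1], &[0], &[1], 1)
        .max_pool2d_default(2);
    assert_eq!(o.size(), vec![1, 20, 4, 4]); // flattening gives 20 * 4 * 4 = 320
}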
fn main() {
    let (mut x, y) = load_mnist();
    x = x / 250.0;
    x = x.view([-1, 1, 28, 28]);

    let mut m = Memory::new();
    let mymodel = MyModel::new(&mut m);
    train(&mut m, &x, &y, &mymodel, 20, 512, cross_entropy, 0.0001);
    let out = mymodel.forward(&m, &x);
    println!("Accuracy: {}", accuracy(&y, &out));
}
Formula 1 (the luminosity grayscale conversion): Gray = 0.2989 * R + 0.5870 * G + 0.1140 * B
Gaussian smoothing kernel (3x3): (1/16) * [[1, 2, 1], [2, 4, 2], [1, 2, 1]]
Laplacian kernel (3x3): [[0, 1, 0], [1, -4, 1], [0, 1, 0]]
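As a quick sanity check on those two kernels (a small sketch, kernel values as listed above): the normalized Gaussian kernel sums to 1, so it only redistributes brightness, while the Laplacian kernel sums to 0, so it responds only to local change.

use tch::{Kind, Tensor};

fn main() {
    let gauss = Tensor::from_slice(&[1.0, 2.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 1.0]) / 16.0;
    let laplace = Tensor::from_slice(&[0.0, 1.0, 0.0, 1.0, -4.0, 1.0, 0.0, 1.0, 0.0]);
    // Smoothing kernels preserve total intensity; edge kernels cancel out on flat regions.
    assert!((gauss.sum(Kind::Float).double_value(&[]) - 1.0).abs() < 1e-6);
    assert_eq!(laplace.sum(Kind::Float).double_value(&[]), 0.0);
}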
pub fn conv2d<T: Borrow<Tensor>>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: impl IntList,
    padding: impl IntList,
    dilation: impl IntList,
    groups: i64,
) -> Tensor
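To make the parameter layout concrete, here is a small usage sketch (shapes chosen purely for illustration): stride, padding and dilation are passed as integer lists, and the bias is an Option, so it can be omitted with None::<Tensor>.

use tch::{Device, Kind, Tensor};

fn main() {
    // A 1x1x5x5 input and a single 1x1x3x3 kernel.
    let input = Tensor::ones(&[1, 1, 5, 5], (Kind::Float, Device::Cpu));
    let kernel = Tensor::ones(&[1, 1, 3, 3], (Kind::Float, Device::Cpu));
    // Stride 1, padding 1 (keeps the spatial size), dilation 1, one group, no bias.
    let out = input.conv2d(&kernel, None::<Tensor>, &[1], &[1], &[1], 1);
    assert_eq!(out.size(), vec![1, 1, 5, 5]);
}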
use tch::{Tensor, vision::image, Kind, Device};
fn rgb_to_grayscale(tensor: &Tensor) -> Tensor {
    let red_channel = tensor.get(0);
    let green_channel = tensor.get(1);
    let blue_channel = tensor.get(2);
    // Compute the grayscale tensor with the luminosity formula.
    let grayscale = (red_channel * 0.2989) + (green_channel * 0.5870) + (blue_channel * 0.1140);
    grayscale.unsqueeze(0)
}
fn main() {
    let mut img = image::load("mypic.jpg").expect("Failed to open image");
    img = rgb_to_grayscale(&img).reshape(&[1, 1, 1024, 1024]);
    let bias: Tensor = Tensor::full(&[1], 0.0, (Kind::Float, Device::Cpu));

    // Define and apply the Gaussian smoothing kernel.
    let mut k1 = [1.0, 2.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 1.0];
    for element in k1.iter_mut() {
        *element /= 16.0;
    }
    let kernel1 = Tensor::from_slice(&k1)
        .reshape(&[1, 1, 3, 3])
        .to_kind(Kind::Float);
    img = img.conv2d(&kernel1, Some(&bias), &[1], &[0], &[1], 1);

    // Define and apply the Laplacian kernel.
    let k2 = [0.0, 1.0, 0.0, 1.0, -4.0, 1.0, 0.0, 1.0, 0.0];
    let kernel2 = Tensor::from_slice(&k2)
        .reshape(&[1, 1, 3, 3])
        .to_kind(Kind::Float);
    img = img.conv2d(&kernel2, Some(&bias), &[1], &[0], &[1], 1);

    image::save(&img, "filtered.jpg").expect("Failed to save image");
}
pub struct Conv2d {
    params: HashMap<String, usize>, // parameter name -> index of the tensor stored in Memory
}

impl Conv2d {
    pub fn new(mem: &mut Memory, kernel_size: i64, in_channel: i64, out_channel: i64, stride: i64) -> Self {
        let mut p = HashMap::new();
        p.insert("kernel".to_string(), mem.new_push(&[out_channel, in_channel, kernel_size, kernel_size], true));
        p.insert("bias".to_string(), mem.push(Tensor::full(&[out_channel], 0.0, (Kind::Float, Device::Cpu)).requires_grad_(true)));
        p.insert("stride".to_string(), mem.push(Tensor::from(stride)));
        Self { params: p }
    }
}
impl Compute for Conv2d {
    fn forward(&self, mem: &Memory, input: &Tensor) -> Tensor {
        // Fetch the layer's parameters back from the Memory store.
        let kernel = mem.get(self.params.get(&"kernel".to_string()).unwrap());
        let stride: i64 = mem.get(self.params.get(&"stride".to_string()).unwrap()).int64_value(&[]);
        let bias = mem.get(self.params.get(&"bias".to_string()).unwrap());
        // Same stride in both dimensions, no padding, no dilation, a single group.
        input.conv2d(&kernel, Some(bias), &[stride], &[0], &[1], 1)
    }
}
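Both the Conv2d layer above and the optimizer step below lean on a Memory store that is not reproduced in this excerpt. The following is only a hypothetical sketch reconstructed from the calls used here (push and new_push return a usize index, get hands the stored tensor back, and the optimizer walks values), not the actual definition:

use tch::{Device, Kind, Tensor};

// Hypothetical sketch of the parameter store the layers rely on.
pub struct Memory {
    size: usize,
    values: Vec<Tensor>,
}

impl Memory {
    pub fn new() -> Self {
        Self { size: 0, values: Vec::new() }
    }

    // Store an existing tensor and return its index.
    pub fn push(&mut self, value: Tensor) -> usize {
        self.values.push(value);
        self.size += 1;
        self.size - 1
    }

    // Create a randomly initialised trainable tensor of the given shape and store it.
    pub fn new_push(&mut self, size: &[i64], requires_grad: bool) -> usize {
        let t = Tensor::randn(size, (Kind::Float, Device::Cpu)).requires_grad_(requires_grad);
        self.push(t)
    }

    // Hand back a reference to the tensor stored at the given index.
    pub fn get(&self, index: &usize) -> &Tensor {
        &self.values[*index]
    }
}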
fn apply_grads_adam(&mut self, learning_rate: f32) {
    let mut g = Tensor::new();
    // A single decay rate is used for both moments (a simplification of full Adam).
    const BETA: f32 = 0.9;

    // One running first-moment (mom) and second-moment (velocity) slot per stored tensor.
    let mut velocity = Tensor::zeros(&[self.size as i64], (Kind::Float, Device::Cpu)).split(1, 0);
    let mut mom = Tensor::zeros(&[self.size as i64], (Kind::Float, Device::Cpu)).split(1, 0);
    let mut vel_corr = Tensor::zeros(&[self.size as i64], (Kind::Float, Device::Cpu)).split(1, 0);
    let mut mom_corr = Tensor::zeros(&[self.size as i64], (Kind::Float, Device::Cpu)).split(1, 0);
    let mut counter = 0;

    self.values
        .iter_mut()
        .for_each(|t| {
            if t.requires_grad() {
                g = t.grad();
                // Update the exponential moving averages of the gradient and the squared gradient.
                mom[counter] = BETA * &mom[counter] + (1.0 - BETA) * &g;
                velocity[counter] = BETA * &velocity[counter] + (1.0 - BETA) * (&g.pow(&Tensor::from(2)));
                // Bias-correct the moving averages (a fixed correction term rather than the step-dependent one of full Adam).
                mom_corr[counter] = &mom[counter] / (Tensor::from(1.0 - BETA).pow(&Tensor::from(2)));
                vel_corr[counter] = &velocity[counter] / (Tensor::from(1.0 - BETA).pow(&Tensor::from(2)));
                // Parameter step: corrected momentum scaled by the corrected RMS of the gradient.
                t.set_data(&(t.data() - learning_rate * (&mom_corr[counter] / (&vel_corr[counter].sqrt() + 0.0000001))));
                t.zero_grad();
            }
            counter += 1;
        });
}
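For reference, textbook Adam keeps two separate decay rates and bias-corrects by the running step count rather than by a fixed power. Below is a standalone sketch of that update for a single parameter tensor (an illustration only, not part of the Memory store above; the beta and epsilon values are the ones commonly quoted in the literature):

use tch::Tensor;

// One textbook Adam step for a single trainable tensor; `step` is 1-based.
fn adam_step(param: &mut Tensor, m: &mut Tensor, v: &mut Tensor, step: i32, lr: f32) {
    const BETA1: f32 = 0.9;
    const BETA2: f32 = 0.999;
    let g = param.grad();
    // Exponential moving averages of the gradient and the squared gradient.
    *m = BETA1 * &*m + (1.0 - BETA1) * &g;
    *v = BETA2 * &*v + (1.0 - BETA2) * &g.pow(&Tensor::from(2));
    // Step-dependent bias correction: divide by 1 - beta^step.
    let m_hat = &*m / Tensor::from(1.0 - BETA1.powi(step));
    let v_hat = &*v / Tensor::from(1.0 - BETA2.powi(step));
    // Parameter update with a small epsilon for numerical stability.
    param.set_data(&(param.data() - lr * (m_hat / (v_hat.sqrt() + 1e-8))));
    param.zero_grad();
}

Compared with the simplified loop above, the differences are the second decay rate for the squared-gradient average and the 1 - beta^step correction, whose effect fades as training progresses.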