From 38b20e5ca7def8a447dafc30e09551ef38682a23 Mon Sep 17 00:00:00 2001
From: Thiago Machado
Date: Wed, 6 Dec 2023 12:12:58 -0500
Subject: [PATCH] reduce tensor sizes for 'slow' tests

---
 dfdx-core/src/tensor_ops/conv2d/tests.rs      | 22 ++++++------
 dfdx-core/src/tensor_ops/convtrans2d/tests.rs | 10 +++---
 dfdx-core/src/tensor_ops/log_softmax.rs       |  2 +-
 dfdx-core/src/tensor_ops/matmul/mod.rs        | 34 +++++++++----------
 dfdx-core/src/tensor_ops/softmax.rs           |  2 +-
 5 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/dfdx-core/src/tensor_ops/conv2d/tests.rs b/dfdx-core/src/tensor_ops/conv2d/tests.rs
index 85603de8b..b7110a22f 100644
--- a/dfdx-core/src/tensor_ops/conv2d/tests.rs
+++ b/dfdx-core/src/tensor_ops/conv2d/tests.rs
@@ -218,10 +218,10 @@ fn test_conv2d_s4p3k2()
 #[test]
 fn test_batched_conv2d() {
     let dev: TestDevice = Default::default();
-    let x: Tensor<Rank3<3, 28, 28>, TestDtype, _> = dev.sample_normal();
+    let x: Tensor<Rank3<3, 14, 14>, TestDtype, _> = dev.sample_normal();
     let w: Tensor<Rank4<5, 3, 6, 6>, TestDtype, _> = dev.sample_normal();
 
-    let y: Tensor<Rank3<5, 9, 9>, _, _, _> =
+    let y: Tensor<Rank3<5, 5, 5>, _, _, _> =
         (x.leaky_trace(), w.clone()).conv2d(Const::<3>, Const::<2>, Const::<1>, Const::<1>);
     let y0 = y.retaped::<NoneTape>();
     let grads0 = y.square().mean().backward();
@@ -229,11 +229,11 @@ fn test_batched_conv2d() {
     let w0 = grads0.get(&w);
 
     let x = x
-        .broadcast::<Rank4<10, 3, 28, 28>, _>()
-        .reshape::<Rank4<10, 3, 28, 28>>();
+        .broadcast::<Rank4<10, 3, 14, 14>, _>()
+        .reshape::<Rank4<10, 3, 14, 14>>();
     assert_eq!(x.strides, x.shape.strides());
 
-    let y: Tensor<Rank4<10, 5, 9, 9>, _, _, _> =
+    let y: Tensor<Rank4<10, 5, 5, 5>, _, _, _> =
         (x.leaky_trace(), w.clone()).conv2d(Const::<3>, Const::<2>, Const::<1>, Const::<1>);
     for i in 0..10 {
         assert_close_to_tensor!(y0, y.retaped::<NoneTape>().select(dev.tensor(i)));
@@ -245,7 +245,7 @@ fn test_batched_conv2d() {
 
     let x_grad = grads.get(&x) * 10.0;
     for i in 0..10 {
-        assert_close_to_tensor!(x0, x_grad.clone().select(dev.tensor(i)));
+        assert_close_to_tensor!(x0, x_grad.clone().select(dev.tensor(i)), 3e-6);
     }
 }
 
@@ -405,7 +405,7 @@ fn test_conv2d_grouped()
 fn test_conv2d_grouped_slices() {
     const NUM_GROUPS: usize = 3;
     let dev: TestDevice = Default::default();
-    let x: Tensor<Rank4<2, 9, 14, 14>, TestDtype, _> = dev.sample_normal();
+    let x: Tensor<Rank4<2, 9, 3, 3>, TestDtype, _> = dev.sample_normal();
     let w: Tensor<Rank4<15, 3, 3, 3>, TestDtype, _> = dev.sample_normal();
 
     let y = (x.leaky_trace(), w.clone()).conv2d(
@@ -419,7 +419,7 @@ fn test_conv2d_grouped_slices() {
         let x_group = x
             .clone()
             .slice((.., 3 * i..3 * (i + 1), .., ..))
-            .realize::<(Const<2>, Const<3>, Const<14>, Const<14>)>();
+            .realize::<(Const<2>, Const<3>, Const<3>, Const<3>)>();
         let w_group = w
             .clone()
             .slice((5 * i..5 * (i + 1), .., .., ..))
@@ -428,7 +428,7 @@ fn test_conv2d_grouped_slices() {
         let y_group_true = y
             .retaped::<NoneTape>()
             .slice((.., 5 * i..5 * (i + 1), .., ..))
-            .realize::<(Const<2>, Const<5>, Const<12>, Const<12>)>();
+            .realize::<(Const<2>, Const<5>, Const<1>, Const<1>)>();
 
         assert_close_to_tensor!(y_group, y_group_true);
     }
@@ -440,7 +440,7 @@ fn test_conv2d_grouped_slices() {
         let x_group = x
             .clone()
             .slice((.., 3 * i..3 * (i + 1), .., ..))
-            .realize::<(Const<2>, Const<3>, Const<14>, Const<14>)>();
+            .realize::<(Const<2>, Const<3>, Const<3>, Const<3>)>();
         let w_group = w
             .clone()
             .slice((5 * i..5 * (i + 1), .., .., ..))
@@ -452,7 +452,7 @@ fn test_conv2d_grouped_slices() {
         let x_grad_group_true = x_grad
             .clone()
             .slice((.., 3 * i..3 * (i + 1), .., ..))
-            .realize::<(Const<2>, Const<3>, Const<14>, Const<14>)>();
+            .realize::<(Const<2>, Const<3>, Const<3>, Const<3>)>();
         let w_grad_group_true = w_grad
             .clone()
             .slice((5 * i..5 * (i + 1), .., .., ..))
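
[Note: the new realize::<...>() shapes above follow from the usual convolution output-size arithmetic. A minimal standalone sketch, not part of the patch; it assumes the grouped-slices test convolves with stride 1, padding 0, and dilation 1, since those call arguments fall outside the hunks:]

    // Hypothetical helper for checking the shapes in the hunks above.
    fn conv_out(input: usize, kernel: usize, stride: usize, padding: usize, dilation: usize) -> usize {
        (input + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1
    }

    fn main() {
        // old test: 14x14 input, 3x3 kernel -> 12x12 output, matching
        // the old realize::<(Const<2>, Const<5>, Const<12>, Const<12>)>()
        assert_eq!(conv_out(14, 3, 1, 0, 1), 12);
        // new test: 3x3 input, 3x3 kernel -> 1x1 output, matching
        // the new realize::<(Const<2>, Const<5>, Const<1>, Const<1>)>()
        assert_eq!(conv_out(3, 3, 1, 0, 1), 1);
    }
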
diff --git a/dfdx-core/src/tensor_ops/convtrans2d/tests.rs b/dfdx-core/src/tensor_ops/convtrans2d/tests.rs
index 3d64acbf0..c36702940 100644
--- a/dfdx-core/src/tensor_ops/convtrans2d/tests.rs
+++ b/dfdx-core/src/tensor_ops/convtrans2d/tests.rs
@@ -280,10 +280,10 @@ fn test_convtrans2d_padded()
 #[test]
 fn test_convtrans2d_batched() {
     let dev: TestDevice = Default::default();
-    let x: Tensor<Rank3<3, 28, 28>, TestDtype, _> = dev.sample_normal();
+    let x: Tensor<Rank3<3, 14, 14>, TestDtype, _> = dev.sample_normal();
     let w: Tensor<Rank4<3, 5, 6, 6>, TestDtype, _> = dev.sample_normal();
 
-    let y: Tensor<Rank3<5, 83, 83>, _, _, _> =
+    let y: Tensor<Rank3<5, 41, 41>, _, _, _> =
         (x.leaky_trace(), w.clone()).convtrans2d(Const::<3>, Const::<2>, Const::<1>, Const::<1>);
     let y0 = y.retaped::<NoneTape>();
     let grads0 = y.square().mean().backward();
@@ -291,10 +291,10 @@ fn test_convtrans2d_batched() {
     let w0 = grads0.get(&w);
 
     let x = x
-        .broadcast::<Rank4<10, 3, 28, 28>, _>()
-        .reshape::<Rank4<10, 3, 28, 28>>();
+        .broadcast::<Rank4<10, 3, 14, 14>, _>()
+        .reshape::<Rank4<10, 3, 14, 14>>();
 
-    let y: Tensor<Rank4<10, 5, 83, 83>, _, _, _> =
+    let y: Tensor<Rank4<10, 5, 41, 41>, _, _, _> =
         (x.leaky_trace(), w.clone()).convtrans2d(Const::<3>, Const::<2>, Const::<1>, Const::<1>);
     for i in 0..10 {
         assert_close_to_tensor!(y0, y.retaped::<NoneTape>().select(dev.tensor(i)), 1e-5);
diff --git a/dfdx-core/src/tensor_ops/log_softmax.rs b/dfdx-core/src/tensor_ops/log_softmax.rs
index 487c33e5a..d98bc3304 100644
--- a/dfdx-core/src/tensor_ops/log_softmax.rs
+++ b/dfdx-core/src/tensor_ops/log_softmax.rs
@@ -81,7 +81,7 @@ mod tests {
     #[test]
     fn test_log_softmax_equivalence() {
         let dev: TestDevice = Default::default();
-        let t: Tensor<Rank4<8, 16, 32, 64>, TestDtype, _> = dev.sample_normal();
+        let t: Tensor<Rank4<8, 16, 32, 32>, TestDtype, _> = dev.sample_normal();
         let p = t.leaky_trace().log_softmax::<Axis<3>>();
         let p_truth = t.leaky_trace() - t.leaky_trace().logsumexp::<_, Axis<3>>().broadcast();
         // we can't create an array as it will overflow the stack
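
[Note: the log_softmax test checks the identity log_softmax(x) = x - logsumexp(x), broadcast along the reduced axis, so shrinking the tensor changes nothing about what is verified. A minimal sketch of the same identity on a plain slice; this is a hypothetical helper, not the dfdx API:]

    // logsumexp with the usual max-subtraction trick for numerical stability
    fn logsumexp(xs: &[f64]) -> f64 {
        let m = xs.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
        m + xs.iter().map(|x| (x - m).exp()).sum::<f64>().ln()
    }

    fn main() {
        let xs = [1.0f64, 2.0, 3.0];
        let lse = logsumexp(&xs);
        // log_softmax(x)_i = x_i - logsumexp(x)
        let log_probs: Vec<f64> = xs.iter().map(|x| x - lse).collect();
        // exponentiating recovers probabilities that sum to 1
        let total: f64 = log_probs.iter().map(|l| l.exp()).sum();
        assert!((total - 1.0).abs() < 1e-12);
    }
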
diff --git a/dfdx-core/src/tensor_ops/matmul/mod.rs b/dfdx-core/src/tensor_ops/matmul/mod.rs
index 5e4d03b31..d133b9abe 100644
--- a/dfdx-core/src/tensor_ops/matmul/mod.rs
+++ b/dfdx-core/src/tensor_ops/matmul/mod.rs
@@ -346,21 +346,21 @@ mod tests {
         }
 
         {
-            let a: Tensor<Rank2<5, 3>, TestDtype, _> = dev.zeros();
+            let a: Tensor<Rank2<2, 3>, TestDtype, _> = dev.zeros();
             let b: Tensor<Rank2<3, 2>, TestDtype, _> = dev.zeros();
-            let _: Tensor<Rank2<5, 2>, TestDtype, _> = a.matmul(b);
+            let _: Tensor<Rank2<2, 2>, TestDtype, _> = a.matmul(b);
         }
 
         {
-            let a: Tensor<Rank3<10, 5, 3>, TestDtype, _> = dev.zeros();
-            let b: Tensor<Rank3<10, 3, 2>, TestDtype, _> = dev.zeros();
-            let _: Tensor<Rank3<10, 5, 2>, TestDtype, _> = a.matmul(b);
+            let a: Tensor<Rank3<2, 5, 3>, TestDtype, _> = dev.zeros();
+            let b: Tensor<Rank3<2, 3, 2>, TestDtype, _> = dev.zeros();
+            let _: Tensor<Rank3<2, 5, 2>, TestDtype, _> = a.matmul(b);
         }
 
         {
-            let a: Tensor<Rank4<10, 20, 5, 3>, TestDtype, _> = dev.zeros();
-            let b: Tensor<Rank4<10, 20, 3, 2>, TestDtype, _> = dev.zeros();
-            let _: Tensor<Rank4<10, 20, 5, 2>, TestDtype, _> = a.matmul(b);
+            let a: Tensor<Rank4<2, 3, 5, 3>, TestDtype, _> = dev.zeros();
+            let b: Tensor<Rank4<2, 3, 3, 2>, TestDtype, _> = dev.zeros();
+            let _: Tensor<Rank4<2, 3, 5, 2>, TestDtype, _> = a.matmul(b);
         }
     }
 
@@ -427,7 +427,7 @@ mod tests {
 
     #[test]
     fn test_matmul_broadcast() {
-        const N: usize = 5;
+        const N: usize = 2;
         let dev: TestDevice = Default::default();
         let a: Tensor<Rank3<N, 4, 3>, TestDtype, _> = dev.sample_normal();
         let a_array = a.array();
@@ -458,7 +458,7 @@ mod tests {
 
     #[test]
     fn test_matmul_broadcast_actual() {
-        const N: usize = 5;
+        const N: usize = 2;
         let dev: TestDevice = Default::default();
         let a: Tensor<Rank3<N, 4, 3>, TestDtype, _> = dev.sample_normal();
         let b: Tensor<Rank2<3, 2>, TestDtype, _> = dev.sample_normal();
@@ -476,9 +476,9 @@ mod tests {
     fn test_matmul_batched_3d() {
         let dev: TestDevice = Default::default();
 
-        let a: Tensor<Rank3<5, 3, 2>, TestDtype, _> = dev.sample_normal();
+        let a: Tensor<Rank3<2, 3, 2>, TestDtype, _> = dev.sample_normal();
         let a_array = a.array();
-        let b: Tensor<Rank3<5, 2, 4>, TestDtype, _> = dev.sample_normal();
+        let b: Tensor<Rank3<2, 2, 4>, TestDtype, _> = dev.sample_normal();
         let b_array = b.array();
         let c = a.leaky_trace().matmul(b.clone());
         let c_array = c.array();
@@ -487,7 +487,7 @@ mod tests {
         let g_a = g.get(&a).array();
         let g_b = g.get(&b).array();
 
-        for i in 0..5 {
+        for i in 0..2 {
             let sub_a = dev.tensor(a_array[i]);
             let sub_b = dev.tensor(b_array[i]);
             let sub_c = sub_a.leaky_trace().matmul(sub_b.clone());
@@ -502,9 +502,9 @@ mod tests {
     fn test_matmul_batched_4d() {
         let dev: TestDevice = Default::default();
 
-        let a: Tensor<Rank4<7, 5, 3, 2>, TestDtype, _> = dev.sample_normal();
+        let a: Tensor<Rank4<2, 3, 3, 2>, TestDtype, _> = dev.sample_normal();
         let a_array = a.array();
-        let b: Tensor<Rank4<7, 5, 2, 4>, TestDtype, _> = dev.sample_normal();
+        let b: Tensor<Rank4<2, 3, 2, 4>, TestDtype, _> = dev.sample_normal();
         let b_array = b.array();
         let c = a.leaky_trace().matmul(b.clone());
         let c_array = c.array();
@@ -513,8 +513,8 @@ mod tests {
         let g_a = g.get(&a).array();
        let g_b = g.get(&b).array();
 
-        for i in 0..7 {
-            for j in 0..5 {
+        for i in 0..2 {
+            for j in 0..3 {
                 let sub_a = dev.tensor(a_array[i][j]);
                 let sub_b = dev.tensor(b_array[i][j]);
                 let sub_c = sub_a.leaky_trace().matmul(sub_b.clone());
diff --git a/dfdx-core/src/tensor_ops/softmax.rs b/dfdx-core/src/tensor_ops/softmax.rs
index 0a6ec8aab..a45436c83 100644
--- a/dfdx-core/src/tensor_ops/softmax.rs
+++ b/dfdx-core/src/tensor_ops/softmax.rs
@@ -91,7 +91,7 @@ mod tests {
     #[test]
     fn test_softmax_equivalence() {
         let dev: TestDevice = Default::default();
-        let t: Tensor<Rank4<8, 16, 32, 64>, TestDtype, _> = dev.sample_normal();
+        let t: Tensor<Rank4<8, 16, 32, 32>, TestDtype, _> = dev.sample_normal();
         let p = t.leaky_trace().softmax::<Axis<3>>();
         let p_truth = t.leaky_trace().log_softmax::<Axis<3>>().exp();
         // we can't create an array as it will overflow the stack
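
[Note: the batched matmul tests assert that a batched matmul is one independent 2d matmul per leading index, which is what the loops over i (and i, j in the 4d case) compare against; shrinking the batch dimensions from 5 and 7x5 down to 2 and 2x3 only reduces how many per-batch comparisons run. A small self-contained sketch of that property, with illustrative sizes, not dfdx code:]

    // naive 2d matmul: (m x k) * (k x n) -> (m x n)
    fn matmul2d(a: &[Vec<f64>], b: &[Vec<f64>]) -> Vec<Vec<f64>> {
        let (m, k, n) = (a.len(), b.len(), b[0].len());
        let mut c = vec![vec![0.0; n]; m];
        for i in 0..m {
            for p in 0..k {
                for j in 0..n {
                    c[i][j] += a[i][p] * b[p][j];
                }
            }
        }
        c
    }

    fn main() {
        // two batches of (3 x 2) * (2 x 4), mirroring the reduced 3d test
        let a = vec![vec![vec![1.0f64; 2]; 3]; 2];
        let b = vec![vec![vec![2.0f64; 4]; 2]; 2];
        // the batched product is the per-batch 2d product
        let c: Vec<_> = (0..2).map(|i| matmul2d(&a[i], &b[i])).collect();
        assert_eq!(c[0][0][0], 4.0); // 1*2 + 1*2 over the k = 2 axis
    }
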