@@ -38,6 +38,28 @@ const int kAdamCorrectionIterations = 200000;
// Epsilon in Adam to prevent division by zero.
const double kAdamEpsilon = 1e-8;
+ // Computes matrix.vector v = Wu.
+ // u is of size W.dim2() - add_bias_fwd and the output v is of size
+ // W.dim1() - skip_bias_back.
+ // If add_bias_fwd, u is imagined to have an extra element at the end with value
+ // 1, to implement the bias weight.
+ // If skip_bias_back, we are actually performing the backwards product on a
+ // transposed matrix, so we need to drop the v output corresponding to the last
+ // element in dim1.
+ static inline void MatrixDotVectorInternal(const GENERIC_2D_ARRAY<double>& w,
+                                            bool add_bias_fwd,
+                                            bool skip_bias_back, const double* u,
+                                            double* v) {
+   int num_results = w.dim1() - skip_bias_back;
+   int extent = w.dim2() - add_bias_fwd;
+   for (int i = 0; i < num_results; ++i) {
+     const double* wi = w[i];
+     double total = WeightMatrix::DotProduct(wi, u, extent);
+     if (add_bias_fwd) total += wi[extent];  // The bias value.
+     v[i] = total;
+   }
+ }
+
// Copies the whole input transposed, converted to double, into *this.
void TransposedArray::Transpose(const GENERIC_2D_ARRAY<double>& input) {
  int width = input.dim1();
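
For context, the forward case (add_bias_fwd = true) treats the last column of W as a bias that multiplies an implicit trailing 1 appended to u. The following standalone sketch reproduces that loop with a plain C array in place of GENERIC_2D_ARRAY and invented values; it illustrates the moved helper's behavior and is not code from the Tesseract sources.

// Minimal sketch of MatrixDotVectorInternal's forward path.
// W is 2x3: two real weights per row plus a final bias column.
#include <cstdio>

static double DotProduct(const double* a, const double* b, int n) {
  double total = 0.0;
  for (int i = 0; i < n; ++i) total += a[i] * b[i];
  return total;
}

int main() {
  const double w[2][3] = {{1.0, 2.0, 0.5},    // weights 1, 2; bias 0.5
                          {3.0, 4.0, -1.0}};  // weights 3, 4; bias -1
  const double u[2] = {10.0, 20.0};           // only the real inputs.
  const bool add_bias_fwd = true;
  const int extent = 3 - add_bias_fwd;        // 2 real weights per row.
  double v[2];
  for (int i = 0; i < 2; ++i) {
    double total = DotProduct(w[i], u, extent);
    if (add_bias_fwd) total += w[i][extent];  // The bias value.
    v[i] = total;
  }
  std::printf("v = [%g, %g]\n", v[0], v[1]);  // prints v = [50.5, 109]
  return 0;
}
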
@@ -401,26 +423,4 @@ void WeightMatrix::FloatToDouble(const GENERIC_2D_ARRAY<float>& wf,
}
}
- // Computes matrix.vector v = Wu.
- // u is of size W.dim2() - add_bias_fwd and the output v is of size
- // W.dim1() - skip_bias_back.
- // If add_bias_fwd, u is imagined to have an extra element at the end with value
- // 1, to implement the bias weight.
- // If skip_bias_back, we are actually performing the backwards product on a
- // transposed matrix, so we need to drop the v output corresponding to the last
- // element in dim1.
- void WeightMatrix::MatrixDotVectorInternal(const GENERIC_2D_ARRAY<double>& w,
-                                            bool add_bias_fwd,
-                                            bool skip_bias_back, const double* u,
-                                            double* v) {
-   int num_results = w.dim1() - skip_bias_back;
-   int extent = w.dim2() - add_bias_fwd;
-   for (int i = 0; i < num_results; ++i) {
-     const double* wi = w[i];
-     double total = DotProduct(wi, u, extent);
-     if (add_bias_fwd) total += wi[extent];  // The bias value.
-     v[i] = total;
-   }
- }
-
} // namespace tesseract.
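
The backward case (skip_bias_back = true) is the same loop run over the transposed matrix: the bias column of W becomes the last row of W^T, and a product against that row would yield a "gradient" for the constant-1 bias input, so num_results excludes it. A matching standalone sketch, again with invented values rather than Tesseract code:

// Minimal sketch of MatrixDotVectorInternal's backward path.
// wt is the 3x2 transpose of the matrix above; its last row holds
// the biases and is skipped when propagating deltas back to u.
#include <cstdio>

static double DotProduct(const double* a, const double* b, int n) {
  double total = 0.0;
  for (int i = 0; i < n; ++i) total += a[i] * b[i];
  return total;
}

int main() {
  const double wt[3][2] = {{1.0, 3.0},
                           {2.0, 4.0},
                           {0.5, -1.0}};  // bias row: skipped below.
  const double u[2] = {0.1, 0.2};         // deltas from the layer above.
  const bool skip_bias_back = true;
  const int num_results = 3 - skip_bias_back;  // 2 real inputs get deltas.
  double v[2];
  for (int i = 0; i < num_results; ++i)
    v[i] = DotProduct(wt[i], u, 2);
  std::printf("v = [%g, %g]\n", v[0], v[1]);  // prints v = [0.7, 1]
  return 0;
}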