/*************************************************************************
*
* Project
* _____ _____ __ __ _____
* / ____| __ \| \/ | __ \
* ___ _ __ ___ _ __ | | __| |__) | \ / | |__) |
* / _ \| '_ \ / _ \ '_ \| | |_ | ___/| |\/| | ___/
*| (_) | |_) | __/ | | | |__| | | | | | | |
* \___/| .__/ \___|_| |_|\_____|_| |_| |_|_|
* | |
* |_|
*
* Copyright (C) Akiel Aries, <akiel@akiel.org>, et al.
*
* This software is licensed as described in the file LICENSE, which
 * you should have received as part of this distribution. The terms,
 * among other details, are referenced in the official documentation
 * here: https://akielaries.github.io/openGPMP/ along with other
 * important files in this project.
*
* You may opt to use, copy, modify, merge, publish, distribute
* and/or sell copies of the Software, and permit persons to whom
* the Software is furnished to do so, under the terms of the
* LICENSE file. As this is an Open Source effort, all implementations
* must be of the same methodology.
 *
* This software is distributed on an AS IS basis, WITHOUT
* WARRANTY OF ANY KIND, either express or implied.
*
************************************************************************/
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <openGPMP/linalg/mtx.hpp>
#include <vector>
#if defined(__x86_64__) || defined(__amd64__) || defined(__amd64)
/************************************************************************
*
* Matrix Operations for AVX ISA
*
************************************************************************/
#if defined(__AVX2__)
// AVX family intrinsics
#include <immintrin.h>
/************************************************************************
*
* Matrix Operations on Arrays
*
************************************************************************/
// matrix addition for 16-bit integers using 256-bit SIMD registers
void gpmp::linalg::Mtx::mtx_add(const int16_t *A,
const int16_t *B,
int16_t *C,
int rows,
int cols) {
for (int i = 0; i < rows; ++i) {
int j = 0;
for (; j < cols - 15; j += 16) {
__m256i a = _mm256_loadu_si256(
reinterpret_cast<const __m256i *>(&A[i * cols + j]));
__m256i b = _mm256_loadu_si256(
reinterpret_cast<const __m256i *>(&B[i * cols + j]));
            // Perform vectorized addition: C = A + B (no accumulation,
            // matching the scalar remainder loop below)
            __m256i c = _mm256_add_epi16(a, b);
// Store the result back to the C matrix
_mm256_storeu_si256(reinterpret_cast<__m256i *>(&C[i * cols + j]),
c);
}
for (; j < cols; ++j) {
C[i * cols + j] = A[i * cols + j] + B[i * cols + j];
}
}
}
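
// A minimal usage sketch for mtx_add, not part of the original file: it
// assumes row-major, contiguous buffers, an AVX2-capable build, and that
// Mtx is default-constructible.
//
//   std::vector<int16_t> A(rows * cols, 1), B(rows * cols, 2), C(rows * cols);
//   gpmp::linalg::Mtx mtx;
//   mtx.mtx_add(A.data(), B.data(), C.data(), rows, cols);
//   assert(C[0] == 3);

// matrix subtraction for 16-bit integers using 256-bit SIMD registers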
void gpmp::linalg::Mtx::mtx_sub(const int16_t *A,
const int16_t *B,
int16_t *C,
int rows,
int cols) {
for (int i = 0; i < rows; ++i) {
int j = 0;
for (; j < cols - 15; j += 16) {
__m256i a = _mm256_loadu_si256(
reinterpret_cast<const __m256i *>(&A[i * cols + j]));
__m256i b = _mm256_loadu_si256(
reinterpret_cast<const __m256i *>(&B[i * cols + j]));
            // Perform vectorized subtraction: C = A - B (the previous load
            // of C was dead, as its value was immediately overwritten)
            __m256i c = _mm256_sub_epi16(a, b);
// Store the result back to the C matrix
_mm256_storeu_si256(reinterpret_cast<__m256i *>(&C[i * cols + j]),
c);
}
for (; j < cols; ++j) {
C[i * cols + j] = A[i * cols + j] - B[i * cols + j];
}
}
}
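// matrix multiplication for 16-bit integers using 256-bit SIMD registers;
// note that _mm256_mullo_epi16 keeps only the low 16 bits of each product,
// so large operands can wrap in the int16_t accumulator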
void gpmp::linalg::Mtx::mtx_mult(const int16_t *A,
const int16_t *B,
int16_t *C,
int rows_a,
int cols_a,
int cols_b) {
for (int i = 0; i < rows_a; ++i) {
        for (int j = 0; j < cols_b - 15; j += 16) {
__m256i c = _mm256_setzero_si256();
for (int k = 0; k < cols_a; ++k) {
__m256i a = _mm256_set1_epi16(A[i * cols_a + k]);
__m256i b = _mm256_loadu_si256(
reinterpret_cast<const __m256i *>(&B[k * cols_b + j]));
__m256i prod = _mm256_mullo_epi16(a, b);
c = _mm256_add_epi16(c, prod);
}
_mm256_storeu_si256(reinterpret_cast<__m256i *>(&C[i * cols_b + j]),
c);
}
// Handle remaining elements
for (int j = cols_b - cols_b % 16; j < cols_b; ++j) {
int sum = 0;
for (int k = 0; k < cols_a; ++k) {
sum += A[i * cols_a + k] * B[k * cols_b + j];
}
C[i * cols_b + j] = sum;
}
}
}
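
// A minimal usage sketch for mtx_mult, not part of the original file:
// multiplies a rows_a x cols_a matrix A by a cols_a x cols_b matrix B into C,
// all row-major and contiguous; `mtx` is a hypothetical Mtx instance.
//
//   std::vector<int16_t> A(rows_a * cols_a, 1), B(cols_a * cols_b, 1);
//   std::vector<int16_t> C(rows_a * cols_b);
//   gpmp::linalg::Mtx mtx;
//   mtx.mtx_mult(A.data(), B.data(), C.data(), rows_a, cols_a, cols_b);
//   // each entry of C is the dot product of a row of A and a column of B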
#endif // __AVX2__
#endif // x86