/*
 * IPC & SIMD demo: compares scalar, unrolled, multi-accumulator, and
 * SSE/AVX2 summation kernels by minimum RDTSC cycle count.
 */
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
/* Shorthand for a 32-bit unsigned integer, used throughout the kernels. */
typedef unsigned int u32;
typedef unsigned int u32; /* identical redefinition; legal in C11 */

/* Baseline: sum Count values through one accumulator, forming a single
   serial dependency chain (each add must wait on the previous result). */
u32 SingleScalar(u32 Count, u32 *Input) {
  u32 Total = 0;
  for (u32 *At = Input, *End = Input + Count; At != End; ++At) {
    Total += *At;
  }
  return Total;
}
typedef unsigned int u32; /* identical redefinition; legal in C11 */

/* Loop body unrolled by two, still one accumulator: fewer loop-control
   ops per element, but the add chain stays serial.
   NOTE(review): assumes Count is a multiple of 2 — an odd Count reads one
   element past the end; confirm callers guarantee this. */
u32 Unroll2Scalar(u32 Count, u32 *Input) {
  u32 Total = 0;
  u32 At = 0;
  while (At < Count) {
    Total += Input[At];
    Total += Input[At + 1];
    At += 2;
  }
  return Total;
}
typedef unsigned int u32; /* identical redefinition; legal in C11 */

/* Loop body unrolled by four, single accumulator — same serial add chain
   as SingleScalar, amortized loop overhead.
   NOTE(review): assumes Count is a multiple of 4; otherwise it reads past
   the end of Input — confirm callers guarantee this. */
u32 Unroll4Scalar(u32 Count, u32 *Input) {
  u32 Total = 0;
  u32 At = 0;
  while (At < Count) {
    Total += Input[At];
    Total += Input[At + 1];
    Total += Input[At + 2];
    Total += Input[At + 3];
    At += 4;
  }
  return Total;
}
typedef unsigned int u32; /* identical redefinition; legal in C11 */

/* Two independent accumulators (even/odd elements): breaks the serial
   dependency chain so the CPU can retire two adds per iteration in
   parallel. Assumes Count is a multiple of 2. */
u32 DualScalar(u32 Count, u32 *Input) {
  u32 EvenSum = 0;
  u32 OddSum = 0;
  for (u32 At = 0; At < Count; At += 2) {
    EvenSum += Input[At + 0];
    OddSum += Input[At + 1];
  }
  return EvenSum + OddSum;
}
typedef unsigned int u32; /* identical redefinition; legal in C11 */

/* Four independent accumulators: four parallel add chains, combined once
   at the end. Assumes Count is a multiple of 4. */
u32 QuadScalar(u32 Count, u32 *Input) {
  u32 Acc0 = 0;
  u32 Acc1 = 0;
  u32 Acc2 = 0;
  u32 Acc3 = 0;
  for (u32 At = 0; At < Count; At += 4) {
    Acc0 += Input[At + 0];
    Acc1 += Input[At + 1];
    Acc2 += Input[At + 2];
    Acc3 += Input[At + 3];
  }
  return Acc0 + Acc1 + Acc2 + Acc3;
}
typedef unsigned int u32; /* identical redefinition; legal in C11 */

/* Four accumulators with a walking pointer instead of an index.
   Note: only floor(Count/4)*4 elements are summed — any remainder is
   deliberately ignored, matching the original. */
u32 QuadScalarPtr(u32 Count, u32 *Input) {
  u32 Acc0 = 0;
  u32 Acc1 = 0;
  u32 Acc2 = 0;
  u32 Acc3 = 0;
  for (u32 Blocks = Count / 4; Blocks != 0; --Blocks, Input += 4) {
    Acc0 += Input[0];
    Acc1 += Input[1];
    Acc2 += Input[2];
    Acc3 += Input[3];
  }
  return Acc0 + Acc1 + Acc2 + Acc3;
}
typedef unsigned int u32; /* identical redefinition; legal in C11 */

/* SSE: accumulate four 32-bit lanes per iteration in one vector register,
   then reduce horizontally. Assumes Count is a multiple of 4. */
u32 __attribute__((target("ssse3"))) SingleSSE(u32 Count, u32 *Input) {
  __m128i Acc = _mm_setzero_si128();
  u32 *End = Input + Count;
  for (; Input < End; Input += 4) {
    Acc = _mm_add_epi32(Acc, _mm_loadu_si128((__m128i *)Input));
  }
  /* Two horizontal adds collapse 4 lanes down to 1. */
  Acc = _mm_hadd_epi32(Acc, Acc);
  Acc = _mm_hadd_epi32(Acc, Acc);
  return _mm_cvtsi128_si32(Acc);
}
typedef unsigned int u32; /* identical redefinition; legal in C11 */

/* AVX2: accumulate eight 32-bit lanes per iteration in one vector
   register. Assumes Count is a multiple of 8. */
u32 __attribute__((target("avx2"))) SingleAVX(u32 Count, u32 *Input) {
  __m256i Acc = _mm256_setzero_si256();
  u32 *End = Input + Count;
  for (; Input < End; Input += 8) {
    Acc = _mm256_add_epi32(Acc, _mm256_loadu_si256((__m256i *)Input));
  }
  /* hadd reduces within each 128-bit half; swap halves and add to finish. */
  Acc = _mm256_hadd_epi32(Acc, Acc);
  Acc = _mm256_hadd_epi32(Acc, Acc);
  __m256i Swapped = _mm256_permute2x128_si256(Acc, Acc, 1 | (1 << 4));
  Acc = _mm256_add_epi32(Acc, Swapped);
  return _mm256_cvtsi256_si32(Acc);
}
typedef unsigned int u32; /* identical redefinition; legal in C11 */

/* AVX2 with two vector accumulators: 16 lanes per iteration split across
   two independent add chains. Assumes Count is a multiple of 16. */
u32 __attribute__((target("avx2"))) DualAVX(u32 Count, u32 *Input) {
  __m256i AccLo = _mm256_setzero_si256();
  __m256i AccHi = _mm256_setzero_si256();
  u32 *End = Input + Count;
  for (; Input < End; Input += 16) {
    AccLo = _mm256_add_epi32(AccLo, _mm256_loadu_si256((__m256i *)(Input + 0)));
    AccHi = _mm256_add_epi32(AccHi, _mm256_loadu_si256((__m256i *)(Input + 8)));
  }
  __m256i Acc = _mm256_add_epi32(AccLo, AccHi);
  /* hadd reduces within each 128-bit half; swap halves and add to finish. */
  Acc = _mm256_hadd_epi32(Acc, Acc);
  Acc = _mm256_hadd_epi32(Acc, Acc);
  Acc = _mm256_add_epi32(Acc, _mm256_permute2x128_si256(Acc, Acc, 1 | (1 << 4)));
  return _mm256_cvtsi256_si32(Acc);
}
typedef unsigned int u32; /* identical redefinition; legal in C11 */

/* AVX2 with four vector accumulators: 32 lanes per iteration across four
   independent add chains. Assumes Count is a multiple of 32. */
u32 __attribute__((target("avx2"))) QuadAVX(u32 Count, u32 *Input) {
  __m256i Acc0 = _mm256_setzero_si256();
  __m256i Acc1 = _mm256_setzero_si256();
  __m256i Acc2 = _mm256_setzero_si256();
  __m256i Acc3 = _mm256_setzero_si256();
  u32 *End = Input + Count;
  for (; Input < End; Input += 32) {
    Acc0 = _mm256_add_epi32(Acc0, _mm256_loadu_si256((__m256i *)(Input + 0)));
    Acc1 = _mm256_add_epi32(Acc1, _mm256_loadu_si256((__m256i *)(Input + 8)));
    Acc2 = _mm256_add_epi32(Acc2, _mm256_loadu_si256((__m256i *)(Input + 16)));
    Acc3 = _mm256_add_epi32(Acc3, _mm256_loadu_si256((__m256i *)(Input + 24)));
  }
  /* Pairwise combine, then the standard hadd/permute horizontal reduce. */
  __m256i Acc = _mm256_add_epi32(_mm256_add_epi32(Acc0, Acc1),
                                 _mm256_add_epi32(Acc2, Acc3));
  Acc = _mm256_hadd_epi32(Acc, Acc);
  Acc = _mm256_hadd_epi32(Acc, Acc);
  Acc = _mm256_add_epi32(Acc, _mm256_permute2x128_si256(Acc, Acc, 1 | (1 << 4)));
  return _mm256_cvtsi256_si32(Acc);
}
typedef unsigned int u32; /* identical redefinition; legal in C11 */

/* Same as QuadAVX but driven by a walking pointer and a block counter.
   Note: only floor(Count/32)*32 elements are summed — any remainder is
   deliberately ignored, matching the original. */
u32 __attribute__((target("avx2"))) QuadAVXPtr(u32 Count, u32 *Input) {
  __m256i Acc0 = _mm256_setzero_si256();
  __m256i Acc1 = _mm256_setzero_si256();
  __m256i Acc2 = _mm256_setzero_si256();
  __m256i Acc3 = _mm256_setzero_si256();
  for (u32 Blocks = Count / 32; Blocks != 0; --Blocks, Input += 32) {
    Acc0 = _mm256_add_epi32(Acc0, _mm256_loadu_si256((__m256i *)(Input + 0)));
    Acc1 = _mm256_add_epi32(Acc1, _mm256_loadu_si256((__m256i *)(Input + 8)));
    Acc2 = _mm256_add_epi32(Acc2, _mm256_loadu_si256((__m256i *)(Input + 16)));
    Acc3 = _mm256_add_epi32(Acc3, _mm256_loadu_si256((__m256i *)(Input + 24)));
  }
  /* Pairwise combine, then the standard hadd/permute horizontal reduce. */
  __m256i Acc = _mm256_add_epi32(_mm256_add_epi32(Acc0, Acc1),
                                 _mm256_add_epi32(Acc2, Acc3));
  Acc = _mm256_hadd_epi32(Acc, Acc);
  Acc = _mm256_hadd_epi32(Acc, Acc);
  Acc = _mm256_add_epi32(Acc, _mm256_permute2x128_si256(Acc, Acc, 1 | (1 << 4)));
  return _mm256_cvtsi256_si32(Acc);
}
/* Read a raw cycle counter for benchmarking.
   On x86/x86-64 this is RDTSC; elsewhere it falls back to clock(), whose
   units are clock ticks rather than cycles, so cross-platform numbers are
   not directly comparable. */
#if defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \
    defined(_M_IX86)
static inline uint64_t read_cpu_timer(void) {
  unsigned int lo, hi;
  /* NOTE(review): RDTSC is not a serializing instruction, so out-of-order
     execution can blur short measurements — acceptable here since the
     harness keeps the minimum over many iterations. */
  __asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));
  /* EDX holds the high 32 bits, EAX the low 32 bits. */
  return ((uint64_t)hi << 32) | lo;
}
#else
static inline uint64_t read_cpu_timer(void) { return (uint64_t)clock(); }
#endif
/* Elements summed per call; a multiple of 32, so every kernel's stride
   (2, 4, 8, 16, 32) divides it exactly and no tail handling is needed. */
#define ARRAY_SIZE 4096
/* Repetitions per kernel; the minimum elapsed time is reported. */
#define ITERATIONS 100000
/* Benchmark one summation kernel: call it ITERATIONS times, keep the
   minimum cycle count (the least-disturbed run), and print adds/cycle.
   `result` is volatile so the compiler cannot optimize the call away.
   Multi-statement macro wrapped in do/while(0) so it behaves as one
   statement at the call site. */
#define TEST_FUNCTION(func_name, count, input)                                 \
  do {                                                                         \
    uint64_t min_cycles = UINT64_MAX;                                          \
    volatile u32 result = 0;                                                   \
    for (int iter = 0; iter < ITERATIONS; iter++) {                            \
      uint64_t start = read_cpu_timer();                                       \
      result = func_name(count, input);                                        \
      uint64_t end = read_cpu_timer();                                         \
      uint64_t elapsed = end - start;                                          \
      if (elapsed < min_cycles)                                                \
        min_cycles = elapsed;                                                  \
    }                                                                          \
    double adds_per_cycle = (double)(count) / (double)min_cycles;              \
    printf("%-18s: %8llu cycles, %.6f adds/cycle\n", #func_name,               \
           (unsigned long long)min_cycles, adds_per_cycle);                    \
    fflush(stdout);                                                            \
  } while (0)
/* Entry point: fill a deterministic pseudo-random array, then benchmark
   each summation kernel in turn via TEST_FUNCTION. */
int main(void) {
  u32 *data = malloc(ARRAY_SIZE * sizeof *data);
  if (!data) {
    fprintf(stderr, "Memory allocation failed\n");
    return 1;
  }

  /* Fixed seed keeps the input identical across runs. */
  srand(42);
  for (int n = 0; n < ARRAY_SIZE; n++) {
    data[n] = rand() % 100;
  }

  printf("=== IPC & SIMD Demo ===\n");
  printf("Array size: %d integers\n", ARRAY_SIZE);
  printf("Iterations: %d\n\n", ITERATIONS);

  printf("--- Scalar: Single Dependency Chain ---\n");
  TEST_FUNCTION(SingleScalar, ARRAY_SIZE, data);
  TEST_FUNCTION(Unroll2Scalar, ARRAY_SIZE, data);
  TEST_FUNCTION(Unroll4Scalar, ARRAY_SIZE, data);

  printf("\n--- Scalar: Broken Dependency Chain ---\n");
  TEST_FUNCTION(DualScalar, ARRAY_SIZE, data);
  TEST_FUNCTION(QuadScalar, ARRAY_SIZE, data);
  TEST_FUNCTION(QuadScalarPtr, ARRAY_SIZE, data);

  printf("\n--- SIMD Versions (SSE/AVX) ---\n");
  TEST_FUNCTION(SingleSSE, ARRAY_SIZE, data);
  TEST_FUNCTION(SingleAVX, ARRAY_SIZE, data);

  printf("\n--- SIMD: Unrolled & Broken Dependency ---\n");
  TEST_FUNCTION(DualAVX, ARRAY_SIZE, data);
  TEST_FUNCTION(QuadAVX, ARRAY_SIZE, data);
  TEST_FUNCTION(QuadAVXPtr, ARRAY_SIZE, data);

  free(data);
  return 0;
}