summary | refs | log | tree | commit | diff
path: root/src/include/port/pg_lfind.h
diff options
context:
space:
mode:
Diffstat (limited to 'src/include/port/pg_lfind.h')
-rw-r--r--  src/include/port/pg_lfind.h  |  65
1 file changed, 39 insertions(+), 26 deletions(-)
diff --git a/src/include/port/pg_lfind.h b/src/include/port/pg_lfind.h
index a4e13dffec0..d575e733d34 100644
--- a/src/include/port/pg_lfind.h
+++ b/src/include/port/pg_lfind.h
@@ -91,16 +91,19 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
{
uint32 i = 0;
-#ifdef USE_SSE2
+#ifndef USE_NO_SIMD
/*
- * A 16-byte register only has four 4-byte lanes. For better
- * instruction-level parallelism, each loop iteration operates on a block
- * of four registers. Testing has showed this is ~40% faster than using a
- * block of two registers.
+ * For better instruction-level parallelism, each loop iteration operates
+ * on a block of four registers. Testing for SSE2 has shown this is ~40%
+ * faster than using a block of two registers.
*/
- const __m128i keys = _mm_set1_epi32(key); /* load 4 copies of key */
- uint32 iterations = nelem & ~0xF; /* round down to multiple of 16 */
+ const Vector32 keys = vector32_broadcast(key); /* load copies of key */
+ const uint32 nelem_per_vector = sizeof(Vector32) / sizeof(uint32);
+ const uint32 nelem_per_iteration = 4 * nelem_per_vector;
+
+ /* round down to multiple of elements per iteration */
+ const uint32 tail_idx = nelem & ~(nelem_per_iteration - 1);
#if defined(USE_ASSERT_CHECKING)
bool assert_result = false;
@@ -116,49 +119,59 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
}
#endif
- for (i = 0; i < iterations; i += 16)
+ for (i = 0; i < tail_idx; i += nelem_per_iteration)
{
- /* load the next block into 4 registers holding 4 values each */
- const __m128i vals1 = _mm_loadu_si128((__m128i *) & base[i]);
- const __m128i vals2 = _mm_loadu_si128((__m128i *) & base[i + 4]);
- const __m128i vals3 = _mm_loadu_si128((__m128i *) & base[i + 8]);
- const __m128i vals4 = _mm_loadu_si128((__m128i *) & base[i + 12]);
+ Vector32 vals1,
+ vals2,
+ vals3,
+ vals4,
+ result1,
+ result2,
+ result3,
+ result4,
+ tmp1,
+ tmp2,
+ result;
+
+ /* load the next block into 4 registers */
+ vector32_load(&vals1, &base[i]);
+ vector32_load(&vals2, &base[i + nelem_per_vector]);
+ vector32_load(&vals3, &base[i + nelem_per_vector * 2]);
+ vector32_load(&vals4, &base[i + nelem_per_vector * 3]);
/* compare each value to the key */
- const __m128i result1 = _mm_cmpeq_epi32(keys, vals1);
- const __m128i result2 = _mm_cmpeq_epi32(keys, vals2);
- const __m128i result3 = _mm_cmpeq_epi32(keys, vals3);
- const __m128i result4 = _mm_cmpeq_epi32(keys, vals4);
+ result1 = vector32_eq(keys, vals1);
+ result2 = vector32_eq(keys, vals2);
+ result3 = vector32_eq(keys, vals3);
+ result4 = vector32_eq(keys, vals4);
/* combine the results into a single variable */
- const __m128i tmp1 = _mm_or_si128(result1, result2);
- const __m128i tmp2 = _mm_or_si128(result3, result4);
- const __m128i result = _mm_or_si128(tmp1, tmp2);
+ tmp1 = vector32_or(result1, result2);
+ tmp2 = vector32_or(result3, result4);
+ result = vector32_or(tmp1, tmp2);
/* see if there was a match */
- if (_mm_movemask_epi8(result) != 0)
+ if (vector8_is_highbit_set((Vector8) result))
{
-#if defined(USE_ASSERT_CHECKING)
Assert(assert_result == true);
-#endif
return true;
}
}
-#endif /* USE_SSE2 */
+#endif /* ! USE_NO_SIMD */
/* Process the remaining elements one at a time. */
for (; i < nelem; i++)
{
if (key == base[i])
{
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
Assert(assert_result == true);
#endif
return true;
}
}
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
Assert(assert_result == false);
#endif
return false;