feat: add vec3 and vec4 functions

This commit is contained in:
2025-06-21 13:48:10 +02:00
parent 3703ab17b0
commit 187908ccec
5 changed files with 213 additions and 18 deletions

View File

@@ -3,7 +3,7 @@
int main(void) int main(void)
{ {
Vec4f_t vec = vec4(1.f, 2.f, 8.f, 4.f); Vec4f_t vec = vec4f(1.f, 2.f, 8.f, 4.f);
printf("%f %f %f %f\n", vec.x, vec.y, vec.z, vec.w); printf("%f %f %f %f\n", vec.x, vec.y, vec.z, vec.w);
Vec4f_t vec2 = vec4f_clone(&vec); Vec4f_t vec2 = vec4f_clone(&vec);

139
src/math/vec3.c Normal file
View File

@@ -0,0 +1,139 @@
//
// vec3.c
// main
//
// Created by Loïc GUEZO on 21/06/2025.
//
#include "vec3.h"
#define VEC_SIZE 3
Vec3f_t vec3f(float x, float y, float z)
{
return (Vec3f_t){.x = x, .y = y, .z = z};
}
/*
 * Build a Vec3f_t from the first three floats of `val`.
 * `val` must point to at least 3 readable floats.
 *
 * Fix: the previous SIMD paths used full 4-lane loads
 * (_mm_loadu_ps / vld1q_f32), which read val[3] — one float past
 * the end of a 3-element source array, i.e. an out-of-bounds read.
 * The vector is now assembled from exactly three elements; lane 3
 * is zero and only lands in Vec3f_t's 16-byte tail padding.
 */
Vec3f_t vec3f_from_array(const float *__restrict val)
{
    Vec3f_t vec;
#if defined (SIMD_X86)
    /* _mm_set_ps takes lanes high-to-low; reads only val[0..2]. */
    __m128 arr = _mm_set_ps(0.f, val[2], val[1], val[0]);
    _mm_store_ps(vec.data, arr);
#elif defined (SIMD_ARCH)
    /* Insert the three source lanes one by one; lane 3 stays 0. */
    float32x4_t arr = vdupq_n_f32(0.f);
    arr = vsetq_lane_f32(val[0], arr, 0);
    arr = vsetq_lane_f32(val[1], arr, 1);
    arr = vsetq_lane_f32(val[2], arr, 2);
    vst1q_f32(vec.data, arr);
#else
    /* Portable fallback: copy the 3 components. */
    for (int i = 0; i < VEC_SIZE; i++) {
        vec.data[i] = val[i];
    }
#endif
    return vec;
}
/*
 * Broadcast `f` into every component: returns (f, f, f).
 *
 * NOTE(review): the SIMD branches store a full 4-lane (16-byte)
 * vector into vec.data, which declares only 3 floats. Vec3f_t is
 * aligned(16), so the object is 16 bytes and the 4th lane lands in
 * its tail padding — confirm this is intended before changing the
 * type's alignment or size.
 */
Vec3f_t vec3f_scalar(float f)
{
    Vec3f_t vec;
#if defined(SIMD_X86)
    /* Splat f across all four SSE lanes, aligned 16-byte store. */
    __m128 scalar = _mm_set1_ps(f);
    _mm_store_ps(vec.data, scalar);
#elif defined(SIMD_ARCH)
    /* NEON: duplicate f into each lane, store all four lanes. */
    float32x4_t scalar = vdupq_n_f32(f);
    vst1q_f32(vec.data, scalar);
#else
    /* Portable fallback: assign each of the 3 components. */
    for (int i = 0; i < VEC_SIZE; i++) {
        vec.data[i] = f;
    }
#endif
    return vec;
}
/* Return the zero vector (0, 0, 0). */
Vec3f_t vec3f_zero(void)
{
    const float zero = 0.0f;
    return vec3f_scalar(zero);
}
/*
 * In-place component-wise add: *out += a; returns the updated *out.
 *
 * NOTE(review): SIMD branches move 16 bytes through out->data /
 * a.data (3 floats plus the aligned(16) union's tail padding) —
 * confirm the padding-lane traffic is intended.
 */
Vec3f_t vec3f_add_r(Vec3f_t *__restrict out, Vec3f_t a)
{
#if defined (SIMD_X86)
    __m128 va = _mm_load_ps(a.data);    /* aligned loads: both objects are aligned(16) */
    __m128 vb = _mm_load_ps(out->data);
    __m128 vres = _mm_add_ps(va, vb);
    _mm_store_ps(out->data, vres);      /* write the sum back into *out */
#elif defined (SIMD_ARCH)
    float32x4_t va = vld1q_f32(a.data);
    float32x4_t vb = vld1q_f32(out->data);
    float32x4_t vres = vaddq_f32(va, vb);
    vst1q_f32(out->data, vres);
#else
    /* Portable fallback: accumulate each of the 3 components. */
    for (int i = 0; i < VEC_SIZE; i++) {
        out->data[i] += a.data[i];
    }
#endif
    return *out;
}
/* Component-wise sum: returns a + b. Neither caller argument is
 * modified; the by-value copy of `a` serves as scratch space. */
Vec3f_t vec3f_add(Vec3f_t a, Vec3f_t b)
{
    Vec3f_t acc = a;
    vec3f_add_r(&acc, b);
    return acc;
}
/*
 * In-place component-wise subtract: *out -= a; returns the updated
 * *out (note the operand order: *out is the minuend).
 */
Vec3f_t vec3f_sub_r(Vec3f_t *__restrict out, Vec3f_t a)
{
#if defined (SIMD_X86)
    __m128 va = _mm_load_ps(out->data); /* minuend */
    __m128 vb = _mm_load_ps(a.data);    /* subtrahend */
    __m128 vres = _mm_sub_ps(va, vb);
    _mm_store_ps(out->data, vres);
#elif defined (SIMD_ARCH)
    float32x4_t va = vld1q_f32(out->data);
    float32x4_t vb = vld1q_f32(a.data);
    float32x4_t vres = vsubq_f32(va, vb);
    vst1q_f32(out->data, vres);
#else
    /* Portable fallback: subtract each of the 3 components. */
    for (int i = 0; i < VEC_SIZE; i++) {
        out->data[i] -= a.data[i];
    }
#endif
    return *out;
}
/* Component-wise difference: returns a - b. Works on the by-value
 * copy of `a`, so the caller's arguments are untouched. */
Vec3f_t vec3f_sub(Vec3f_t a, Vec3f_t b)
{
    Vec3f_t diff = a;
    vec3f_sub_r(&diff, b);
    return diff;
}
/*
 * In-place uniform scale: multiplies each component of *out by
 * `scalar`; returns the updated *out.
 */
Vec3f_t vec3f_scale_r(Vec3f_t *__restrict out, float scalar)
{
#if defined (SIMD_X86)
    __m128 va = _mm_load_ps(out->data);
    __m128 vb = _mm_set1_ps(scalar);    /* broadcast scalar to all lanes */
    __m128 vres = _mm_mul_ps(va, vb);
    _mm_store_ps(out->data, vres);
#elif defined (SIMD_ARCH)
    float32x4_t va = vld1q_f32(out->data);
    float32x4_t vb = vdupq_n_f32(scalar);
    float32x4_t vres = vmulq_f32(va, vb);
    vst1q_f32(out->data, vres);
#else
    /* Portable fallback: scale each of the 3 components. */
    for (int i = 0; i < VEC_SIZE; i++) {
        out->data[i] *= scalar;
    }
#endif
    return *out;
}
/* Returns `a` with every component multiplied by `scalar`; the
 * caller's vector is left unchanged (by-value copy). */
Vec3f_t vec3f_scale(Vec3f_t a, float scalar)
{
    Vec3f_t scaled = a;
    vec3f_scale_r(&scaled, scalar);
    return scaled;
}
//Vec3f_t vec3f_add_r(Vec3f_t *__restrict out, Vec3f_t a);
//Vec3f_t vec3f_add(Vec3f_t a, Vec3f_t b);
//
//Vec3f_t vec3f_sub_r(Vec3f_t *__restrict out, Vec3f_t a);
//Vec3f_t vec3f_sub(Vec3f_t a, Vec3f_t b);
//
//Vec3f_t vec3f_scale_r(Vec3f_t *__restrict out, float scale);
//Vec3f_t vec3f_scale(Vec3f_t a, float scale);

40
src/math/vec3.h Normal file
View File

@@ -0,0 +1,40 @@
//
// vec3.h
// main
//
// Created by Loïc GUEZO on 21/06/2025.
//
#ifndef vec3_h
#define vec3_h

#include "common_math.h"

/*
 * 3-component float vector.
 * The union lets callers use named fields (v.x, v.y, v.z) or
 * indexed access (v.data[i]) interchangeably. aligned(16) pads the
 * object to 16 bytes so the .c file's aligned SSE/NEON loads and
 * stores are legal on it.
 */
typedef union
{
    struct {float x, y, z; };
    float data[3];
} __attribute__((aligned(16))) Vec3f_t;

/* Build a vector from the first 3 floats at `val`.
 * NOTE(review): SIMD builds of vec3.c may load 4 floats from val —
 * verify the implementation before passing a bare float[3]. */
Vec3f_t vec3f_from_array(const float *__restrict val);

/* Build a vector from components (x, y, z). */
Vec3f_t vec3f(float x, float y, float z);

/* (f, f, f) — broadcast a scalar into all components. */
Vec3f_t vec3f_scalar(float f);

/* (0, 0, 0) */
Vec3f_t vec3f_zero(void);

/* Return a copy of *v. */
inline static Vec3f_t vec3f_clone(const Vec3f_t *__restrict v)
{
    return *v;
}

/* In-place: *out += a; returns the updated *out. */
Vec3f_t vec3f_add_r(Vec3f_t *__restrict out, Vec3f_t a);
/* Returns a + b. */
Vec3f_t vec3f_add(Vec3f_t a, Vec3f_t b);

/* In-place: *out -= a; returns the updated *out. */
Vec3f_t vec3f_sub_r(Vec3f_t *__restrict out, Vec3f_t a);
/* Returns a - b. */
Vec3f_t vec3f_sub(Vec3f_t a, Vec3f_t b);

/* In-place: *out *= scale; returns the updated *out. */
Vec3f_t vec3f_scale_r(Vec3f_t *__restrict out, float scale);
/* Returns a with every component multiplied by scale. */
Vec3f_t vec3f_scale(Vec3f_t a, float scale);

#endif /* vec3_h */

View File

@@ -1,15 +1,27 @@
#include "vec4.h" #include "vec4.h"
#include "common_math.h" #include "common_math.h"
Vec4f_t vec4(float x, float y, float z, float w) #define VEC_SIZE 4
Vec4f_t vec4f(float x, float y, float z, float w)
{ {
return (Vec4f_t){.x = x, .y = y, .z = z, .w = w}; return (Vec4f_t){.x = x, .y = y, .z = z, .w = w};
} }
Vec4f_t vec4f_from_array(float *__restrict val) Vec4f_t vec4f_from_array(const float *__restrict val)
{ {
Vec4f_t vec; Vec4f_t vec;
memcpy(vec.data, val, 4*sizeof(float)); #if defined (SIMD_X86)
__m128 arr = _mm_load_ps(val);
_mm_store_ps(vec.data, arr);
#elif defined (SIMD_ARCH)
float32x4_t arr = vld1q_f32(val);
vst1q_f32(vec.data, arr);
#else
for(int i = 0; i<VEC_SIZE; i++) {
vec.data[i] = val[i];
}
#endif
return vec; return vec;
} }
@@ -21,7 +33,7 @@ Vec4f_t vec4f_scalar(float f)
// add all register into data // add all register into data
#if defined(SIMD_X86) #if defined(SIMD_X86)
__m128 scalar = _mm_set1_ps(f); __m128 scalar = _mm_set1_ps(f);
_mm_storeu_ps(vec4.data, scalar); _mm_store_ps(vec4.data, scalar);
#elif defined(SIMD_ARCH) #elif defined(SIMD_ARCH)
float32x4_t scalar = vdupq_n_f32(f); float32x4_t scalar = vdupq_n_f32(f);
@@ -29,7 +41,7 @@ Vec4f_t vec4f_scalar(float f)
// add one by one each value to their specific address // add one by one each value to their specific address
#else #else
for (int i = 0; i < 4; i++) { for (int i = 0; i < VEC_SIZE; i++) {
vec4.data[i] = f; vec4.data[i] = f;
} }
#endif #endif
@@ -47,7 +59,7 @@ Vec4f_t vec4f_add_r(Vec4f_t *__restrict out, Vec4f_t a)
__m128 va = _mm_load_ps(a.data); __m128 va = _mm_load_ps(a.data);
__m128 vb = _mm_load_ps(out->data); __m128 vb = _mm_load_ps(out->data);
__m128 vres = _mm_add_ps(va, vb); __m128 vres = _mm_add_ps(va, vb);
_mm_storeu_ps(out->data, vres); _mm_store_ps(out->data, vres);
#elif defined (SIMD_ARCH) #elif defined (SIMD_ARCH)
float32x4_t va = vld1q_f32(a.data); float32x4_t va = vld1q_f32(a.data);
@@ -55,7 +67,7 @@ Vec4f_t vec4f_add_r(Vec4f_t *__restrict out, Vec4f_t a)
float32x4_t vres = vaddq_f32(va, vb); float32x4_t vres = vaddq_f32(va, vb);
vst1q_f32(out->data, vres); vst1q_f32(out->data, vres);
#else #else
for(int i = 0; i<4; i++) { for(int i = 0; i<VEC_SIZE; i++) {
out->data[i] += a.data[i]; out->data[i] += a.data[i];
} }
#endif #endif
@@ -74,7 +86,7 @@ Vec4f_t vec4f_sub_r(Vec4f_t *__restrict out, Vec4f_t a)
__m128 va = _mm_load_ps(out->data); __m128 va = _mm_load_ps(out->data);
__m128 vb = _mm_load_ps(a.data); __m128 vb = _mm_load_ps(a.data);
__m128 vres = _mm_sub_ps(va, vb); __m128 vres = _mm_sub_ps(va, vb);
_mm_storeu_ps(out->data, vres); _mm_store_ps(out->data, vres);
#elif defined (SIMD_ARCH) #elif defined (SIMD_ARCH)
float32x4_t va = vld1q_f32(a.data); float32x4_t va = vld1q_f32(a.data);
@@ -83,7 +95,7 @@ Vec4f_t vec4f_sub_r(Vec4f_t *__restrict out, Vec4f_t a)
vst1q_f32(out->data, vres); vst1q_f32(out->data, vres);
#else #else
for(int i = 0; i<4; i++) { for(int i = 0; i<VEC_SIZE; i++) {
out->data[i] -= a.data[i]; out->data[i] -= a.data[i];
} }
#endif #endif
@@ -102,7 +114,7 @@ Vec4f_t vec4f_scale_r(Vec4f_t *__restrict out, float scalar)
__m128 va = _mm_load_ps(out->data); __m128 va = _mm_load_ps(out->data);
__m128 vb = _mm_set1_ps(scalar); __m128 vb = _mm_set1_ps(scalar);
__m128 vres = _mm_mul_ps(va, vb); __m128 vres = _mm_mul_ps(va, vb);
_mm_storeu_ps(out->data, vres); _mm_store_ps(out->data, vres);
#elif defined (SIMD_ARCH) #elif defined (SIMD_ARCH)
float32x4_t va = vld1q_f32(out->data); float32x4_t va = vld1q_f32(out->data);
@@ -125,17 +137,21 @@ Vec4f_t vec4f_scale(Vec4f_t a, float scalar)
//float vec4f_dot(Vec4f_t a, Vec4f_t b) //float vec4f_dot(Vec4f_t a, Vec4f_t b)
//{ //{
// float result;
//#if defined (SIMD_X86) //#if defined (SIMD_X86)
// __m128 va = _mm_load_ps(a.data); // __m128 va = _mm_load_ps(a.data);
// __m128 vb = _mm_load_ps(b.data); // __m128 vb = _mm_load_ps(b.data);
// __m128 vres = _mm_mul_ps(va, vb); // __m128 vres = _mm_mul_ps(va, vb);
// return //
// __m128 shuf =
// result = 0.f;
// //
//#elif defined (SIMD_ARCH) //#elif defined (SIMD_ARCH)
// // result = 0.f;
//#else //#else
// // result = a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;
//#endif //#endif
// return result;
//} //}
// float vec4_dot(Vec4_t a, Vec4_t b) // float vec4_dot(Vec4_t a, Vec4_t b)

View File

@@ -8,14 +8,14 @@ typedef union
float data[4]; float data[4];
}__attribute__((aligned(16))) Vec4f_t; }__attribute__((aligned(16))) Vec4f_t;
Vec4f_t vec4f_from_array(float *__restrict val); Vec4f_t vec4f_from_array(const float *__restrict val);
Vec4f_t vec4(float x, float y, float z, float w); Vec4f_t vec4f(float x, float y, float z, float w);
// (f, f, f, f) // (f, f, f, f)
Vec4f_t vec4f_scalar(float f); Vec4f_t vec4f_scalar(float f);
// (0, 0, 0, 0) // (0, 0, 0, 0)
Vec4f_t vec4f_zero(void); Vec4f_t vec4f_zero(void);
inline static Vec4f_t vec4f_clone(Vec4f_t *__restrict v) inline static Vec4f_t vec4f_clone(const Vec4f_t *__restrict v)
{ {
return *v; return *v;
} }