@@ -964,9 +964,9 @@ static inline void __lsx_f16x4_store(ggml_fp16_t * x, __m128 y) {
 
 static inline float32x4_t __lzs_f16cx4_load(const ggml_fp16_t * x) {
 #ifdef __NNPA__
-    uint16x8_t tmp = vec_xl(0, (const ggml_fp16_t *)x);
-    uint16x8_t nnpa = vec_convert_from_fp16(tmp, 0);
-    return vec_extend_to_fp32_hi(nnpa, 0);
+    uint16x8_t v_x = vec_xl(0, (const ggml_fp16_t *)x);
+    uint16x8_t nnpa_dlf16 = vec_convert_from_fp16(v_x, 0);
+    return vec_extend_to_fp32_hi(nnpa_dlf16, 0);
 #else
     float tmp[4];
 
@@ -980,20 +980,20 @@ static inline float32x4_t __lzs_f16cx4_load(const ggml_fp16_t * x) {
 #endif
 }
 
-static inline void __lzs_f16cx4_store(ggml_fp16_t * x, float32x4_t y) {
+static inline void __lzs_f16cx4_store(ggml_fp16_t * x, float32x4_t v_y) {
 #ifdef __NNPA__
     float32x4_t zero = vec_splats(0.0f);
-    uint16x8_t nnpa = vec_round_from_fp32(y, zero, 0);
-    x[0] = nnpa[0];
-    x[1] = nnpa[1];
-    x[2] = nnpa[2];
-    x[3] = nnpa[3];
+    uint16x8_t v_x = vec_round_from_fp32(v_y, zero, 0);
+    x[0] = vec_extract(v_x, 0);
+    x[1] = vec_extract(v_x, 1);
+    x[2] = vec_extract(v_x, 2);
+    x[3] = vec_extract(v_x, 3);
 #else
     float arr[4];
 
     // note: keep type-cast here to prevent compiler bugs
     // see: https://github.com/ggml-org/llama.cpp/issues/12846
-    vec_xst(y, 0, (float *)(arr));
+    vec_xst(v_y, 0, (float *)(arr));
 
     for (int i = 0; i < 4; i++) {
         x[i] = GGML_FP32_TO_FP16(arr[i]);
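For context, a minimal round-trip sketch of how the two helpers above are typically exercised. It assumes a translation unit that already includes ggml's s390x SIMD mapping header, so ggml_fp16_t, float32x4_t, the GGML_FP32_TO_FP16 macro, and the __lzs_f16cx4_load/__lzs_f16cx4_store helpers are in scope; the wrapper function name is hypothetical:

// Illustrative sketch only (hypothetical helper): round-trips four FP16
// values through the widened FP32 lanes using the helpers from this diff.
static void lzs_f16cx4_roundtrip_example(void) {
    ggml_fp16_t src[4];
    ggml_fp16_t dst[4];

    // fill the source with FP16-encoded values
    for (int i = 0; i < 4; i++) {
        src[i] = GGML_FP32_TO_FP16(0.5f * (float) i);
    }

    // widen four FP16 values to FP32 lanes, then narrow them back
    float32x4_t v_y = __lzs_f16cx4_load(src);
    __lzs_f16cx4_store(dst, v_y);

    // dst now holds the same four FP16 values as src (up to rounding)
}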