44static inline void yuv2rgb_sse2(
45 const uint8_t* u_ ,
const uint8_t* v_,
46 const uint8_t* y0_,
const uint8_t* y1_,
52 const auto* u =
reinterpret_cast<const __m128i*
>(u_);
53 const auto* v =
reinterpret_cast<const __m128i*
>(v_);
54 const auto* y0 =
reinterpret_cast<const __m128i*
>(y0_);
55 const auto* y1 =
reinterpret_cast<const __m128i*
>(y1_);
56 auto* out0 =
reinterpret_cast< __m128i*
>(out0_);
57 auto* out1 =
reinterpret_cast< __m128i*
>(out1_);
60 const __m128i ZERO = _mm_setzero_si128();
61 const __m128i ALPHA = _mm_set1_epi16( -1);
62 const __m128i RED_V = _mm_set1_epi16( 102);
63 const __m128i GREEN_U = _mm_set1_epi16( -25);
64 const __m128i GREEN_V = _mm_set1_epi16( -52);
65 const __m128i BLUE_U = _mm_set1_epi16( 129);
66 const __m128i COEF_Y = _mm_set1_epi16( 74);
67 const __m128i CNST_R = _mm_set1_epi16( -223);
68 const __m128i CNST_G = _mm_set1_epi16( 136);
69 const __m128i CNST_B = _mm_set1_epi16( -277);
70 const __m128i Y_MASK = _mm_set1_epi16(0x00FF);
73 __m128i u0f = _mm_load_si128(u);
74 __m128i v0f = _mm_load_si128(v);
75 __m128i u07 = _mm_unpacklo_epi8(u0f, ZERO);
76 __m128i v07 = _mm_unpacklo_epi8(v0f, ZERO);
77 __m128i mr07 = _mm_srai_epi16(_mm_mullo_epi16(v07, RED_V), 6);
78 __m128i sg07 = _mm_mullo_epi16(v07, GREEN_V);
79 __m128i tg07 = _mm_mullo_epi16(u07, GREEN_U);
80 __m128i mg07 = _mm_srai_epi16(_mm_adds_epi16(sg07, tg07), 6);
81 __m128i mb07 = _mm_srli_epi16(_mm_mullo_epi16(u07, BLUE_U), 6);
82 __m128i dr07 = _mm_adds_epi16(mr07, CNST_R);
83 __m128i dg07 = _mm_adds_epi16(mg07, CNST_G);
84 __m128i db07 = _mm_adds_epi16(mb07, CNST_B);
87 __m128i y00_0f = _mm_load_si128(y0 + 0);
88 __m128i y00_even = _mm_and_si128(y00_0f, Y_MASK);
89 __m128i y00_odd = _mm_srli_epi16(y00_0f, 8);
90 __m128i dy00_even = _mm_srai_epi16(_mm_mullo_epi16(y00_even, COEF_Y), 6);
91 __m128i dy00_odd = _mm_srai_epi16(_mm_mullo_epi16(y00_odd, COEF_Y), 6);
92 __m128i r00_even = _mm_adds_epi16(dr07, dy00_even);
93 __m128i g00_even = _mm_adds_epi16(dg07, dy00_even);
94 __m128i b00_even = _mm_adds_epi16(db07, dy00_even);
95 __m128i r00_odd = _mm_adds_epi16(dr07, dy00_odd);
96 __m128i g00_odd = _mm_adds_epi16(dg07, dy00_odd);
97 __m128i b00_odd = _mm_adds_epi16(db07, dy00_odd);
98 __m128i r00_0f = _mm_unpackhi_epi8(_mm_packus_epi16(r00_even, r00_even),
99 _mm_packus_epi16(r00_odd, r00_odd));
100 __m128i g00_0f = _mm_unpackhi_epi8(_mm_packus_epi16(g00_even, g00_even),
101 _mm_packus_epi16(g00_odd, g00_odd));
102 __m128i b00_0f = _mm_unpackhi_epi8(_mm_packus_epi16(b00_even, b00_even),
103 _mm_packus_epi16(b00_odd, b00_odd));
104 __m128i rb00_07 = _mm_unpacklo_epi8(r00_0f, b00_0f);
105 __m128i rb00_8f = _mm_unpackhi_epi8(r00_0f, b00_0f);
106 __m128i ga00_07 = _mm_unpacklo_epi8(g00_0f, ALPHA);
107 __m128i ga00_8f = _mm_unpackhi_epi8(g00_0f, ALPHA);
108 __m128i rgba00_03 = _mm_unpacklo_epi8(rb00_07, ga00_07);
109 __m128i rgba00_47 = _mm_unpackhi_epi8(rb00_07, ga00_07);
110 __m128i rgba00_8b = _mm_unpacklo_epi8(rb00_8f, ga00_8f);
111 __m128i rgba00_cf = _mm_unpackhi_epi8(rb00_8f, ga00_8f);
112 _mm_store_si128(out0 + 0, rgba00_03);
113 _mm_store_si128(out0 + 1, rgba00_47);
114 _mm_store_si128(out0 + 2, rgba00_8b);
115 _mm_store_si128(out0 + 3, rgba00_cf);
118 __m128i y10_0f = _mm_load_si128(y1 + 0);
119 __m128i y10_even = _mm_and_si128(y10_0f, Y_MASK);
120 __m128i y10_odd = _mm_srli_epi16(y10_0f, 8);
121 __m128i dy10_even = _mm_srai_epi16(_mm_mullo_epi16(y10_even, COEF_Y), 6);
122 __m128i dy10_odd = _mm_srai_epi16(_mm_mullo_epi16(y10_odd, COEF_Y), 6);
123 __m128i r10_even = _mm_adds_epi16(dr07, dy10_even);
124 __m128i g10_even = _mm_adds_epi16(dg07, dy10_even);
125 __m128i b10_even = _mm_adds_epi16(db07, dy10_even);
126 __m128i r10_odd = _mm_adds_epi16(dr07, dy10_odd);
127 __m128i g10_odd = _mm_adds_epi16(dg07, dy10_odd);
128 __m128i b10_odd = _mm_adds_epi16(db07, dy10_odd);
129 __m128i r10_0f = _mm_unpackhi_epi8(_mm_packus_epi16(r10_even, r10_even),
130 _mm_packus_epi16(r10_odd, r10_odd));
131 __m128i g10_0f = _mm_unpackhi_epi8(_mm_packus_epi16(g10_even, g10_even),
132 _mm_packus_epi16(g10_odd, g10_odd));
133 __m128i b10_0f = _mm_unpackhi_epi8(_mm_packus_epi16(b10_even, b10_even),
134 _mm_packus_epi16(b10_odd, b10_odd));
135 __m128i rb10_07 = _mm_unpacklo_epi8(r10_0f, b10_0f);
136 __m128i rb10_8f = _mm_unpackhi_epi8(r10_0f, b10_0f);
137 __m128i ga10_07 = _mm_unpacklo_epi8(g10_0f, ALPHA);
138 __m128i ga10_8f = _mm_unpackhi_epi8(g10_0f, ALPHA);
139 __m128i rgba10_03 = _mm_unpacklo_epi8(rb10_07, ga10_07);
140 __m128i rgba10_47 = _mm_unpackhi_epi8(rb10_07, ga10_07);
141 __m128i rgba10_8b = _mm_unpacklo_epi8(rb10_8f, ga10_8f);
142 __m128i rgba10_cf = _mm_unpackhi_epi8(rb10_8f, ga10_8f);
143 _mm_store_si128(out1 + 0, rgba10_03);
144 _mm_store_si128(out1 + 1, rgba10_47);
145 _mm_store_si128(out1 + 2, rgba10_8b);
146 _mm_store_si128(out1 + 3, rgba10_cf);
149 __m128i u8f = _mm_unpackhi_epi8(u0f, ZERO);
150 __m128i v8f = _mm_unpackhi_epi8(v0f, ZERO);
151 __m128i mr8f = _mm_srai_epi16(_mm_mullo_epi16(v8f, RED_V), 6);
152 __m128i sg8f = _mm_mullo_epi16(v8f, GREEN_V);
153 __m128i tg8f = _mm_mullo_epi16(u8f, GREEN_U);
154 __m128i mg8f = _mm_srai_epi16(_mm_adds_epi16(sg8f, tg8f), 6);
155 __m128i mb8f = _mm_srli_epi16(_mm_mullo_epi16(u8f, BLUE_U), 6);
156 __m128i dr8f = _mm_adds_epi16(mr8f, CNST_R);
157 __m128i dg8f = _mm_adds_epi16(mg8f, CNST_G);
158 __m128i db8f = _mm_adds_epi16(mb8f, CNST_B);
161 __m128i y01_0f = _mm_load_si128(y0 + 1);
162 __m128i y01_even = _mm_and_si128(y01_0f, Y_MASK);
163 __m128i y01_odd = _mm_srli_epi16(y01_0f, 8);
164 __m128i dy01_even = _mm_srai_epi16(_mm_mullo_epi16(y01_even, COEF_Y), 6);
165 __m128i dy01_odd = _mm_srai_epi16(_mm_mullo_epi16(y01_odd, COEF_Y), 6);
166 __m128i r01_even = _mm_adds_epi16(dr8f, dy01_even);
167 __m128i g01_even = _mm_adds_epi16(dg8f, dy01_even);
168 __m128i b01_even = _mm_adds_epi16(db8f, dy01_even);
169 __m128i r01_odd = _mm_adds_epi16(dr8f, dy01_odd);
170 __m128i g01_odd = _mm_adds_epi16(dg8f, dy01_odd);
171 __m128i b01_odd = _mm_adds_epi16(db8f, dy01_odd);
172 __m128i r01_0f = _mm_unpackhi_epi8(_mm_packus_epi16(r01_even, r01_even),
173 _mm_packus_epi16(r01_odd, r01_odd));
174 __m128i g01_0f = _mm_unpackhi_epi8(_mm_packus_epi16(g01_even, g01_even),
175 _mm_packus_epi16(g01_odd, g01_odd));
176 __m128i b01_0f = _mm_unpackhi_epi8(_mm_packus_epi16(b01_even, b01_even),
177 _mm_packus_epi16(b01_odd, b01_odd));
178 __m128i rb01_07 = _mm_unpacklo_epi8(r01_0f, b01_0f);
179 __m128i rb01_8f = _mm_unpackhi_epi8(r01_0f, b01_0f);
180 __m128i ga01_07 = _mm_unpacklo_epi8(g01_0f, ALPHA);
181 __m128i ga01_8f = _mm_unpackhi_epi8(g01_0f, ALPHA);
182 __m128i rgba01_03 = _mm_unpacklo_epi8(rb01_07, ga01_07);
183 __m128i rgba01_47 = _mm_unpackhi_epi8(rb01_07, ga01_07);
184 __m128i rgba01_8b = _mm_unpacklo_epi8(rb01_8f, ga01_8f);
185 __m128i rgba01_cf = _mm_unpackhi_epi8(rb01_8f, ga01_8f);
186 _mm_store_si128(out0 + 4, rgba01_03);
187 _mm_store_si128(out0 + 5, rgba01_47);
188 _mm_store_si128(out0 + 6, rgba01_8b);
189 _mm_store_si128(out0 + 7, rgba01_cf);
192 __m128i y11_0f = _mm_load_si128(y1 + 1);
193 __m128i y11_even = _mm_and_si128(y11_0f, Y_MASK);
194 __m128i y11_odd = _mm_srli_epi16(y11_0f, 8);
195 __m128i dy11_even = _mm_srai_epi16(_mm_mullo_epi16(y11_even, COEF_Y), 6);
196 __m128i dy11_odd = _mm_srai_epi16(_mm_mullo_epi16(y11_odd, COEF_Y), 6);
197 __m128i r11_even = _mm_adds_epi16(dr8f, dy11_even);
198 __m128i g11_even = _mm_adds_epi16(dg8f, dy11_even);
199 __m128i b11_even = _mm_adds_epi16(db8f, dy11_even);
200 __m128i r11_odd = _mm_adds_epi16(dr8f, dy11_odd);
201 __m128i g11_odd = _mm_adds_epi16(dg8f, dy11_odd);
202 __m128i b11_odd = _mm_adds_epi16(db8f, dy11_odd);
203 __m128i r11_0f = _mm_unpackhi_epi8(_mm_packus_epi16(r11_even, r11_even),
204 _mm_packus_epi16(r11_odd, r11_odd));
205 __m128i g11_0f = _mm_unpackhi_epi8(_mm_packus_epi16(g11_even, g11_even),
206 _mm_packus_epi16(g11_odd, g11_odd));
207 __m128i b11_0f = _mm_unpackhi_epi8(_mm_packus_epi16(b11_even, b11_even),
208 _mm_packus_epi16(b11_odd, b11_odd));
209 __m128i rb11_07 = _mm_unpacklo_epi8(r11_0f, b11_0f);
210 __m128i rb11_8f = _mm_unpackhi_epi8(r11_0f, b11_0f);
211 __m128i ga11_07 = _mm_unpacklo_epi8(g11_0f, ALPHA);
212 __m128i ga11_8f = _mm_unpackhi_epi8(g11_0f, ALPHA);
213 __m128i rgba11_03 = _mm_unpacklo_epi8(rb11_07, ga11_07);
214 __m128i rgba11_47 = _mm_unpackhi_epi8(rb11_07, ga11_07);
215 __m128i rgba11_8b = _mm_unpacklo_epi8(rb11_8f, ga11_8f);
216 __m128i rgba11_cf = _mm_unpackhi_epi8(rb11_8f, ga11_8f);
217 _mm_store_si128(out1 + 4, rgba11_03);
218 _mm_store_si128(out1 + 5, rgba11_47);
219 _mm_store_si128(out1 + 6, rgba11_8b);
220 _mm_store_si128(out1 + 7, rgba11_cf);
223static inline void convertHelperSSE2(
224 const th_ycbcr_buffer& buffer,
RawFrame& output)
226 const int width = buffer[0].width;
227 const size_t y_stride = buffer[0].stride;
228 const size_t uv_stride2 = buffer[1].stride / 2;
230 assert((width % 32) == 0);
231 assert((buffer[0].height % 2) == 0);
233 for (
int y = 0; y < buffer[0].height; y += 2) {
234 const uint8_t* pY1 = buffer[0].data + (y + 0) * y_stride;
235 const uint8_t* pY2 = buffer[0].data + (y + 1) * y_stride;
236 const uint8_t* pCb = buffer[1].data + (y + 0) * uv_stride2;
237 const uint8_t* pCr = buffer[2].data + (y + 0) * uv_stride2;
241 for (
int x = 0; x < width; x += 32) {
243 yuv2rgb_sse2(pCb, pCr, pY1, pY2, &out0[x], &out1[x]);
// Fixed-point YCbCr -> RGB coefficients (ITU-R BT.601, 16-235/16-240 "studio
// swing" input expanded to full range) scaled by 2^PREC. Used by the scalar
// fallback path; the SSE2 path uses its own 6-bit variants.
static constexpr int PREC = 15;
static constexpr int COEF_Y  = int(1.164 * (1 << PREC) + 0.5);
static constexpr int COEF_RV = int(1.596 * (1 << PREC) + 0.5);
static constexpr int COEF_GU = int(0.391 * (1 << PREC) + 0.5);
static constexpr int COEF_GV = int(0.813 * (1 << PREC) + 0.5);
static constexpr int COEF_BU = int(2.018 * (1 << PREC) + 0.5);
// Per-component lookup tables, indexed by a raw 8-bit Y/Cb/Cr sample value.
// Each entry is the sample's fixed-point (2^PREC-scaled) contribution to one
// RGB channel; filled once at compile time by getCoefs().
// NOTE(review): the struct header/closer were lost in the extracted listing
// and are reconstructed here (name 'Coefs' taken from getCoefs()'s return
// type) — confirm against upstream.
struct Coefs {
	std::array<int, 256> gu; // green contribution of Cb
	std::array<int, 256> gv; // green contribution of Cr
	std::array<int, 256> bu; // blue  contribution of Cb
	std::array<int, 256> rv; // red   contribution of Cr
	std::array<int, 256> y;  // luma  contribution (includes rounding bias)
};
272[[nodiscard]]
static constexpr Coefs getCoefs()
275 for (
auto i :
xrange(256)) {
276 coefs.
gu[i] = -COEF_GU * (i - 128);
277 coefs.
gv[i] = -COEF_GV * (i - 128);
278 coefs.
bu[i] = COEF_BU * (i - 128);
279 coefs.
rv[i] = COEF_RV * (i - 128);
280 coefs.
y[i] = COEF_Y * (i - 16) + (PREC / 2);
285[[nodiscard]]
static inline Pixel calc(
286 int y,
int ruv,
int guv,
int buv)
291 return (r << 0) | (
g << 8) | (b << 16);
294static void convertHelper(
const th_ycbcr_buffer& buffer, RawFrame& output)
296 assert(buffer[1].width * 2 == buffer[0].width);
297 assert(buffer[1].height * 2 == buffer[0].height);
299 static constexpr Coefs coefs = getCoefs();
301 const int width = buffer[0].width;
302 const size_t y_stride = buffer[0].stride;
303 const size_t uv_stride2 = buffer[1].stride / 2;
305 for (
int y = 0; y < buffer[0].height; y += 2) {
306 const uint8_t* pY = buffer[0].data + y * y_stride;
307 const uint8_t* pCb = buffer[1].data + y * uv_stride2;
308 const uint8_t* pCr = buffer[2].data + y * uv_stride2;
309 auto out0 = output.getLineDirect(y + 0);
310 auto out1 = output.getLineDirect(y + 1);
312 for (
int x = 0; x < width; x += 2, pY += 2, ++pCr, ++pCb) {
313 int ruv = coefs.rv[*pCr];
314 int guv = coefs.gu[*pCb] + coefs.gv[*pCr];
315 int buv = coefs.bu[*pCb];
317 int Y00 = coefs.y[pY[0]];
318 out0[x + 0] = calc(Y00, ruv, guv, buv);
320 int Y01 = coefs.y[pY[1]];
321 out0[x + 1] = calc(Y01, ruv, guv, buv);
323 int Y10 = coefs.y[pY[y_stride + 0]];
324 out1[x + 0] = calc(Y10, ruv, guv, buv);
326 int Y11 = coefs.y[pY[y_stride + 1]];
327 out1[x + 1] = calc(Y11, ruv, guv, buv);
330 output.setLineWidth(y + 0, width);
331 output.setLineWidth(y + 1, width);
338 convertHelperSSE2(input, output);
341 convertHelper(input, output);
A video frame as output by the VDP scanline conversion unit, before any postprocessing filters are applied.
std::span< Pixel > getLineDirect(unsigned y)
void setLineWidth(unsigned line, unsigned width)
uint8_t clipIntToByte(int x)
Clip x to range [0,255].
void convert(const th_ycbcr_buffer &input, RawFrame &output)
std::array< int, 256 > gu
std::array< int, 256 > gv
std::array< int, 256 > rv
std::array< int, 256 > bu
constexpr auto xrange(T e)