10 #include <emmintrin.h>
// Fixed-point (x64) YCbCr -> RGB conversion used below:
//   R = (74*Y + 102*V)/64 - 223
//   G = (74*Y -  25*U -  52*V)/64 + 136
//   B = (74*Y + 129*U)/64 - 277
// All intermediate math is done in 16-bit lanes with saturating adds; the
// final pack to bytes clamps each channel to [0,255].

// Compute the chroma-only contribution (dR,dG,dB) for 8 chroma samples.
// 'u16'/'v16' hold the samples already widened from bytes to 16-bit lanes.
static inline void calcChromaDeltas(
	__m128i u16, __m128i v16,
	__m128i& dR, __m128i& dG, __m128i& dB)
{
	const __m128i RED_V   = _mm_set1_epi16( 102);
	const __m128i GREEN_U = _mm_set1_epi16( -25);
	const __m128i GREEN_V = _mm_set1_epi16( -52);
	const __m128i BLUE_U  = _mm_set1_epi16( 129);
	const __m128i CNST_R  = _mm_set1_epi16(-223);
	const __m128i CNST_G  = _mm_set1_epi16( 136);
	const __m128i CNST_B  = _mm_set1_epi16(-277);

	__m128i mr = _mm_srai_epi16(_mm_mullo_epi16(v16, RED_V), 6);
	__m128i mg = _mm_srai_epi16(
		_mm_adds_epi16(_mm_mullo_epi16(v16, GREEN_V),
		               _mm_mullo_epi16(u16, GREEN_U)), 6);
	// 129 * 255 overflows the signed 16-bit lane, but the product is
	// conceptually unsigned, so shift logically (srli) instead of
	// arithmetically (srai) here.
	__m128i mb = _mm_srli_epi16(_mm_mullo_epi16(u16, BLUE_U), 6);
	dR = _mm_adds_epi16(mr, CNST_R);
	dG = _mm_adds_epi16(mg, CNST_G);
	dB = _mm_adds_epi16(mb, CNST_B);
}

// Convert 16 luma samples (one aligned load from 'y') plus the matching
// chroma deltas into 16 RGBA8888 pixels, stored at out[0..3] (aligned).
static inline void convertLumaBlock(
	const __m128i* y, __m128i dR, __m128i dG, __m128i dB, __m128i* out)
{
	const __m128i ALPHA  = _mm_set1_epi16(  -1); // alpha byte = 0xFF
	const __m128i COEF_Y = _mm_set1_epi16(  74);
	const __m128i Y_MASK = _mm_set1_epi16(0x00FF);

	__m128i y16 = _mm_load_si128(y);
	// De-interleave into even/odd pixels so each value gets a 16-bit lane.
	__m128i yEven = _mm_and_si128(y16, Y_MASK);
	__m128i yOdd  = _mm_srli_epi16(y16, 8);
	__m128i dyEven = _mm_srai_epi16(_mm_mullo_epi16(yEven, COEF_Y), 6);
	__m128i dyOdd  = _mm_srai_epi16(_mm_mullo_epi16(yOdd,  COEF_Y), 6);

	// Saturating add of luma and chroma parts per channel.
	__m128i rEven = _mm_adds_epi16(dR, dyEven);
	__m128i gEven = _mm_adds_epi16(dG, dyEven);
	__m128i bEven = _mm_adds_epi16(dB, dyEven);
	__m128i rOdd  = _mm_adds_epi16(dR, dyOdd);
	__m128i gOdd  = _mm_adds_epi16(dG, dyOdd);
	__m128i bOdd  = _mm_adds_epi16(dB, dyOdd);

	// packus clamps to [0,255]; unpackhi re-interleaves even/odd bytes back
	// into pixel order.
	__m128i r = _mm_unpackhi_epi8(_mm_packus_epi16(rEven, rEven),
	                              _mm_packus_epi16(rOdd,  rOdd));
	__m128i g = _mm_unpackhi_epi8(_mm_packus_epi16(gEven, gEven),
	                              _mm_packus_epi16(gOdd,  gOdd));
	__m128i b = _mm_unpackhi_epi8(_mm_packus_epi16(bEven, bEven),
	                              _mm_packus_epi16(bOdd,  bOdd));

	// Interleave channels to R,G,B,A byte order
	// (little-endian 32-bit words: r | g<<8 | b<<16 | 0xFF<<24).
	__m128i rbLo = _mm_unpacklo_epi8(r, b);
	__m128i rbHi = _mm_unpackhi_epi8(r, b);
	__m128i gaLo = _mm_unpacklo_epi8(g, ALPHA);
	__m128i gaHi = _mm_unpackhi_epi8(g, ALPHA);
	_mm_store_si128(out + 0, _mm_unpacklo_epi8(rbLo, gaLo));
	_mm_store_si128(out + 1, _mm_unpackhi_epi8(rbLo, gaLo));
	_mm_store_si128(out + 2, _mm_unpacklo_epi8(rbHi, gaHi));
	_mm_store_si128(out + 3, _mm_unpackhi_epi8(rbHi, gaHi));
}

// Convert one 32x2-pixel tile of 4:2:0 YCbCr to RGBA8888.
//   u_, v_     : 16 chroma samples each (16-byte aligned); every chroma
//                sample covers a 2x2 block of luma samples.
//   y0_, y1_   : 32 luma samples per row (16-byte aligned).
//   out0_,out1_: 32 output pixels per row (16-byte aligned).
// The same chroma line is applied to both luma rows (vertical subsampling).
static inline void yuv2rgb_sse2(
	const uint8_t* u_, const uint8_t* v_,
	const uint8_t* y0_, const uint8_t* y1_,
	uint32_t* out0_, uint32_t* out1_)
{
	const auto* u  = reinterpret_cast<const __m128i*>(u_);
	const auto* v  = reinterpret_cast<const __m128i*>(v_);
	const auto* y0 = reinterpret_cast<const __m128i*>(y0_);
	const auto* y1 = reinterpret_cast<const __m128i*>(y1_);
	auto* out0 = reinterpret_cast<__m128i*>(out0_);
	auto* out1 = reinterpret_cast<__m128i*>(out1_);

	const __m128i ZERO = _mm_setzero_si128();
	__m128i u0f = _mm_load_si128(u);
	__m128i v0f = _mm_load_si128(v);

	__m128i dR, dG, dB;

	// Low 8 chroma samples -> left 16 pixels of both output rows.
	calcChromaDeltas(_mm_unpacklo_epi8(u0f, ZERO),
	                 _mm_unpacklo_epi8(v0f, ZERO), dR, dG, dB);
	convertLumaBlock(y0 + 0, dR, dG, dB, out0 + 0);
	convertLumaBlock(y1 + 0, dR, dG, dB, out1 + 0);

	// High 8 chroma samples -> right 16 pixels of both output rows.
	calcChromaDeltas(_mm_unpackhi_epi8(u0f, ZERO),
	                 _mm_unpackhi_epi8(v0f, ZERO), dR, dG, dB);
	convertLumaBlock(y0 + 1, dR, dG, dB, out0 + 4);
	convertLumaBlock(y1 + 1, dR, dG, dB, out1 + 4);
}
221 static inline void convertHelperSSE2(
222 const th_ycbcr_buffer& buffer,
RawFrame& output)
224 const int width = buffer[0].width;
225 const int y_stride = buffer[0].stride;
226 const int uv_stride2 = buffer[1].stride / 2;
228 assert((width % 32) == 0);
229 assert((buffer[0].height % 2) == 0);
231 for (
int y = 0; y < buffer[0].height; y += 2) {
232 const uint8_t* pY1 = buffer[0].data + y * y_stride;
233 const uint8_t* pY2 = buffer[0].data + (y + 1) * y_stride;
234 const uint8_t* pCb = buffer[1].data + y * uv_stride2;
235 const uint8_t* pCr = buffer[2].data + y * uv_stride2;
239 for (
int x = 0;
x < width;
x += 32) {
241 yuv2rgb_sse2(pCb, pCr, pY1, pY2, out0, out1);
272 [[nodiscard]]
static constexpr
Coefs getCoefs()
275 for (
auto i :
xrange(256)) {
285 template<std::
unsigned_
integral Pixel>
286 [[nodiscard]]
static inline Pixel calc(
287 const PixelFormat& format,
int y,
int ruv,
int guv,
int buv)
292 if constexpr (
sizeof(
Pixel) == 4) {
293 return (r << 0) | (
g << 8) | (b << 16);
299 template<std::
unsigned_
integral Pixel>
300 static void convertHelper(
const th_ycbcr_buffer& buffer, RawFrame& output,
301 const PixelFormat& format)
303 assert(buffer[1].width * 2 == buffer[0].width);
304 assert(buffer[1].height * 2 == buffer[0].height);
306 static constexpr Coefs coefs = getCoefs();
308 const int width = buffer[0].width;
309 const int y_stride = buffer[0].stride;
310 const int uv_stride2 = buffer[1].stride / 2;
312 for (
int y = 0; y < buffer[0].height; y += 2) {
313 const uint8_t* pY = buffer[0].data + y * y_stride;
314 const uint8_t* pCb = buffer[1].data + y * uv_stride2;
315 const uint8_t* pCr = buffer[2].data + y * uv_stride2;
316 auto* out0 = output.getLinePtrDirect<
Pixel>(y + 0);
317 auto* out1 = output.getLinePtrDirect<
Pixel>(y + 1);
319 for (
int x = 0;
x < width;
320 x += 2, pY += 2, ++pCr, ++pCb, out0 += 2, out1 += 2) {
321 int ruv = coefs.rv[*pCr];
322 int guv = coefs.gu[*pCb] + coefs.gv[*pCr];
323 int buv = coefs.bu[*pCb];
325 int Y00 = coefs.y[pY[0]];
326 out0[0] = calc<Pixel>(format, Y00, ruv, guv, buv);
328 int Y01 = coefs.y[pY[1]];
329 out0[1] = calc<Pixel>(format, Y01, ruv, guv, buv);
331 int Y10 = coefs.y[pY[y_stride + 0]];
332 out1[0] = calc<Pixel>(format, Y10, ruv, guv, buv);
334 int Y11 = coefs.y[pY[y_stride + 1]];
335 out1[1] = calc<Pixel>(format, Y11, ruv, guv, buv);
338 output.setLineWidth(y + 0, width);
339 output.setLineWidth(y + 1, width);
346 if (
format.getBytesPerPixel() == 4) {
348 convertHelperSSE2(input, output);
350 convertHelper<uint32_t>(input, output,
format);
353 assert(
format.getBytesPerPixel() == 2);
354 convertHelper<uint16_t>(input, output,
format);
const PixelFormat & getPixelFormat() const
A video frame as output by the VDP scanline conversion unit, before any postprocessing filters are applied.
Pixel * getLinePtrDirect(unsigned y)
void setLineWidth(unsigned line, unsigned width)
uint8_t clipIntToByte(int x)
Clip x to range [0,255].
void format(SectorAccessibleDisk &disk, bool dos1)
Format the given disk (= a single partition).
void convert(const th_ycbcr_buffer &input, RawFrame &output)
constexpr KeyMatrixPosition x
Keyboard bindings.
constexpr auto xrange(T e)