// Build the table of candidate motion vectors used by the ZMBV block-matching
// search, sorted by increasing search cost.
// NOTE(review): partial extract — the enclosing function header, the 'p'/'x'
// declarations and several original lines are not visible here.
39 std::array<CodecVector, VECTOR_TAB_SIZE> result = {};
// First pass: for each magnitude i = 1..MAX_VECTOR, add the 8 axis-aligned
// and diagonal vectors (+-i,0), (0,+-i), (+-i,+-i); 'p' advances 8 slots per i.
46 for (
int i = 1; i <= int(
MAX_VECTOR); ++i, p += 8) {
47 result[p + 0] = {int8_t( i), int8_t( 0)};
48 result[p + 1] = {int8_t(-i), int8_t( 0)};
49 result[p + 2] = {int8_t( 0), int8_t( i)};
50 result[p + 3] = {int8_t( 0), int8_t(-i)};
51 result[p + 4] = {int8_t( i), int8_t( i)};
52 result[p + 5] = {int8_t(-i), int8_t( i)};
53 result[p + 6] = {int8_t( i), int8_t(-i)};
54 result[p + 7] = {int8_t(-i), int8_t(-i)};
// Second pass: the remaining (+-x, +-y) combinations, 4 sign variants per
// (x, y) pair; y is limited to MAX_VECTOR/2 here (x loop not visible in this
// extract — presumably nested around original line 57; confirm in full file).
57 for (
int y = 1; y <= int(
MAX_VECTOR / 2); ++y) {
60 result[p + 0] = {int8_t(
x), int8_t( y)};
61 result[p + 1] = {int8_t(-
x), int8_t( y)};
62 result[p + 2] = {int8_t(
x), int8_t(-y)};
63 result[p + 3] = {int8_t(-
x), int8_t(-y)};
// Cost of trying a vector: its Euclidean length (cstd::sqrt is presumably a
// constexpr-capable sqrt helper — confirm against the 'cstd' header).
72 auto c =
cstd::sqrt(
double(v.x * v.x + v.y * v.y));
// Axis-aligned vectors get special treatment; the adjustment applied inside
// this branch (original line 74+) is not visible in this extract.
73 if ((v.x == 0) || (v.y == 0)) {
// Sort comparator: order candidates by (cost, x, y) so cheaper vectors are
// probed first; the x/y tie-breakers make the ordering deterministic.
85 return std::tuple(cost(l), l.
x, l.
y) <
86 std::tuple(cost(r), r.x, r.y);
// Pack an 8-bit-per-component pixel into 16bpp RGB565 layout:
// rrrrrggg gggbbbbb (red keeps top 5 bits, green top 6, blue top 5).
// NOTE(review): the line defining 'g' (original line 108,
// "unsigned g = pixelOps.green256(pixel);" presumably) is missing from this
// extract, as is the function signature.
107 unsigned r = pixelOps.
red256(pixel);
109 unsigned b = pixelOps.
blue256(pixel);
// Shift amounts written as (target bit position - bits already consumed by
// the mask) to document the 5/6/5 field layout.
110 dest = ((r & 0xF8) << (11 - 3)) | ((
g & 0xFC) << (5 - 2)) | (b >> 3);
// 32bpp overload: pack the three 8-bit components into 0x00RRGGBB.
// NOTE(review): the first line of the signature and the opening brace
// (original lines 113/115-116) are outside this extract.
114 const PixelOperations<unsigned>& pixelOps,
117 unsigned r = pixelOps.red256(pixel);
118 unsigned g = pixelOps.green256(pixel);
119 unsigned b = pixelOps.blue256(pixel);
// Red in bits 16-23, green 8-15, blue 0-7; top byte stays 0.
120 dest = (r << 16) | (
g << 8) | b;
// Constructor fragment: zero the zlib stream state (required before
// deflateInit) and initialize deflate at compression level 6 (zlib default
// speed/ratio trade-off).
// NOTE(review): deflateInit's return value is not checked here — worth
// confirming whether allocation failure is handled elsewhere.
129 memset(&zstream, 0,
sizeof(zstream));
130 deflateInit(&zstream, 6);
// Allocate and initialize the encoder's working buffers for the given
// bits-per-pixel, and precompute the pixel offset of every 16x16(?) block.
// NOTE(review): several original lines (152-156, 158-162, 164-171, 173-175,
// 178, 181-185, 190+) are missing from this extract.
151 void ZMBVEncoder::setupBuffers(
unsigned bpp)
// Map bpp to the on-disk ZMBV pixel format tag.
157 format = ZMBV_FORMAT_16BPP;
163 format = ZMBV_FORMAT_32BPP;
// Frame buffers carry a MAX_VECTOR border above and below the image so the
// motion search can read (vy * pitch + vx) without bounds checks; the extra
// 2048 bytes are presumably additional slack — confirm in full file.
172 unsigned bufsize = (height + 2 *
MAX_VECTOR) * pitch * pixelSize + 2048;
// Zero both frame buffers so the first delta frame compares against black.
176 memset(oldframe.
data(), 0, bufsize);
177 memset(newframe.
data(), 0, bufsize);
// Size the compressed-output buffer from the worst-case estimate.
179 outputSize = neededSize();
180 output.
resize(outputSize);
// Precompute the starting pixel offset of each block, row-major over the
// xBlocks * yBlocks grid (the RHS of the assignment is outside this extract).
186 blockOffsets.
resize(xBlocks * yBlocks);
187 for (
auto y :
xrange(yBlocks)) {
188 for (
auto x :
xrange(xBlocks)) {
189 blockOffsets[y * xBlocks +
x] =
// Worst-case output-buffer size for one compressed frame: raw pixel data plus
// 2 bytes per 8x8 block-grid cell (motion-vector bytes) plus 1024 bytes of
// header/deflate slack.
// NOTE(review): the return statement (original line 200) is outside this
// extract.
196 unsigned ZMBVEncoder::neededSize()
const
198 unsigned f = pixelSize;
199 f = f * width * height + 2 * (1 + (width / 8)) * (1 + (height / 8)) + 1024;
// Cheap pre-check of motion vector (vx, vy) for the block starting at
// 'offset': count mismatching pixels between the shifted old frame and the
// new frame. The caller (addXorFrame) only fully evaluates vectors whose
// count here is < 4, so this presumably samples the block coarsely — the
// loop headers (original lines 209-210) are not visible in this extract.
204 unsigned ZMBVEncoder::possibleBlock(
int vx,
int vy,
unsigned offset)
// Old-frame pointer shifted by the candidate vector; the MAX_VECTOR border
// added in setupBuffers makes this offset safe.
207 auto* pOld = &(
reinterpret_cast<P*
>(oldframe.
data()))[offset + (vy * pitch) + vx];
208 auto* pNew = &(
reinterpret_cast<P*
>(newframe.
data()))[offset];
// Accumulate mismatch count in 'ret' (declared outside this extract).
211 if (pOld[
x] != pNew[
x]) ++ret;
// Exact evaluation of motion vector (vx, vy) for the block at 'offset':
// count every mismatching pixel between the shifted old frame and the new
// frame (same structure as possibleBlock, but presumably iterating the full
// block — the loop headers, original lines 225-226, are not visible here).
220 unsigned ZMBVEncoder::compareBlock(
int vx,
int vy,
unsigned offset)
223 auto* pOld = &(
reinterpret_cast<P*
>(oldframe.
data()))[offset + (vy * pitch) + vx];
224 auto* pNew = &(
reinterpret_cast<P*
>(newframe.
data()))[offset];
// Accumulate mismatch count in 'ret' (declared outside this extract).
227 if (pOld[
x] != pNew[
x]) ++ret;
// Append the XOR delta of one block to the 'work' buffer: for each pixel,
// XOR the new-frame value with the motion-compensated old-frame value and
// store it in little-endian pixel format (LE_P) via writePixel.
// NOTE(review): the template header and the per-pixel loop headers (original
// lines 238-240, 243-244) are outside this extract.
236 void ZMBVEncoder::addXorBlock(
237 const PixelOperations<P>& pixelOps,
int vx,
int vy,
unsigned offset,
unsigned& workUsed)
241 auto* pOld = &(
reinterpret_cast<P*
>(oldframe.
data()))[offset + (vy * pitch) + vx];
242 auto* pNew = &(
reinterpret_cast<P*
>(newframe.
data()))[offset];
245 P pXor = pNew[
x] ^ pOld[
x];
// Write the delta pixel into the work buffer and advance the fill cursor.
246 writePixel(pixelOps, pXor, *
reinterpret_cast<LE_P*
>(&work[workUsed]));
247 workUsed +=
sizeof(P);
// Encode a delta (inter) frame: for every block, search the candidate motion
// vectors for the one with the fewest mismatching pixels, emit the chosen
// vector, and append XOR data for blocks that still differ.
// NOTE(review): many original lines (256, 259-268, 271, 274-275, 280-281,
// 283-288, 291, 294+) are missing from this extract.
255 void ZMBVEncoder::addXorFrame(
const PixelFormat& pixelFormat,
unsigned& workUsed)
257 PixelOperations<P> pixelOps(pixelFormat);
// The per-block vector bytes are written at the current head of 'work'.
258 auto* vectors =
reinterpret_cast<int8_t*
>(&work[workUsed]);
262 unsigned blockcount = xBlocks * yBlocks;
// Reserve 2 bytes per block for the vectors, rounded up to a 4-byte boundary.
265 workUsed = (workUsed + blockcount * 2 + 3) & ~3;
269 for (
auto b :
xrange(blockcount)) {
270 unsigned offset = blockOffsets[b];
// Start from the previous best vector's exact score, then probe candidates.
272 unsigned bestchange = compareBlock<P>(bestVx, bestVy, offset);
273 if (bestchange >= 4) {
// Cheap pre-check first; only fully score vectors that look promising.
276 if (possibleBlock<P>(v.x, v.y, offset) < 4) {
277 unsigned testchange = compareBlock<P>(v.x, v.y, offset);
278 if (testchange < bestchange) {
279 bestchange = testchange;
// "Good enough" cut-offs: stop once fewer than 4 pixels differ, or when the
// candidate budget ('possibles') is exhausted.
282 if (bestchange < 4)
break;
285 if (possibles == 0)
break;
// Store the vector as (x << 1, y << 1); bit 0 of the x byte flags that XOR
// delta data for this block follows (presumably per the ZMBV bit-stream
// format — confirm against the codec spec).
289 vectors[b * 2 + 0] = (bestVx << 1);
290 vectors[b * 2 + 1] = (bestVy << 1);
292 vectors[b * 2 + 0] |= 1;
293 addXorBlock<P>(pixelOps, bestVx, bestVy, offset, workUsed);
// Encode a full (key) frame: copy every line of the raw input into the work
// buffer, converting each pixel to little-endian layout (LE_P) on the way.
// NOTE(review): the template header, the line/pixel loop headers and the
// readFrame setup (original lines 300-306, 309-311) are outside this extract.
299 void ZMBVEncoder::addFullFrame(
const PixelFormat& pixelFormat,
unsigned& workUsed)
303 PixelOperations<P> pixelOps(pixelFormat);
307 auto* pixelsIn =
reinterpret_cast<P*
> (readFrame);
308 auto* pixelsOut =
reinterpret_cast<LE_P*
>(&work[workUsed]);
// Advance: input by one full pitch (may exceed width), output by the packed
// line width actually written.
312 readFrame += pitch *
sizeof(P);
313 workUsed += width *
sizeof(P);
// Fetch line 'y' of the source frame scaled to the encoder's resolution,
// using workBuf_ as scratch space. Dispatches on pixelSize (4 = 32bpp,
// 2 = 16bpp) and then presumably on the encoder height (the conditions
// selecting 320x240 / 640x480 / 960x720, original lines 322-327 and 337-342,
// are not visible in this extract).
317 const void* ZMBVEncoder::getScaledLine(FrameSource* frame,
unsigned y,
void* workBuf_)
const
320 if (pixelSize == 4) {
321 auto* workBuf =
static_cast<uint32_t*
>(workBuf_);
324 return frame->getLinePtr320_240(y, workBuf);
326 return frame->getLinePtr640_480(y, workBuf);
328 return frame->getLinePtr960_720(y, workBuf);
335 if (pixelSize == 2) {
336 auto* workBuf =
static_cast<uint16_t*
>(workBuf_);
339 return frame->getLinePtr320_240(y, workBuf);
341 return frame->getLinePtr640_480(y, workBuf);
343 return frame->getLinePtr960_720(y, workBuf);
// Main per-frame compression path: swap frame buffers, emit the frame
// header, copy the (scaled) input lines into newframe, build the frame
// payload (full or XOR — that dispatch is outside this extract), then
// deflate the payload after the header and return the finished chunk.
// NOTE(review): large parts of this function (original lines 356-357,
// 361-365, 367-369, 371-373, 375-377, 380-381, 385-422, 426, 432) are
// missing from this extract.
355 std::swap(newframe, oldframe);
// writeDone tracks bytes already written to 'output' (1 = the flags byte,
// presumably — confirm against the ZMBV container format).
358 unsigned workUsed = 0;
359 unsigned writeDone = 1;
360 uint8_t* writeBuf = output.
data();
366 writeBuf + writeDone);
// Keyframe header records the pixel format chosen in setupBuffers.
370 header->format = format;
// Reset deflate state so a keyframe is decodable without prior history.
374 deflateReset(&zstream);
378 unsigned linePitch = pitch * pixelSize;
379 unsigned lineWidth = width * pixelSize;
// Copy each scaled source line into the frame buffer; skip the memcpy when
// getScaledLine already produced the line in place.
382 for (
auto i :
xrange(height)) {
383 const auto* scaled = getScaledLine(frame, i, dest);
384 if (scaled != dest) memcpy(dest, scaled, lineWidth);
// Compress the assembled work buffer into 'output' after the header bytes.
423 zstream.next_in = work.
data();
424 zstream.avail_in = workUsed;
425 zstream.total_in = 0;
427 zstream.next_out =
static_cast<Bytef*
>(writeBuf + writeDone);
428 zstream.avail_out = outputSize - writeDone;
429 zstream.total_out = 0;
// Z_SYNC_FLUSH makes every frame's compressed data self-contained/flushable;
// neededSize() guarantees avail_out is large enough, hence the bare assert.
430 auto r = deflate(&zstream, Z_SYNC_FLUSH);
431 assert(r == Z_OK); (void)r;
// Return the header plus compressed payload.
433 return {output.
data(), writeDone + zstream.total_out};