Bug Summary

File: pixman/pixman-fast-path.c
Location: line 375, column 5
Description: Value stored to 'srca' is never read
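The flagged statement is in fast_composite_add_n_8888_8888_ca (annotated at line 375 in the listing below): 'srca' is assigned the source alpha, but unlike the other fast paths in this file the function never reads it again. The store is harmless at run time, yet it is dead code. A minimal sketch of the obvious fix, assuming nothing else is meant to consume 'srca', is to drop the assignment and the now-unused variable:

    /* Sketch only -- not the upstream patch.  In
     * fast_composite_add_n_8888_8888_ca, the reported pattern is:
     *
     *     uint32_t src, srca, s;
     *     ...
     *     srca = src >> 24;    (dead store reported at line 375)
     *
     * Equivalent code that simply omits 'srca':
     */
    uint32_t src, s;

    src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);

    if (src == 0)
        return;

The remaining loop body only reads 'src', 'ma', 'd' and 's', so removing the store does not change behavior.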

Annotated Source Code

1/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
2/*
3 * Copyright © 2000 SuSE, Inc.
4 * Copyright © 2007 Red Hat, Inc.
5 *
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that
9 * copyright notice and this permission notice appear in supporting
10 * documentation, and that the name of SuSE not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. SuSE makes no representations about the
13 * suitability of this software for any purpose. It is provided "as is"
14 * without express or implied warranty.
15 *
16 * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
18 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
20 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
21 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 *
23 * Author: Keith Packard, SuSE, Inc.
24 */
25
26#ifdef HAVE_CONFIG_H
27#include <config.h>
28#endif
29#include <string.h>
30#include <stdlib.h>
31#include "pixman-private.h"
32#include "pixman-combine32.h"
33#include "pixman-fast-path.h"
34
35static force_inline uint32_t
36fetch_24 (uint8_t *a)
37{
38 if (((unsigned long)a) & 1)
39 {
40#ifdef WORDS_BIGENDIAN
41 return (*a << 16) | (*(uint16_t *)(a + 1));
42#else
43 return *a | (*(uint16_t *)(a + 1) << 8);
44#endif
45 }
46 else
47 {
48#ifdef WORDS_BIGENDIAN
49 return (*(uint16_t *)a << 8) | *(a + 2);
50#else
51 return *(uint16_t *)a | (*(a + 2) << 16);
52#endif
53 }
54}
55
56static force_inline void
57store_24 (uint8_t *a,
58 uint32_t v)
59{
60 if (((unsigned long)a) & 1)
61 {
62#ifdef WORDS_BIGENDIAN
63 *a = (uint8_t) (v >> 16);
64 *(uint16_t *)(a + 1) = (uint16_t) (v);
65#else
66 *a = (uint8_t) (v);
67 *(uint16_t *)(a + 1) = (uint16_t) (v >> 8);
68#endif
69 }
70 else
71 {
72#ifdef WORDS_BIGENDIAN
73 *(uint16_t *)a = (uint16_t)(v >> 8);
74 *(a + 2) = (uint8_t)v;
75#else
76 *(uint16_t *)a = (uint16_t)v;
77 *(a + 2) = (uint8_t)(v >> 16);
78#endif
79 }
80}
81
82static force_inline uint32_t
83over (uint32_t src,
84 uint32_t dest)
85{
86 uint32_t a = ~src >> 24;
87
88 UN8x4_MUL_UN8_ADD_UN8x4 (dest, a, src);
89
90 return dest;
91}
92
93static uint32_t
94in (uint32_t x,
95 uint8_t y)
96{
97 uint16_t a = y;
98
99 UN8x4_MUL_UN8 (x, a);
100
101 return x;
102}
103
104/*
105 * Naming convention:
106 *
107 * op_src_mask_dest
108 */
109static void
110fast_composite_over_x888_8_8888 (pixman_implementation_t *imp,
111 pixman_op_t op,
112 pixman_image_t * src_image,
113 pixman_image_t * mask_image,
114 pixman_image_t * dst_image,
115 int32_t src_x,
116 int32_t src_y,
117 int32_t mask_x,
118 int32_t mask_y,
119 int32_t dest_x,
120 int32_t dest_y,
121 int32_t width,
122 int32_t height)
123{
124 uint32_t *src, *src_line;
125 uint32_t *dst, *dst_line;
126 uint8_t *mask, *mask_line;
127 int src_stride, mask_stride, dst_stride;
128 uint8_t m;
129 uint32_t s, d;
130 int32_t w;
131
132 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
133 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
134 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
135
136 while (height--)
137 {
138 src = src_line;
139 src_line += src_stride;
140 dst = dst_line;
141 dst_line += dst_stride;
142 mask = mask_line;
143 mask_line += mask_stride;
144
145 w = width;
146 while (w--)
147 {
148 m = *mask++;
149 if (m)
150 {
151 s = *src | 0xff000000;
152
153 if (m == 0xff)
154 {
155 *dst = s;
156 }
157 else
158 {
159 d = in (s, m);
160 *dst = over (d, *dst);
161 }
162 }
163 src++;
164 dst++;
165 }
166 }
167}
168
169static void
170fast_composite_in_n_8_8 (pixman_implementation_t *imp,
171 pixman_op_t op,
172 pixman_image_t * src_image,
173 pixman_image_t * mask_image,
174 pixman_image_t * dest_image,
175 int32_t src_x,
176 int32_t src_y,
177 int32_t mask_x,
178 int32_t mask_y,
179 int32_t dest_x,
180 int32_t dest_y,
181 int32_t width,
182 int32_t height)
183{
184 uint32_t src, srca;
185 uint8_t *dst_line, *dst;
186 uint8_t *mask_line, *mask, m;
187 int dst_stride, mask_stride;
188 int32_t w;
189 uint16_t t;
190
191 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
192
193 srca = src >> 24;
194
195 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
196 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
197
198 if (srca == 0xff)
199 {
200 while (height--)
201 {
202 dst = dst_line;
203 dst_line += dst_stride;
204 mask = mask_line;
205 mask_line += mask_stride;
206 w = width;
207
208 while (w--)
209 {
210 m = *mask++;
211
212 if (m == 0)
213 *dst = 0;
214 else if (m != 0xff)
215 *dst = MUL_UN8 (m, *dst, t);
216
217 dst++;
218 }
219 }
220 }
221 else
222 {
223 while (height--)
224 {
225 dst = dst_line;
226 dst_line += dst_stride;
227 mask = mask_line;
228 mask_line += mask_stride;
229 w = width;
230
231 while (w--)
232 {
233 m = *mask++;
234 m = MUL_UN8 (m, srca, t);
235
236 if (m == 0)
237 *dst = 0;
238 else if (m != 0xff)
239 *dst = MUL_UN8 (m, *dst, t);
240
241 dst++;
242 }
243 }
244 }
245}
246
247static void
248fast_composite_in_8_8 (pixman_implementation_t *imp,
249 pixman_op_t op,
250 pixman_image_t * src_image,
251 pixman_image_t * mask_image,
252 pixman_image_t * dest_image,
253 int32_t src_x,
254 int32_t src_y,
255 int32_t mask_x,
256 int32_t mask_y,
257 int32_t dest_x,
258 int32_t dest_y,
259 int32_t width,
260 int32_t height)
261{
262 uint8_t *dst_line, *dst;
263 uint8_t *src_line, *src;
264 int dst_stride, src_stride;
265 int32_t w;
266 uint8_t s;
267 uint16_t t;
268
269 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
270 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
271
272 while (height--)
273 {
274 dst = dst_line;
275 dst_line += dst_stride;
276 src = src_line;
277 src_line += src_stride;
278 w = width;
279
280 while (w--)
281 {
282 s = *src++;
283
284 if (s == 0)
285 *dst = 0;
286 else if (s != 0xff)
287 *dst = MUL_UN8 (s, *dst, t);
288
289 dst++;
290 }
291 }
292}
293
294static void
295fast_composite_over_n_8_8888 (pixman_implementation_t *imp,
296 pixman_op_t op,
297 pixman_image_t * src_image,
298 pixman_image_t * mask_image,
299 pixman_image_t * dst_image,
300 int32_t src_x,
301 int32_t src_y,
302 int32_t mask_x,
303 int32_t mask_y,
304 int32_t dest_x,
305 int32_t dest_y,
306 int32_t width,
307 int32_t height)
308{
309 uint32_t src, srca;
310 uint32_t *dst_line, *dst, d;
311 uint8_t *mask_line, *mask, m;
312 int dst_stride, mask_stride;
313 int32_t w;
314
315 src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
316
317 srca = src >> 24;
318 if (src == 0)
319 return;
320
321 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
322 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
323
324 while (height--)
325 {
326 dst = dst_line;
327 dst_line += dst_stride;
328 mask = mask_line;
329 mask_line += mask_stride;
330 w = width;
331
332 while (w--)
333 {
334 m = *mask++;
335 if (m == 0xff)
336 {
337 if (srca == 0xff)
338 *dst = src;
339 else
340 *dst = over (src, *dst);
341 }
342 else if (m)
343 {
344 d = in (src, m);
345 *dst = over (d, *dst);
346 }
347 dst++;
348 }
349 }
350}
351
352static void
353fast_composite_add_n_8888_8888_ca (pixman_implementation_t *imp,
354 pixman_op_t op,
355 pixman_image_t * src_image,
356 pixman_image_t * mask_image,
357 pixman_image_t * dst_image,
358 int32_t src_x,
359 int32_t src_y,
360 int32_t mask_x,
361 int32_t mask_y,
362 int32_t dest_x,
363 int32_t dest_y,
364 int32_t width,
365 int32_t height)
366{
367 uint32_t src, srca, s;
368 uint32_t *dst_line, *dst, d;
369 uint32_t *mask_line, *mask, ma;
370 int dst_stride, mask_stride;
371 int32_t w;
372
373 src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
374
375 srca = src >> 24;
Value stored to 'srca' is never read
376 if (src == 0)
377 return;
378
379 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
380 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
381
382 while (height--)
383 {
384 dst = dst_line;
385 dst_line += dst_stride;
386 mask = mask_line;
387 mask_line += mask_stride;
388 w = width;
389
390 while (w--)
391 {
392 ma = *mask++;
393
394 if (ma)
395 {
396 d = *dst;
397 s = src;
398
399 UN8x4_MUL_UN8x4_ADD_UN8x4 (s, ma, d);
400
401 *dst = s;
402 }
403
404 dst++;
405 }
406 }
407}
408
409static void
410fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
411 pixman_op_t op,
412 pixman_image_t * src_image,
413 pixman_image_t * mask_image,
414 pixman_image_t * dst_image,
415 int32_t src_x,
416 int32_t src_y,
417 int32_t mask_x,
418 int32_t mask_y,
419 int32_t dest_x,
420 int32_t dest_y,
421 int32_t width,
422 int32_t height)
423{
424 uint32_t src, srca, s;
425 uint32_t *dst_line, *dst, d;
426 uint32_t *mask_line, *mask, ma;
427 int dst_stride, mask_stride;
428 int32_t w;
429
430 src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
431
432 srca = src >> 24;
433 if (src == 0)
434 return;
435
436 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
437 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
438
439 while (height--)
440 {
441 dst = dst_line;
442 dst_line += dst_stride;
443 mask = mask_line;
444 mask_line += mask_stride;
445 w = width;
446
447 while (w--)
448 {
449 ma = *mask++;
450 if (ma == 0xffffffff)
451 {
452 if (srca == 0xff)
453 *dst = src;
454 else
455 *dst = over (src, *dst);
456 }
457 else if (ma)
458 {
459 d = *dst;
460 s = src;
461
462 UN8x4_MUL_UN8x4 (s, ma);
463 UN8x4_MUL_UN8 (ma, srca);
464 ma = ~ma;
465 UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);
466
467 *dst = d;
468 }
469
470 dst++;
471 }
472 }
473}
474
475static void
476fast_composite_over_n_8_0888 (pixman_implementation_t *imp,
477 pixman_op_t op,
478 pixman_image_t * src_image,
479 pixman_image_t * mask_image,
480 pixman_image_t * dst_image,
481 int32_t src_x,
482 int32_t src_y,
483 int32_t mask_x,
484 int32_t mask_y,
485 int32_t dest_x,
486 int32_t dest_y,
487 int32_t width,
488 int32_t height)
489{
490 uint32_t src, srca;
491 uint8_t *dst_line, *dst;
492 uint32_t d;
493 uint8_t *mask_line, *mask, m;
494 int dst_stride, mask_stride;
495 int32_t w;
496
497 src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
498
499 srca = src >> 24;
500 if (src == 0)
501 return;
502
503 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
504 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
505
506 while (height--)
507 {
508 dst = dst_line;
509 dst_line += dst_stride;
510 mask = mask_line;
511 mask_line += mask_stride;
512 w = width;
513
514 while (w--)
515 {
516 m = *mask++;
517 if (m == 0xff)
518 {
519 if (srca == 0xff)
520 {
521 d = src;
522 }
523 else
524 {
525 d = fetch_24 (dst);
526 d = over (src, d);
527 }
528 store_24 (dst, d);
529 }
530 else if (m)
531 {
532 d = over (in (src, m), fetch_24 (dst));
533 store_24 (dst, d);
534 }
535 dst += 3;
536 }
537 }
538}
539
540static void
541fast_composite_over_n_8_0565 (pixman_implementation_t *imp,
542 pixman_op_t op,
543 pixman_image_t * src_image,
544 pixman_image_t * mask_image,
545 pixman_image_t * dst_image,
546 int32_t src_x,
547 int32_t src_y,
548 int32_t mask_x,
549 int32_t mask_y,
550 int32_t dest_x,
551 int32_t dest_y,
552 int32_t width,
553 int32_t height)
554{
555 uint32_t src, srca;
556 uint16_t *dst_line, *dst;
557 uint32_t d;
558 uint8_t *mask_line, *mask, m;
559 int dst_stride, mask_stride;
560 int32_t w;
561
562 src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
563
564 srca = src >> 24;
565 if (src == 0)
566 return;
567
568 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
569 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
570
571 while (height--)
572 {
573 dst = dst_line;
574 dst_line += dst_stride;
575 mask = mask_line;
576 mask_line += mask_stride;
577 w = width;
578
579 while (w--)
580 {
581 m = *mask++;
582 if (m == 0xff)
583 {
584 if (srca == 0xff)
585 {
586 d = src;
587 }
588 else
589 {
590 d = *dst;
591 d = over (src, CONVERT_0565_TO_0888 (d));
592 }
593 *dst = CONVERT_8888_TO_0565 (d);
594 }
595 else if (m)
596 {
597 d = *dst;
598 d = over (in (src, m), CONVERT_0565_TO_0888 (d));
599 *dst = CONVERT_8888_TO_0565 (d);
600 }
601 dst++;
602 }
603 }
604}
605
606static void
607fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
608 pixman_op_t op,
609 pixman_image_t * src_image,
610 pixman_image_t * mask_image,
611 pixman_image_t * dst_image,
612 int32_t src_x,
613 int32_t src_y,
614 int32_t mask_x,
615 int32_t mask_y,
616 int32_t dest_x,
617 int32_t dest_y,
618 int32_t width,
619 int32_t height)
620{
621 uint32_t src, srca, s;
622 uint16_t src16;
623 uint16_t *dst_line, *dst;
624 uint32_t d;
625 uint32_t *mask_line, *mask, ma;
626 int dst_stride, mask_stride;
627 int32_t w;
628
629 src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
630
631 srca = src >> 24;
632 if (src == 0)
633 return;
634
635 src16 = CONVERT_8888_TO_0565 (src);
636
637 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
638 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
639
640 while (height--)
641 {
642 dst = dst_line;
643 dst_line += dst_stride;
644 mask = mask_line;
645 mask_line += mask_stride;
646 w = width;
647
648 while (w--)
649 {
650 ma = *mask++;
651 if (ma == 0xffffffff)
652 {
653 if (srca == 0xff)
654 {
655 *dst = src16;
656 }
657 else
658 {
659 d = *dst;
660 d = over (src, CONVERT_0565_TO_0888 (d));
661 *dst = CONVERT_8888_TO_0565 (d);
662 }
663 }
664 else if (ma)
665 {
666 d = *dst;
667 d = CONVERT_0565_TO_0888 (d);
668
669 s = src;
670
671 UN8x4_MUL_UN8x4 (s, ma);
672 UN8x4_MUL_UN8 (ma, srca);
673 ma = ~ma;
674 UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);
675
676 *dst = CONVERT_8888_TO_0565 (d);
677 }
678 dst++;
679 }
680 }
681}
682
683static void
684fast_composite_over_8888_8888 (pixman_implementation_t *imp,
685 pixman_op_t op,
686 pixman_image_t * src_image,
687 pixman_image_t * mask_image,
688 pixman_image_t * dst_image,
689 int32_t src_x,
690 int32_t src_y,
691 int32_t mask_x,
692 int32_t mask_y,
693 int32_t dest_x,
694 int32_t dest_y,
695 int32_t width,
696 int32_t height)
697{
698 uint32_t *dst_line, *dst;
699 uint32_t *src_line, *src, s;
700 int dst_stride, src_stride;
701 uint8_t a;
702 int32_t w;
703
704 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
705 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
706
707 while (height--)
708 {
709 dst = dst_line;
710 dst_line += dst_stride;
711 src = src_line;
712 src_line += src_stride;
713 w = width;
714
715 while (w--)
716 {
717 s = *src++;
718 a = s >> 24;
719 if (a == 0xff)
720 *dst = s;
721 else if (s)
722 *dst = over (s, *dst);
723 dst++;
724 }
725 }
726}
727
728static void
729fast_composite_src_x888_8888 (pixman_implementation_t *imp,
730 pixman_op_t op,
731 pixman_image_t * src_image,
732 pixman_image_t * mask_image,
733 pixman_image_t * dst_image,
734 int32_t src_x,
735 int32_t src_y,
736 int32_t mask_x,
737 int32_t mask_y,
738 int32_t dest_x,
739 int32_t dest_y,
740 int32_t width,
741 int32_t height)
742{
743 uint32_t *dst_line, *dst;
744 uint32_t *src_line, *src;
745 int dst_stride, src_stride;
746 int32_t w;
747
748 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
749 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
750
751 while (height--)
752 {
753 dst = dst_line;
754 dst_line += dst_stride;
755 src = src_line;
756 src_line += src_stride;
757 w = width;
758
759 while (w--)
760 *dst++ = (*src++) | 0xff000000;
761 }
762}
763
764#if 0
765static void
766fast_composite_over_8888_0888 (pixman_implementation_t *imp,
767 pixman_op_t op,
768 pixman_image_t * src_image,
769 pixman_image_t * mask_image,
770 pixman_image_t * dst_image,
771 int32_t src_x,
772 int32_t src_y,
773 int32_t mask_x,
774 int32_t mask_y,
775 int32_t dest_x,
776 int32_t dest_y,
777 int32_t width,
778 int32_t height)
779{
780 uint8_t *dst_line, *dst;
781 uint32_t d;
782 uint32_t *src_line, *src, s;
783 uint8_t a;
784 int dst_stride, src_stride;
785 int32_t w;
786
787 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
788 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
789
790 while (height--)
791 {
792 dst = dst_line;
793 dst_line += dst_stride;
794 src = src_line;
795 src_line += src_stride;
796 w = width;
797
798 while (w--)
799 {
800 s = *src++;
801 a = s >> 24;
802 if (a)
803 {
804 if (a == 0xff)
805 d = s;
806 else
807 d = over (s, fetch_24 (dst));
808
809 store_24 (dst, d);
810 }
811 dst += 3;
812 }
813 }
814}
815#endif
816
817static void
818fast_composite_over_8888_0565 (pixman_implementation_t *imp,
819 pixman_op_t op,
820 pixman_image_t * src_image,
821 pixman_image_t * mask_image,
822 pixman_image_t * dst_image,
823 int32_t src_x,
824 int32_t src_y,
825 int32_t mask_x,
826 int32_t mask_y,
827 int32_t dest_x,
828 int32_t dest_y,
829 int32_t width,
830 int32_t height)
831{
832 uint16_t *dst_line, *dst;
833 uint32_t d;
834 uint32_t *src_line, *src, s;
835 uint8_t a;
836 int dst_stride, src_stride;
837 int32_t w;
838
839 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
840 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
841
842 while (height--)
843 {
844 dst = dst_line;
845 dst_line += dst_stride;
846 src = src_line;
847 src_line += src_stride;
848 w = width;
849
850 while (w--)
851 {
852 s = *src++;
853 a = s >> 24;
854 if (s)
855 {
856 if (a == 0xff)
857 {
858 d = s;
859 }
860 else
861 {
862 d = *dst;
863 d = over (s, CONVERT_0565_TO_0888 (d));
864 }
865 *dst = CONVERT_8888_TO_0565 (d);
866 }
867 dst++;
868 }
869 }
870}
871
872static void
873fast_composite_src_x888_0565 (pixman_implementation_t *imp,
874 pixman_op_t op,
875 pixman_image_t * src_image,
876 pixman_image_t * mask_image,
877 pixman_image_t * dst_image,
878 int32_t src_x,
879 int32_t src_y,
880 int32_t mask_x,
881 int32_t mask_y,
882 int32_t dest_x,
883 int32_t dest_y,
884 int32_t width,
885 int32_t height)
886{
887 uint16_t *dst_line, *dst;
888 uint32_t *src_line, *src, s;
889 int dst_stride, src_stride;
890 int32_t w;
891
892 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
893 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
894
895 while (height--)
896 {
897 dst = dst_line;
898 dst_line += dst_stride;
899 src = src_line;
900 src_line += src_stride;
901 w = width;
902
903 while (w--)
904 {
905 s = *src++;
906 *dst = CONVERT_8888_TO_0565 (s);
907 dst++;
908 }
909 }
910}
911
912static void
913fast_composite_add_8_8 (pixman_implementation_t *imp,
914 pixman_op_t op,
915 pixman_image_t * src_image,
916 pixman_image_t * mask_image,
917 pixman_image_t * dst_image,
918 int32_t src_x,
919 int32_t src_y,
920 int32_t mask_x,
921 int32_t mask_y,
922 int32_t dest_x,
923 int32_t dest_y,
924 int32_t width,
925 int32_t height)
926{
927 uint8_t *dst_line, *dst;
928 uint8_t *src_line, *src;
929 int dst_stride, src_stride;
930 int32_t w;
931 uint8_t s, d;
932 uint16_t t;
933
934 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
935 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
936
937 while (height--)
938 {
939 dst = dst_line;
940 dst_line += dst_stride;
941 src = src_line;
942 src_line += src_stride;
943 w = width;
944
945 while (w--)
946 {
947 s = *src++;
948 if (s)
949 {
950 if (s != 0xff)
951 {
952 d = *dst;
953 t = d + s;
954 s = t | (0 - (t >> 8));
955 }
956 *dst = s;
957 }
958 dst++;
959 }
960 }
961}
962
963static void
964fast_composite_add_8888_8888 (pixman_implementation_t *imp,
965 pixman_op_t op,
966 pixman_image_t * src_image,
967 pixman_image_t * mask_image,
968 pixman_image_t * dst_image,
969 int32_t src_x,
970 int32_t src_y,
971 int32_t mask_x,
972 int32_t mask_y,
973 int32_t dest_x,
974 int32_t dest_y,
975 int32_t width,
976 int32_t height)
977{
978 uint32_t *dst_line, *dst;
979 uint32_t *src_line, *src;
980 int dst_stride, src_stride;
981 int32_t w;
982 uint32_t s, d;
983
984 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
985 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
986
987 while (height--)
988 {
989 dst = dst_line;
990 dst_line += dst_stride;
991 src = src_line;
992 src_line += src_stride;
993 w = width;
994
995 while (w--)
996 {
997 s = *src++;
998 if (s)
999 {
1000 if (s != 0xffffffff)
1001 {
1002 d = *dst;
1003 if (d)
1004 UN8x4_ADD_UN8x4 (s, d);
1005 }
1006 *dst = s;
1007 }
1008 dst++;
1009 }
1010 }
1011}
1012
1013static void
1014fast_composite_add_n_8_8 (pixman_implementation_t *imp,
1015 pixman_op_t op,
1016 pixman_image_t * src_image,
1017 pixman_image_t * mask_image,
1018 pixman_image_t * dst_image,
1019 int32_t src_x,
1020 int32_t src_y,
1021 int32_t mask_x,
1022 int32_t mask_y,
1023 int32_t dest_x,
1024 int32_t dest_y,
1025 int32_t width,
1026 int32_t height)
1027{
1028 uint8_t *dst_line, *dst;
1029 uint8_t *mask_line, *mask;
1030 int dst_stride, mask_stride;
1031 int32_t w;
1032 uint32_t src;
1033 uint8_t sa;
1034
1035 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
1036 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
1037 src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
1038 sa = (src >> 24);
1039
1040 while (height--)
1041 {
1042 dst = dst_line;
1043 dst_line += dst_stride;
1044 mask = mask_line;
1045 mask_line += mask_stride;
1046 w = width;
1047
1048 while (w--)
1049 {
1050 uint16_t tmp;
1051 uint16_t a;
1052 uint32_t m, d;
1053 uint32_t r;
1054
1055 a = *mask++;
1056 d = *dst;
1057
1058 m = MUL_UN8 (sa, a, tmp);
1059 r = ADD_UN8 (m, d, tmp);
1060
1061 *dst++ = r;
1062 }
1063 }
1064}
1065
1066#ifdef WORDS_BIGENDIAN
1067#define CREATE_BITMASK(n) (0x80000000 >> (n))
1068#define UPDATE_BITMASK(n) ((n) >> 1)
1069#else
1070#define CREATE_BITMASK(n) (1 << (n))
1071#define UPDATE_BITMASK(n) ((n) << 1)
1072#endif
1073
1074#define TEST_BIT(p, n) \
1075 (*((p) + ((n) >> 5)) & CREATE_BITMASK ((n) & 31))
1076#define SET_BIT(p, n) \
1077 do { *((p) + ((n) >> 5)) |= CREATE_BITMASK ((n) & 31); } while (0);
1078
1079static void
1080fast_composite_add_1000_1000 (pixman_implementation_t *imp,
1081 pixman_op_t op,
1082 pixman_image_t * src_image,
1083 pixman_image_t * mask_image,
1084 pixman_image_t * dst_image,
1085 int32_t src_x,
1086 int32_t src_y,
1087 int32_t mask_x,
1088 int32_t mask_y,
1089 int32_t dest_x,
1090 int32_t dest_y,
1091 int32_t width,
1092 int32_t height)
1093{
1094 uint32_t *dst_line, *dst;
1095 uint32_t *src_line, *src;
1096 int dst_stride, src_stride;
1097 int32_t w;
1098
1099 PIXMAN_IMAGE_GET_LINE (src_image, 0, src_y, uint32_t,
1100 src_stride, src_line, 1);
1101 PIXMAN_IMAGE_GET_LINE (dst_image, 0, dest_y, uint32_t,
1102 dst_stride, dst_line, 1);
1103
1104 while (height--)
1105 {
1106 dst = dst_line;
1107 dst_line += dst_stride;
1108 src = src_line;
1109 src_line += src_stride;
1110 w = width;
1111
1112 while (w--)
1113 {
1114 /*
1115 * TODO: improve performance by processing uint32_t data instead
1116 * of individual bits
1117 */
1118 if (TEST_BIT (src, src_x + w))
1119 SET_BIT (dst, dest_x + w);
1120 }
1121 }
1122}
1123
1124static void
1125fast_composite_over_n_1_8888 (pixman_implementation_t *imp,
1126 pixman_op_t op,
1127 pixman_image_t * src_image,
1128 pixman_image_t * mask_image,
1129 pixman_image_t * dst_image,
1130 int32_t src_x,
1131 int32_t src_y,
1132 int32_t mask_x,
1133 int32_t mask_y,
1134 int32_t dest_x,
1135 int32_t dest_y,
1136 int32_t width,
1137 int32_t height)
1138{
1139 uint32_t src, srca;
1140 uint32_t *dst, *dst_line;
1141 uint32_t *mask, *mask_line;
1142 int mask_stride, dst_stride;
1143 uint32_t bitcache, bitmask;
1144 int32_t w;
1145
1146 if (width <= 0)
1147 return;
1148
1149 src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
1150 srca = src >> 24;
1151 if (src == 0)
1152 return;
1153
1154 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t,
1155 dst_stride, dst_line, 1);
1156 PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
1157 mask_stride, mask_line, 1);
1158 mask_line += mask_x >> 5;
1159
1160 if (srca == 0xff)
1161 {
1162 while (height--)
1163 {
1164 dst = dst_line;
1165 dst_line += dst_stride;
1166 mask = mask_line;
1167 mask_line += mask_stride;
1168 w = width;
1169
1170 bitcache = *mask++;
1171 bitmask = CREATE_BITMASK (mask_x & 31);
1172
1173 while (w--)
1174 {
1175 if (bitmask == 0)
1176 {
1177 bitcache = *mask++;
1178 bitmask = CREATE_BITMASK (0);
1179 }
1180 if (bitcache & bitmask)
1181 *dst = src;
1182 bitmask = UPDATE_BITMASK (bitmask);
1183 dst++;
1184 }
1185 }
1186 }
1187 else
1188 {
1189 while (height--)
1190 {
1191 dst = dst_line;
1192 dst_line += dst_stride;
1193 mask = mask_line;
1194 mask_line += mask_stride;
1195 w = width;
1196
1197 bitcache = *mask++;
1198 bitmask = CREATE_BITMASK (mask_x & 31);
1199
1200 while (w--)
1201 {
1202 if (bitmask == 0)
1203 {
1204 bitcache = *mask++;
1205 bitmask = CREATE_BITMASK (0);
1206 }
1207 if (bitcache & bitmask)
1208 *dst = over (src, *dst);
1209 bitmask = UPDATE_BITMASK (bitmask);
1210 dst++;
1211 }
1212 }
1213 }
1214}
1215
1216static void
1217fast_composite_over_n_1_0565 (pixman_implementation_t *imp,
1218 pixman_op_t op,
1219 pixman_image_t * src_image,
1220 pixman_image_t * mask_image,
1221 pixman_image_t * dst_image,
1222 int32_t src_x,
1223 int32_t src_y,
1224 int32_t mask_x,
1225 int32_t mask_y,
1226 int32_t dest_x,
1227 int32_t dest_y,
1228 int32_t width,
1229 int32_t height)
1230{
1231 uint32_t src, srca;
1232 uint16_t *dst, *dst_line;
1233 uint32_t *mask, *mask_line;
1234 int mask_stride, dst_stride;
1235 uint32_t bitcache, bitmask;
1236 int32_t w;
1237 uint32_t d;
1238 uint16_t src565;
1239
1240 if (width <= 0)
1241 return;
1242
1243 src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
1244 srca = src >> 24;
1245 if (src == 0)
1246 return;
1247
1248 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t,
1249 dst_stride, dst_line, 1);
1250 PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
1251 mask_stride, mask_line, 1);
1252 mask_line += mask_x >> 5;
1253
1254 if (srca == 0xff)
1255 {
1256 src565 = CONVERT_8888_TO_0565 (src);
1257 while (height--)
1258 {
1259 dst = dst_line;
1260 dst_line += dst_stride;
1261 mask = mask_line;
1262 mask_line += mask_stride;
1263 w = width;
1264
1265 bitcache = *mask++;
1266 bitmask = CREATE_BITMASK (mask_x & 31);
1267
1268 while (w--)
1269 {
1270 if (bitmask == 0)
1271 {
1272 bitcache = *mask++;
1273 bitmask = CREATE_BITMASK (0);
1274 }
1275 if (bitcache & bitmask)
1276 *dst = src565;
1277 bitmask = UPDATE_BITMASK (bitmask);
1278 dst++;
1279 }
1280 }
1281 }
1282 else
1283 {
1284 while (height--)
1285 {
1286 dst = dst_line;
1287 dst_line += dst_stride;
1288 mask = mask_line;
1289 mask_line += mask_stride;
1290 w = width;
1291
1292 bitcache = *mask++;
1293 bitmask = CREATE_BITMASK (mask_x & 31);
1294
1295 while (w--)
1296 {
1297 if (bitmask == 0)
1298 {
1299 bitcache = *mask++;
1300 bitmask = CREATE_BITMASK (0);
1301 }
1302 if (bitcache & bitmask)
1303 {
1304 d = over (src, CONVERT_0565_TO_0888 (*dst));
1305 *dst = CONVERT_8888_TO_0565 (d);
1306 }
1307 bitmask = UPDATE_BITMASK (bitmask);
1308 dst++;
1309 }
1310 }
1311 }
1312}
1313
1314/*
1315 * Simple bitblt
1316 */
1317
1318static void
1319fast_composite_solid_fill (pixman_implementation_t *imp,
1320 pixman_op_t op,
1321 pixman_image_t * src_image,
1322 pixman_image_t * mask_image,
1323 pixman_image_t * dst_image,
1324 int32_t src_x,
1325 int32_t src_y,
1326 int32_t mask_x,
1327 int32_t mask_y,
1328 int32_t dest_x,
1329 int32_t dest_y,
1330 int32_t width,
1331 int32_t height)
1332{
1333 uint32_t src;
1334
1335 src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
1336
1337 if (dst_image->bits.format == PIXMAN_a1)
1338 {
1339 src = src >> 31;
1340 }
1341 else if (dst_image->bits.format == PIXMAN_a8)
1342 {
1343 src = src >> 24;
1344 }
1345 else if (dst_image->bits.format == PIXMAN_r5g6b5 ||
1346 dst_image->bits.format == PIXMAN_b5g6r5)
1347 {
1348 src = CONVERT_8888_TO_0565 (src);
1349 }
1350
1351 pixman_fill (dst_image->bits.bits, dst_image->bits.rowstride,
1352 PIXMAN_FORMAT_BPP (dst_image->bits.format),
1353 dest_x, dest_y,
1354 width, height,
1355 src);
1356}
1357
1358static void
1359fast_composite_src_memcpy (pixman_implementation_t *imp,
1360 pixman_op_t op,
1361 pixman_image_t * src_image,
1362 pixman_image_t * mask_image,
1363 pixman_image_t * dst_image,
1364 int32_t src_x,
1365 int32_t src_y,
1366 int32_t mask_x,
1367 int32_t mask_y,
1368 int32_t dest_x,
1369 int32_t dest_y,
1370 int32_t width,
1371 int32_t height)
1372{
1373 int bpp = PIXMAN_FORMAT_BPP (dst_image->bits.format) / 8;
1374 uint32_t n_bytes = width * bpp;
1375 int dst_stride, src_stride;
1376 uint8_t *dst;
1377 uint8_t *src;
1378
1379 src_stride = src_image->bits.rowstride * 4;
1380 dst_stride = dst_image->bits.rowstride * 4;
1381
1382 src = (uint8_t *)src_image->bits.bits + src_y * src_stride + src_x * bpp;
1383 dst = (uint8_t *)dst_image->bits.bits + dest_y * dst_stride + dest_x * bpp;
1384
1385 while (height--)
1386 {
1387 memcpy (dst, src, n_bytes);
1388
1389 dst += dst_stride;
1390 src += src_stride;
1391 }
1392}
1393
1394FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, SRC, COVER)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_8888_cover_SRC
(uint32_t *dst, uint32_t *src, int32_t w, pixman_fixed_t vx,
pixman_fixed_t unit_x, pixman_fixed_t max_vx) { uint32_t d; uint32_t
s1, s2; uint8_t a1, a2; int x1, x2; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (-1
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (-1 ==
PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = (s1); } else if (s1) { d = (*dst); s1 = (s1); a1 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
[... remainder of the FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, SRC, COVER) expansion: the second pixel of each pair and the odd trailing pixel of scaled_nearest_scanline_8888_8888_cover_SRC, followed by the generated wrapper fast_composite_scaled_nearest_8888_8888_cover_SRC, which computes the source and destination strides, maps (src_x + 0.5, src_y + 0.5) through the source transform, takes unit_x/unit_y from the matrix diagonal, applies the repeat mode per row and calls the scanline function for the left pad, the interior and the right pad of each destination row ...]
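The expanded scanline bodies in this listing all repeat the same two-channels-at-a-time blend that the UN8x4_MUL_UN8_ADD_UN8x4 expansion implements: multiply the destination by the inverse source alpha using the 0xff00ff / 0x800080 masks, then add the source with saturation. The following standalone sketch isolates that arithmetic; the helper names (mul_un8x2, add_un8x2_sat, blend_over_u32) are illustrative and are not pixman API.

#include <stdint.h>
#include <stdio.h>

/* Multiply the two 8-bit channels held in the even bytes of x by a,
 * dividing by 255 with rounding (the 0xff00ff / 0x800080 pattern in
 * the expanded macros). */
static uint32_t
mul_un8x2 (uint32_t x, uint8_t a)
{
    uint32_t t = (x & 0xff00ff) * a + 0x800080;
    return ((t + ((t >> 8) & 0xff00ff)) >> 8) & 0xff00ff;
}

/* Saturating add of two channel pairs (the 0x10000100 pattern). */
static uint32_t
add_un8x2_sat (uint32_t x, uint32_t y)
{
    uint32_t t = x + y;
    t |= 0x10000100 - ((t >> 8) & 0xff00ff);
    return t & 0xff00ff;
}

/* OVER for premultiplied a8r8g8b8: dest = src + dest * (255 - src.alpha) / 255. */
static uint32_t
blend_over_u32 (uint32_t src, uint32_t dest)
{
    uint8_t  ia = (uint8_t) (~src >> 24);   /* 255 - source alpha */
    uint32_t lo = add_un8x2_sat (mul_un8x2 (dest, ia), src & 0xff00ff);
    uint32_t hi = add_un8x2_sat (mul_un8x2 (dest >> 8, ia), (src >> 8) & 0xff00ff);
    return lo | (hi << 8);
}

int
main (void)
{
    /* 50%-alpha premultiplied red over opaque white -> 0xffff7f7f. */
    printf ("%08x\n", (unsigned) blend_over_u32 (0x80800000, 0xffffffff));
    return 0;
}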
1395FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, SRC, NONE) [macro expansion: defines scaled_nearest_scanline_8888_8888_none_SRC and fast_composite_scaled_nearest_8888_8888_none_SRC, following the same pattern as above with the NONE repeat (out-of-bounds rows and pads are filled from a zero pixel)]
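Every scaled_nearest_scanline_* body generated here walks the source with a 16.16 fixed-point coordinate: the integer part of vx selects the source pixel, unit_x is added per destination pixel, and the loop is unrolled two pixels at a time with a trailing odd pixel. A simplified, single-pixel-per-iteration sketch of the SRC case (the OVER and repeat handling is omitted; function names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Nearest-neighbour SRC scanline: vx is a 16.16 source x coordinate,
 * unit_x the per-destination-pixel step. */
static void
scanline_nearest_src (uint32_t *dst, const uint32_t *src,
                      int32_t w, int32_t vx, int32_t unit_x)
{
    while (w-- > 0)
    {
        *dst++ = src[vx >> 16];   /* integer part picks the source pixel */
        vx += unit_x;             /* advance by the scale step           */
    }
}

int
main (void)
{
    uint32_t src[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
    uint32_t dst[8];
    int      i;

    /* 2x upscale: unit_x = 0.5 in 16.16, so each source pixel is used twice. */
    scanline_nearest_src (dst, src, 8, 0, 0x8000);

    for (i = 0; i < 8; i++)
        printf ("%08x%c", (unsigned) dst[i], i == 7 ? '\n' : ' ');
    return 0;
}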
1396FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, SRC, PAD) [macro expansion: defines scaled_nearest_scanline_8888_8888_pad_SRC and fast_composite_scaled_nearest_8888_8888_pad_SRC; PAD repeat clamps the row index and fills the pads from the edge pixels]
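The *_pad_* and *_none_* wrappers generated above split each destination row into three runs before calling the scanline function: left_pad pixels whose source coordinate falls before the image (filled from the clamped first source pixel, or from a zero pixel for NONE), width interior pixels, and right_pad pixels past the last source column; passing vx = 0 and unit_x = 0 for the pad runs makes the scanline replicate a single pixel. The sketch below illustrates how such bounds can be computed; it is a linear-time illustration of the idea, not the actual pad_repeat_get_scanline_bounds implementation, and it assumes unit_x > 0 and an arithmetic right shift.

#include <stdint.h>
#include <stdio.h>

static void
get_pad_bounds (int32_t src_width, int64_t vx, int64_t unit_x,
                int32_t *width, int32_t *left_pad, int32_t *right_pad)
{
    int32_t w = *width, lp = 0, rp = 0;
    int64_t last_vx;

    /* Destination pixels whose source x lies before the first column. */
    while (w > 0 && (vx >> 16) < 0)
    {
        lp++; w--; vx += unit_x;
    }

    /* Destination pixels whose source x lies past the last column. */
    last_vx = vx + (int64_t) (w - 1) * unit_x;
    while (w > 0 && (last_vx >> 16) >= src_width)
    {
        rp++; w--; last_vx -= unit_x;
    }

    *width = w; *left_pad = lp; *right_pad = rp;
}

int
main (void)
{
    int32_t width = 10, lp, rp;

    /* 4-pixel-wide source, destination starts 2 pixels to its left, 1:1 scale. */
    get_pad_bounds (4, -(2LL << 16), 1LL << 16, &width, &lp, &rp);
    printf ("left=%d interior=%d right=%d\n", (int) lp, (int) width, (int) rp);
    return 0;
}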
1397FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, SRC, NORMAL) [macro expansion: defines scaled_nearest_scanline_8888_8888_normal_SRC and fast_composite_scaled_nearest_8888_8888_normal_SRC; NORMAL repeat wraps vx and vy back into the source extent]
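The *_normal_* variants keep the 16.16 coordinate inside the source extent by repeated subtraction, exactly as the inline `while (vx >= max_vx) vx -= max_vx;` in the loops above, and the wrappers wrap the row coordinate the same way through repeat (PIXMAN_REPEAT_NORMAL, ...). A minimal sketch of that wrap; repeat_normal is an illustrative name, and the generated loops only need the downward branch because vx has already been normalised once before the row loop.

#include <stdint.h>
#include <stdio.h>

/* Wrap a 16.16 coordinate into [0, max); max is the source extent in
 * the same 16.16 units (width << 16). */
static int32_t
repeat_normal (int32_t v, int32_t max)
{
    while (v >= max)
        v -= max;
    while (v < 0)
        v += max;
    return v;
}

int
main (void)
{
    int32_t max_vx = 4 << 16;   /* source is 4 pixels wide */

    printf ("%d\n", repeat_normal (9 << 16, max_vx) >> 16);      /*  9 -> 1 */
    printf ("%d\n", repeat_normal (-(1 << 16), max_vx) >> 16);   /* -1 -> 3 */
    return 0;
}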
1398FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, OVER, COVER) [macro expansion: defines scaled_nearest_scanline_8888_8888_cover_OVER and fast_composite_scaled_nearest_8888_8888_cover_OVER; the OVER scanlines copy opaque source pixels, skip zero pixels and blend the rest]
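The OVER variants add a per-pixel fast path in front of the blend: a source pixel with alpha 0xff is copied, a pixel that is entirely zero leaves the destination untouched, and only the remaining pixels go through the full multiply-and-add. A readable per-channel version of that path follows; the function names are illustrative, while the channel arithmetic matches the rounding used by the expanded macros.

#include <stdint.h>
#include <stdio.h>

/* Per-channel OVER for premultiplied a8r8g8b8, using the same
 * round-to-nearest division by 255 as the expanded macros. */
static uint32_t
over_reference (uint32_t src, uint32_t dst)
{
    uint32_t ia = 255 - (src >> 24);
    uint32_t out = 0;
    int      shift;

    for (shift = 0; shift < 32; shift += 8)
    {
        uint32_t s = (src >> shift) & 0xff;
        uint32_t d = (dst >> shift) & 0xff;
        uint32_t t = d * ia + 0x80;
        uint32_t c = s + ((t + (t >> 8)) >> 8);

        if (c > 0xff)
            c = 0xff;          /* saturate, as UN8x4_ADD_UN8x4 does */
        out |= c << shift;
    }
    return out;
}

static void
composite_over_pixel (uint32_t s, uint32_t *dst)
{
    if ((s >> 24) == 0xff)     /* opaque source: plain copy       */
        *dst = s;
    else if (s)                /* translucent source: blend       */
        *dst = over_reference (s, *dst);
    /* s == 0: fully transparent source, destination unchanged     */
}

int
main (void)
{
    uint32_t d = 0xffffffff;

    composite_over_pixel (0x80800000, &d);
    printf ("%08x\n", (unsigned) d);   /* 0xffff7f7f */
    return 0;
}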
1399FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, OVER, NONE) [macro expansion: defines scaled_nearest_scanline_8888_8888_none_OVER and fast_composite_scaled_nearest_8888_8888_none_OVER]
1400FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, OVER, PAD) [macro expansion: defines scaled_nearest_scanline_8888_8888_pad_OVER and fast_composite_scaled_nearest_8888_8888_pad_OVER]
1401FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, OVER, NORMAL) [macro expansion: defines scaled_nearest_scanline_8888_8888_normal_OVER and fast_composite_scaled_nearest_8888_8888_normal_OVER]
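Each fast_composite_scaled_nearest_* wrapper derives its stepping from the source image's transform: it maps the centre of the first destination pixel through the matrix, backs off the smallest representable fixed-point epsilon, and reads unit_x/unit_y straight from the matrix diagonal, which assumes the scale-plus-translate transforms these paths are selected for. A compact sketch of that setup, with an illustrative struct in place of pixman's transform type:

#include <stdint.h>
#include <stdio.h>

typedef int32_t fixed;                       /* 16.16 fixed point */
#define FIXED_1          (1 << 16)
#define INT_TO_FIXED(i)  ((fixed) ((i) << 16))

/* Illustrative scale + translate transform: sx, sy on the diagonal,
 * tx, ty as the translation column. */
typedef struct { fixed sx, sy, tx, ty; } scale_transform_t;

static void
setup_scaled_fetch (const scale_transform_t *t, int src_x, int src_y,
                    fixed *vx, fixed *vy, fixed *unit_x, fixed *unit_y)
{
    /* Centre of the first destination pixel, mapped into source space. */
    fixed x = INT_TO_FIXED (src_x) + FIXED_1 / 2;
    fixed y = INT_TO_FIXED (src_y) + FIXED_1 / 2;

    x = (fixed) (((int64_t) x * t->sx) >> 16) + t->tx;
    y = (fixed) (((int64_t) y * t->sy) >> 16) + t->ty;

    *vx = x - 1;          /* back off one fixed-point epsilon */
    *vy = y - 1;
    *unit_x = t->sx;      /* per-pixel step = matrix diagonal */
    *unit_y = t->sy;
}

int
main (void)
{
    scale_transform_t t = { FIXED_1 / 2, FIXED_1 / 2, 0, 0 };   /* 2x upscale */
    fixed vx, vy, ux, uy;

    setup_scaled_fetch (&t, 0, 0, &vx, &vy, &ux, &uy);
    printf ("vx=0x%08x unit_x=0x%08x\n", (unsigned) vx, (unsigned) ux);
    return 0;
}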
1402FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, SRC, COVER)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_565_cover_SRC
(uint16_t *dst, uint32_t *src, int32_t w, pixman_fixed_t vx,
pixman_fixed_t unit_x, pixman_fixed_t max_vx) { uint32_t d; uint32_t
s1, s2; uint8_t a1, a2; int x1, x2; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (-1
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (-1 ==
PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else
if (s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst)
>> 2) & 0x7)) | ((((*dst) << 5) & 0xfc00
) | (((*dst) >> 1) & 0x300)) | ((((*dst) << 8
) & 0xf80000) | (((*dst) << 3) & 0x70000))) | 0xff000000
); s1 = (s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__
; r1__ = (d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) &
0xff00ff) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r1__ &= 0xff00ff; } while
(0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while
(0); r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; if (a2 == 0xff) { *dst = ((((s2) >> 3) &
0x001f) | (((s2) >> 5) & 0x07e0) | (((s2) >>
8) & 0xf800)); } else if (s2) { d = ((((((*dst) <<
3) & 0xf8) | (((*dst) >> 2) & 0x7)) | ((((*dst
) << 5) & 0xfc00) | (((*dst) >> 1) & 0x300
)) | ((((*dst) << 8) & 0xf80000) | (((*dst) <<
3) & 0x70000))) | 0xff000000); s2 = (s2); a2 ^= 0xff; do
{ uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s2) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a2)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = ((((d) >> 3) & 0x001f) | (((d) >> 5) &
0x07e0) | (((d) >> 8) & 0xf800)); } dst++; } else {
*dst++ = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); *dst++
= ((((s2) >> 3) & 0x001f) | (((s2) >> 5) &
0x07e0) | (((s2) >> 8) & 0xf800)); } } if (w &
1) { x1 = vx >> 16; s1 = src[x1]; if (PIXMAN_OP_SRC ==
PIXMAN_OP_OVER) { a1 = ((s1) >> 24); if (a1 == 0xff) {
*dst = ((((s1) >> 3) & 0x001f) | (((s1) >> 5
) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else if
(s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst) >>
2) & 0x7)) | ((((*dst) << 5) & 0xfc00) | (((*dst
) >> 1) & 0x300)) | ((((*dst) << 8) & 0xf80000
) | (((*dst) << 3) & 0x70000))) | 0xff000000); s1 =
(s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ =
(d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff
    [remainder of the preceding FAST_NEAREST expansion: it completes the force-inline scanline function scaled_nearest_scanline_8888_565_cover_SRC and defines its row-loop wrapper fast_composite_scaled_nearest_8888_565_cover_SRC, which computes the strides, maps the destination origin through the source transform and then calls the scanline function for each destination row (COVER: no repeat handling)]
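Every expansion in this stretch inlines the same two pixel-format conversions: packing an a8r8g8b8 source into r5g6b5 for the store, and widening an r5g6b5 destination back to a8r8g8b8 before blending. A minimal sketch of that bit-twiddling follows; the helper names are illustrative (pixman keeps equivalent macros, likely CONVERT_8888_TO_0565 and CONVERT_0565_TO_8888, in its private headers), but the shifts and masks are exactly the ones appearing inline above and below.

#include <stdint.h>

/* a8r8g8b8 -> r5g6b5: keep the top 5/6/5 bits of each colour channel and
 * drop alpha.  Red lands in bits 15:11, green in 10:5, blue in 4:0. */
static inline uint16_t
to_0565 (uint32_t s)
{
    return ((s >> 3) & 0x001f)   /* blue:  top 5 of bits  7:0  -> 4:0   */
         | ((s >> 5) & 0x07e0)   /* green: top 6 of bits 15:8  -> 10:5  */
         | ((s >> 8) & 0xf800);  /* red:   top 5 of bits 23:16 -> 15:11 */
}

/* r5g6b5 -> opaque a8r8g8b8: place each field in the high bits of its
 * byte, replicate its top bits into the low bits, then force alpha. */
static inline uint32_t
to_8888 (uint16_t d)
{
    return (((d << 3) & 0x0000f8) | ((d >> 2) & 0x000007))   /* blue  */
         | (((d << 5) & 0x00fc00) | ((d >> 1) & 0x000300))   /* green */
         | (((d << 8) & 0xf80000) | ((d << 3) & 0x070000))   /* red   */
         | 0xff000000;                                        /* alpha */
}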
1403FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, SRC, NONE)
    [expands to the force-inline scanline function scaled_nearest_scanline_8888_565_none_SRC and its row-loop wrapper fast_composite_scaled_nearest_8888_565_none_SRC; NONE repeat: coordinates outside the source read from a static zero pixel]
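Stripped of the per-pixel store, every scaled_nearest_scanline_* generated in this file follows the same 16.16 fixed-point walk over the source row: two destination pixels per iteration plus one trailing pixel when the width is odd. A sketch, assuming the to_0565 helper from the earlier sketch and pixman's pixman_fixed_t (the 16.16 fixed-point type); the NORMAL variants additionally wrap vx back by max_vx before each sample, and the OVER variants replace the plain conversion with the blend shown further below.

/* Illustrative skeleton of the generated scanline loop (SRC store shown). */
static void
scanline_skeleton (uint16_t *dst, const uint32_t *src, int32_t w,
                   pixman_fixed_t vx, pixman_fixed_t unit_x)
{
    while ((w -= 2) >= 0)
    {
        int x1 = vx >> 16;          /* nearest source pixel for dst[0] */
        vx += unit_x;
        int x2 = vx >> 16;          /* nearest source pixel for dst[1] */
        vx += unit_x;

        *dst++ = to_0565 (src[x1]);
        *dst++ = to_0565 (src[x2]);
    }
    if (w & 1)                      /* odd trailing pixel */
    {
        int x1 = vx >> 16;
        *dst++ = to_0565 (src[x1]);
    }
}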
1404FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, SRC, PAD)
    [expands to scaled_nearest_scanline_8888_565_pad_SRC and fast_composite_scaled_nearest_8888_565_pad_SRC; PAD repeat: out-of-range coordinates are clamped to the source edges]
1405FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, SRC, NORMAL)
    [expands to scaled_nearest_scanline_8888_565_normal_SRC and fast_composite_scaled_nearest_8888_565_normal_SRC; NORMAL repeat: the fixed-point coordinates wrap modulo the source width and height]
1406FAST_NEAREST (565_565_normal, 0565, 0565, uint16_t, uint16_t, SRC, NORMAL)
    [expands to scaled_nearest_scanline_565_565_normal_SRC and fast_composite_scaled_nearest_565_565_normal_SRC; same skeleton with uint16_t source pixels, so the SRC store copies the r5g6b5 values straight through]
1407FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, OVER, COVER)
    [expands to scaled_nearest_scanline_8888_565_cover_OVER and fast_composite_scaled_nearest_8888_565_cover_OVER; first of the OVER variants, which blend translucent source pixels against the widened r5g6b5 destination instead of overwriting it]
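The OVER variants beginning at source line 1407 differ from the SRC ones only in the store: a source pixel with alpha 0xff is converted and written directly, an all-zero source pixel leaves the destination untouched, and anything in between is blended against the widened destination. The generated code does this two channels at a time with the 0x800080 rounding trick visible above; what follows is a simplified per-channel sketch of the same step, reusing the to_0565/to_8888 helpers sketched earlier (names illustrative).

#include <stdint.h>

/* OVER of one a8r8g8b8 source pixel onto an r5g6b5 destination pixel:
 * dest = src + dest * (255 - src_alpha) / 255, per channel, saturated. */
static uint16_t
over_8888_on_0565 (uint32_t src, uint16_t dest16)
{
    uint32_t a = src >> 24;

    if (a == 0xff)
        return to_0565 (src);        /* opaque source: plain conversion  */
    if (src == 0)
        return dest16;               /* nothing to add: keep destination */

    uint32_t dest = to_8888 (dest16);
    uint32_t ia = 255 - a;
    uint32_t result = 0;

    for (int shift = 0; shift < 32; shift += 8)
    {
        uint32_t s = (src  >> shift) & 0xff;
        uint32_t d = (dest >> shift) & 0xff;

        uint32_t t = d * ia + 0x80;      /* d * (255 - a) / 255, rounded */
        t = (t + (t >> 8)) >> 8;
        t += s;                          /* add source, then saturate    */
        if (t > 0xff)
            t = 0xff;

        result |= t << shift;
    }
    return to_0565 (result);
}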
1408FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, OVER, NONE)
    [expands to scaled_nearest_scanline_8888_565_none_OVER and fast_composite_scaled_nearest_8888_565_none_OVER; OVER with NONE repeat (static zero pixel outside the source)]
1409FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, OVER, PAD)
    [expands to scaled_nearest_scanline_8888_565_pad_OVER and fast_composite_scaled_nearest_8888_565_pad_OVER; OVER with PAD repeat (coordinates clamped to the source edges)]
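Each fast_composite_scaled_nearest_* wrapper above repeats the same frame: compute the destination and source strides, run the destination origin through the image transform to get the 16.16 start coordinates and per-pixel steps, then walk the destination rows, pick the nearest source row according to the repeat mode, and hand each row to the matching scanline function (with separate calls for the left and right pad in the PAD and NONE cases). Below is a compressed sketch of the row loop for the PAD case, reusing scanline_skeleton from the earlier sketch; NONE substitutes a static zero pixel for out-of-range rows and NORMAL wraps vy instead of clamping y.

/* Illustrative per-row dispatch (PAD repeat): clamp the source row and
 * call the scanline worker once per destination row. */
static void
row_loop_sketch (uint16_t *dst_line, int dst_stride,
                 const uint32_t *src_first_line, int src_stride,
                 int src_height, int32_t width, int32_t height,
                 pixman_fixed_t vx, pixman_fixed_t vy,
                 pixman_fixed_t unit_x, pixman_fixed_t unit_y)
{
    while (--height >= 0)
    {
        uint16_t *dst = dst_line;
        dst_line += dst_stride;

        int y = vy >> 16;            /* nearest source row for this dst row */
        vy += unit_y;
        if (y < 0)                   /* PAD: clamp to the source edges      */
            y = 0;
        else if (y >= src_height)
            y = src_height - 1;

        scanline_skeleton (dst, src_first_line + src_stride * y,
                           width, vx, unit_x);
    }
}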
1410FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, OVER, NORMAL)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_565_normal_OVER
(uint16_t *dst, uint32_t *src, int32_t w, pixman_fixed_t vx,
pixman_fixed_t unit_x, pixman_fixed_t max_vx) { uint32_t d; uint32_t
s1, s2; uint8_t a1, a2; int x1, x2; if (PIXMAN_OP_OVER != PIXMAN_OP_SRC
&& PIXMAN_OP_OVER != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else
if (s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst)
>> 2) & 0x7)) | ((((*dst) << 5) & 0xfc00
) | (((*dst) >> 1) & 0x300)) | ((((*dst) << 8
) & 0xf80000) | (((*dst) << 3) & 0x70000))) | 0xff000000
); s1 = (s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__
; r1__ = (d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) &
0xff00ff) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r1__ &= 0xff00ff; } while
(0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while
(0); r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; if (a2 == 0xff) { *dst = ((((s2) >> 3) &
0x001f) | (((s2) >> 5) & 0x07e0) | (((s2) >>
8) & 0xf800)); } else if (s2) { d = ((((((*dst) <<
3) & 0xf8) | (((*dst) >> 2) & 0x7)) | ((((*dst
) << 5) & 0xfc00) | (((*dst) >> 1) & 0x300
)) | ((((*dst) << 8) & 0xf80000) | (((*dst) <<
3) & 0x70000))) | 0xff000000); s2 = (s2); a2 ^= 0xff; do
{ uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s2) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a2)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = ((((d) >> 3) & 0x001f) | (((d) >> 5) &
0x07e0) | (((d) >> 8) & 0xf800)); } dst++; } else {
*dst++ = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); *dst++
= ((((s2) >> 3) & 0x001f) | (((s2) >> 5) &
0x07e0) | (((s2) >> 8) & 0xf800)); } } if (w &
1) { x1 = vx >> 16; s1 = src[x1]; if (PIXMAN_OP_OVER ==
PIXMAN_OP_OVER) { a1 = ((s1) >> 24); if (a1 == 0xff) {
*dst = ((((s1) >> 3) & 0x001f) | (((s1) >> 5
) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else if
(s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst) >>
2) & 0x7)) | ((((*dst) << 5) & 0xfc00) | (((*dst
) >> 1) & 0x300)) | ((((*dst) << 8) & 0xf80000
) | (((*dst) << 3) & 0x70000))) | 0xff000000); s1 =
(s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ =
(d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff
) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) &
0xff00ff)) >> 8; r1__ &= 0xff00ff; } while (0); do
{ t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >>
8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while (0)
; r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; } else { *dst++ = ((((s1) >> 3) & 0x001f
) | (((s1) >> 5) & 0x07e0) | (((s1) >> 8) &
0xf800)); } } } static void fast_composite_scaled_nearest_8888_565_normal_OVER
(pixman_implementation_t *imp, pixman_op_t op, pixman_image_t
* src_image, pixman_image_t * mask_image, pixman_image_t * dst_image
, int32_t src_x, int32_t src_y, int32_t mask_x, int32_t mask_y
, int32_t dst_x, int32_t dst_y, int32_t width, int32_t height
) { uint16_t *dst_line; uint32_t *src_first_line; int y; pixman_fixed_t
max_vx = max_vx; pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint16_t *dst; int src_stride, dst_stride; do
{ uint32_t *__bits__; int __stride__; __bits__ = dst_image->
bits.bits; __stride__ = dst_image->bits.rowstride; (dst_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint16_t
); (dst_line) = ((uint16_t *) __bits__) + (dst_stride) * (dst_y
) + (1) * (dst_x); } while (0); do { uint32_t *__bits__; int __stride__
; __bits__ = src_image->bits.bits; __stride__ = src_image->
bits.rowstride; (src_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint32_t); (src_first_line) = ((uint32_t *)
__bits__) + (src_stride) * (0) + (1) * (0); } while (0); v.vector
[0] = ((pixman_fixed_t) ((src_x) << 16)) + (((pixman_fixed_t
) ((1) << 16))) / 2; v.vector[1] = ((pixman_fixed_t) ((
src_y) << 16)) + (((pixman_fixed_t) ((1) << 16)))
/ 2; v.vector[2] = (((pixman_fixed_t) ((1) << 16))); if
(!pixman_transform_point_3d (src_image->common.transform,
&v)) return; unit_x = src_image->common.transform->
matrix[0][0]; unit_y = src_image->common.transform->matrix
[1][1]; v.vector[0] -= ((pixman_fixed_t) 1); v.vector[1] -= (
(pixman_fixed_t) 1); vx = v.vector[0]; vy = v.vector[1]; if (
PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL) { max_vx = src_image
->bits.width << 16; max_vy = src_image->bits.height
<< 16; repeat (PIXMAN_REPEAT_NORMAL, &vx, max_vx);
repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); } if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NONE
) { pad_repeat_get_scanline_bounds (src_image->bits.width,
vx, unit_x, &width, &left_pad, &right_pad); vx +=
left_pad * unit_x; } while (--height >= 0) { dst = dst_line
; dst_line += dst_stride; y = vy >> 16; vy += unit_y; if
(PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL) repeat (PIXMAN_REPEAT_NORMAL
, &vy, max_vy); if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_PAD
) { repeat (PIXMAN_REPEAT_PAD, &y, src_image->bits.height
); src = src_first_line + src_stride * y; if (left_pad > 0
) { scaled_nearest_scanline_8888_565_normal_OVER (dst, src, left_pad
, 0, 0, 0); } if (width > 0) { scaled_nearest_scanline_8888_565_normal_OVER
(dst + left_pad, src, width, vx, unit_x, 0); } if (right_pad
> 0) { scaled_nearest_scanline_8888_565_normal_OVER (dst +
left_pad + width, src + src_image->bits.width - 1, right_pad
, 0, 0, 0); } } else if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NONE
) { static uint32_t zero[1] = { 0 }; if (y < 0 || y >= src_image
->bits.height) { scaled_nearest_scanline_8888_565_normal_OVER
(dst, zero, left_pad + width + right_pad, 0, 0, 0); continue
; } src = src_first_line + src_stride * y; if (left_pad > 0
) { scaled_nearest_scanline_8888_565_normal_OVER (dst, zero, left_pad
, 0, 0, 0); } if (width > 0) { scaled_nearest_scanline_8888_565_normal_OVER
(dst + left_pad, src, width, vx, unit_x, 0); } if (right_pad
> 0) { scaled_nearest_scanline_8888_565_normal_OVER (dst +
left_pad + width, zero, right_pad, 0, 0, 0); } } else { src =
src_first_line + src_stride * y; scaled_nearest_scanline_8888_565_normal_OVER
(dst, src, width, vx, unit_x, max_vx); } } }
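The expansions above come from the FAST_NEAREST / FAST_NEAREST_MAINLOOP macros: the repeat mode is substituted as a compile-time constant, so comparisons such as PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL are constant expressions and the compiler discards the branches that can never be taken, leaving one specialised loop per repeat mode. A minimal sketch of that specialisation pattern (editor's illustration only, not pixman code; MAKE_FILL, fill_normal and fill_pad are hypothetical names):

#define MAKE_FILL(name, repeat_is_normal)                                    \
static void fill_##name (int *y, int height)                                 \
{                                                                             \
    if (repeat_is_normal)                 /* constant: branch kept/dropped */ \
        *y = ((*y % height) + height) % height;   /* tile (NORMAL)         */ \
    else if (*y < 0)                      /* clamp below (PAD)             */ \
        *y = 0;                                                               \
    else if (*y >= height)                /* clamp above (PAD)             */ \
        *y = height - 1;                                                      \
}

MAKE_FILL (normal, 1)  /* fill_normal: only the tiling branch survives      */
MAKE_FILL (pad,    0)  /* fill_pad:    only the clamping branches survive   */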
1411
1412/* Use more unrolling for src_0565_0565 because it is typically CPU bound */
1413static force_inline__inline__ __attribute__ ((__always_inline__)) void
1414scaled_nearest_scanline_565_565_SRC (uint16_t * dst,
1415 uint16_t * src,
1416 int32_t w,
1417 pixman_fixed_t vx,
1418 pixman_fixed_t unit_x,
1419 pixman_fixed_t max_vx)
1420{
1421 uint16_t tmp1, tmp2, tmp3, tmp4;
1422 while ((w -= 4) >= 0)
1423 {
1424 tmp1 = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1425 vx += unit_x;
1426 tmp2 = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1427 vx += unit_x;
1428 tmp3 = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1429 vx += unit_x;
1430 tmp4 = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1431 vx += unit_x;
1432 *dst++ = tmp1;
1433 *dst++ = tmp2;
1434 *dst++ = tmp3;
1435 *dst++ = tmp4;
1436 }
1437 if (w & 2)
1438 {
1439 tmp1 = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1440 vx += unit_x;
1441 tmp2 = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1442 vx += unit_x;
1443 *dst++ = tmp1;
1444 *dst++ = tmp2;
1445 }
1446 if (w & 1)
1447 *dst++ = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1448}
1449
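The scanline routine above steps a 16.16 fixed-point source coordinate (vx) by unit_x for every destination pixel and indexes the source row with pixman_fixed_to_int; the 4x/2x/1x tail structure exists only to keep the loads and stores pipelined for this CPU-bound path. A minimal, un-unrolled sketch of the same stepping, assuming only that pixman_fixed_t is a signed 16.16 fixed-point value (editor's illustration, not the pixman routine):

#include <stdint.h>

typedef int32_t fixed_16_16;                    /* stand-in for pixman_fixed_t */

static void
scale_row_nearest_565 (uint16_t *dst, const uint16_t *src, int32_t w,
                       fixed_16_16 vx, fixed_16_16 unit_x)
{
    while (w-- > 0)
    {
        *dst++ = src[vx >> 16];                 /* pixman_fixed_to_int (vx)    */
        vx += unit_x;                           /* advance one destination px  */
    }
}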
1450FAST_NEAREST_MAINLOOP (565_565_cover_SRC,static void fast_composite_scaled_nearest_565_565_cover_SRC (
pixman_implementation_t *imp, pixman_op_t op, pixman_image_t *
src_image, pixman_image_t * mask_image, pixman_image_t * dst_image
, int32_t src_x, int32_t src_y, int32_t mask_x, int32_t mask_y
, int32_t dst_x, int32_t dst_y, int32_t width, int32_t height
) { uint16_t *dst_line; uint16_t *src_first_line; int y; pixman_fixed_t
max_vx = max_vx; pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint16_t *src; uint16_t *dst; int src_stride, dst_stride; do
{ uint32_t *__bits__; int __stride__; __bits__ = dst_image->
bits.bits; __stride__ = dst_image->bits.rowstride; (dst_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint16_t
); (dst_line) = ((uint16_t *) __bits__) + (dst_stride) * (dst_y
) + (1) * (dst_x); } while (0); do { uint32_t *__bits__; int __stride__
; __bits__ = src_image->bits.bits; __stride__ = src_image->
bits.rowstride; (src_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint16_t); (src_first_line) = ((uint16_t *)
__bits__) + (src_stride) * (0) + (1) * (0); } while (0); v.vector
[0] = ((pixman_fixed_t) ((src_x) << 16)) + (((pixman_fixed_t
) ((1) << 16))) / 2; v.vector[1] = ((pixman_fixed_t) ((
src_y) << 16)) + (((pixman_fixed_t) ((1) << 16)))
/ 2; v.vector[2] = (((pixman_fixed_t) ((1) << 16))); if
(!pixman_transform_point_3d (src_image->common.transform,
&v)) return; unit_x = src_image->common.transform->
matrix[0][0]; unit_y = src_image->common.transform->matrix
[1][1]; v.vector[0] -= ((pixman_fixed_t) 1); v.vector[1] -= (
(pixman_fixed_t) 1); vx = v.vector[0]; vy = v.vector[1]; if (
-1 == PIXMAN_REPEAT_NORMAL) { max_vx = src_image->bits.width
<< 16; max_vy = src_image->bits.height << 16;
repeat (PIXMAN_REPEAT_NORMAL, &vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL
, &vy, max_vy); } if (-1 == PIXMAN_REPEAT_PAD || -1 == PIXMAN_REPEAT_NONE
) { pad_repeat_get_scanline_bounds (src_image->bits.width,
vx, unit_x, &width, &left_pad, &right_pad); vx +=
left_pad * unit_x; } while (--height >= 0) { dst = dst_line
; dst_line += dst_stride; y = vy >> 16; vy += unit_y; if
(-1 == PIXMAN_REPEAT_NORMAL) repeat (PIXMAN_REPEAT_NORMAL, &
vy, max_vy); if (-1 == PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD
, &y, src_image->bits.height); src = src_first_line + src_stride
* y; if (left_pad > 0) { scaled_nearest_scanline_565_565_SRC
(dst, src, left_pad, 0, 0, 0); } if (width > 0) { scaled_nearest_scanline_565_565_SRC
(dst + left_pad, src, width, vx, unit_x, 0); } if (right_pad
> 0) { scaled_nearest_scanline_565_565_SRC (dst + left_pad
+ width, src + src_image->bits.width - 1, right_pad, 0, 0
, 0); } } else if (-1 == PIXMAN_REPEAT_NONE) { static uint16_t
zero[1] = { 0 }; if (y < 0 || y >= src_image->bits.
height) { scaled_nearest_scanline_565_565_SRC (dst, zero, left_pad
+ width + right_pad, 0, 0, 0); continue; } src = src_first_line
+ src_stride * y; if (left_pad > 0) { scaled_nearest_scanline_565_565_SRC
(dst, zero, left_pad, 0, 0, 0); } if (width > 0) { scaled_nearest_scanline_565_565_SRC
(dst + left_pad, src, width, vx, unit_x, 0); } if (right_pad
> 0) { scaled_nearest_scanline_565_565_SRC (dst + left_pad
+ width, zero, right_pad, 0, 0, 0); } } else { src = src_first_line
+ src_stride * y; scaled_nearest_scanline_565_565_SRC (dst, src
, width, vx, unit_x, max_vx); } } }
1451 scaled_nearest_scanline_565_565_SRC,
1452 uint16_t, uint16_t, COVER)
1453FAST_NEAREST_MAINLOOP (565_565_none_SRC,static void fast_composite_scaled_nearest_565_565_none_SRC (pixman_implementation_t
*imp, pixman_op_t op, pixman_image_t * src_image, pixman_image_t
* mask_image, pixman_image_t * dst_image, int32_t src_x, int32_t
src_y, int32_t mask_x, int32_t mask_y, int32_t dst_x, int32_t
dst_y, int32_t width, int32_t height) { uint16_t *dst_line; uint16_t
*src_first_line; int y; pixman_fixed_t max_vx = max_vx; pixman_fixed_t
max_vy; pixman_vector_t v; pixman_fixed_t vx, vy; pixman_fixed_t
unit_x, unit_y; int32_t left_pad, right_pad; uint16_t *src; uint16_t
*dst; int src_stride, dst_stride; do { uint32_t *__bits__; int
__stride__; __bits__ = dst_image->bits.bits; __stride__ =
dst_image->bits.rowstride; (dst_stride) = __stride__ * (int
) sizeof (uint32_t) / (int) sizeof (uint16_t); (dst_line) = (
(uint16_t *) __bits__) + (dst_stride) * (dst_y) + (1) * (dst_x
); } while (0); do { uint32_t *__bits__; int __stride__; __bits__
= src_image->bits.bits; __stride__ = src_image->bits.rowstride
; (src_stride) = __stride__ * (int) sizeof (uint32_t) / (int)
sizeof (uint16_t); (src_first_line) = ((uint16_t *) __bits__
) + (src_stride) * (0) + (1) * (0); } while (0); v.vector[0] =
((pixman_fixed_t) ((src_x) << 16)) + (((pixman_fixed_t
) ((1) << 16))) / 2; v.vector[1] = ((pixman_fixed_t) ((
src_y) << 16)) + (((pixman_fixed_t) ((1) << 16)))
/ 2; v.vector[2] = (((pixman_fixed_t) ((1) << 16))); if
(!pixman_transform_point_3d (src_image->common.transform,
&v)) return; unit_x = src_image->common.transform->
matrix[0][0]; unit_y = src_image->common.transform->matrix
[1][1]; v.vector[0] -= ((pixman_fixed_t) 1); v.vector[1] -= (
(pixman_fixed_t) 1); vx = v.vector[0]; vy = v.vector[1]; if (
PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NORMAL) { max_vx = src_image
->bits.width << 16; max_vy = src_image->bits.height
<< 16; repeat (PIXMAN_REPEAT_NORMAL, &vx, max_vx);
repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); } if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NONE
) { pad_repeat_get_scanline_bounds (src_image->bits.width,
vx, unit_x, &width, &left_pad, &right_pad); vx +=
left_pad * unit_x; } while (--height >= 0) { dst = dst_line
; dst_line += dst_stride; y = vy >> 16; vy += unit_y; if
(PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NORMAL) repeat (PIXMAN_REPEAT_NORMAL
, &vy, max_vy); if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_PAD
) { repeat (PIXMAN_REPEAT_PAD, &y, src_image->bits.height
); src = src_first_line + src_stride * y; if (left_pad > 0
) { scaled_nearest_scanline_565_565_SRC (dst, src, left_pad, 0
, 0, 0); } if (width > 0) { scaled_nearest_scanline_565_565_SRC
(dst + left_pad, src, width, vx, unit_x, 0); } if (right_pad
> 0) { scaled_nearest_scanline_565_565_SRC (dst + left_pad
+ width, src + src_image->bits.width - 1, right_pad, 0, 0
, 0); } } else if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NONE) {
static uint16_t zero[1] = { 0 }; if (y < 0 || y >= src_image
->bits.height) { scaled_nearest_scanline_565_565_SRC (dst,
zero, left_pad + width + right_pad, 0, 0, 0); continue; } src
= src_first_line + src_stride * y; if (left_pad > 0) { scaled_nearest_scanline_565_565_SRC
(dst, zero, left_pad, 0, 0, 0); } if (width > 0) { scaled_nearest_scanline_565_565_SRC
(dst + left_pad, src, width, vx, unit_x, 0); } if (right_pad
> 0) { scaled_nearest_scanline_565_565_SRC (dst + left_pad
+ width, zero, right_pad, 0, 0, 0); } } else { src = src_first_line
+ src_stride * y; scaled_nearest_scanline_565_565_SRC (dst, src
, width, vx, unit_x, max_vx); } } }
1454 scaled_nearest_scanline_565_565_SRC,
1455 uint16_t, uint16_t, NONE)
1456FAST_NEAREST_MAINLOOP (565_565_pad_SRC,static void fast_composite_scaled_nearest_565_565_pad_SRC (pixman_implementation_t
*imp, pixman_op_t op, pixman_image_t * src_image, pixman_image_t
* mask_image, pixman_image_t * dst_image, int32_t src_x, int32_t
src_y, int32_t mask_x, int32_t mask_y, int32_t dst_x, int32_t
dst_y, int32_t width, int32_t height) { uint16_t *dst_line; uint16_t
*src_first_line; int y; pixman_fixed_t max_vx = max_vx; pixman_fixed_t
max_vy; pixman_vector_t v; pixman_fixed_t vx, vy; pixman_fixed_t
unit_x, unit_y; int32_t left_pad, right_pad; uint16_t *src; uint16_t
*dst; int src_stride, dst_stride; do { uint32_t *__bits__; int
__stride__; __bits__ = dst_image->bits.bits; __stride__ =
dst_image->bits.rowstride; (dst_stride) = __stride__ * (int
) sizeof (uint32_t) / (int) sizeof (uint16_t); (dst_line) = (
(uint16_t *) __bits__) + (dst_stride) * (dst_y) + (1) * (dst_x
); } while (0); do { uint32_t *__bits__; int __stride__; __bits__
= src_image->bits.bits; __stride__ = src_image->bits.rowstride
; (src_stride) = __stride__ * (int) sizeof (uint32_t) / (int)
sizeof (uint16_t); (src_first_line) = ((uint16_t *) __bits__
) + (src_stride) * (0) + (1) * (0); } while (0); v.vector[0] =
((pixman_fixed_t) ((src_x) << 16)) + (((pixman_fixed_t
) ((1) << 16))) / 2; v.vector[1] = ((pixman_fixed_t) ((
src_y) << 16)) + (((pixman_fixed_t) ((1) << 16)))
/ 2; v.vector[2] = (((pixman_fixed_t) ((1) << 16))); if
(!pixman_transform_point_3d (src_image->common.transform,
&v)) return; unit_x = src_image->common.transform->
matrix[0][0]; unit_y = src_image->common.transform->matrix
[1][1]; v.vector[0] -= ((pixman_fixed_t) 1); v.vector[1] -= (
(pixman_fixed_t) 1); vx = v.vector[0]; vy = v.vector[1]; if (
PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL) { max_vx = src_image
->bits.width << 16; max_vy = src_image->bits.height
<< 16; repeat (PIXMAN_REPEAT_NORMAL, &vx, max_vx);
repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); } if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NONE
) { pad_repeat_get_scanline_bounds (src_image->bits.width,
vx, unit_x, &width, &left_pad, &right_pad); vx +=
left_pad * unit_x; } while (--height >= 0) { dst = dst_line
; dst_line += dst_stride; y = vy >> 16; vy += unit_y; if
(PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL) repeat (PIXMAN_REPEAT_NORMAL
, &vy, max_vy); if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_PAD
) { repeat (PIXMAN_REPEAT_PAD, &y, src_image->bits.height
); src = src_first_line + src_stride * y; if (left_pad > 0
) { scaled_nearest_scanline_565_565_SRC (dst, src, left_pad, 0
, 0, 0); } if (width > 0) { scaled_nearest_scanline_565_565_SRC
(dst + left_pad, src, width, vx, unit_x, 0); } if (right_pad
> 0) { scaled_nearest_scanline_565_565_SRC (dst + left_pad
+ width, src + src_image->bits.width - 1, right_pad, 0, 0
, 0); } } else if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NONE) {
static uint16_t zero[1] = { 0 }; if (y < 0 || y >= src_image
->bits.height) { scaled_nearest_scanline_565_565_SRC (dst,
zero, left_pad + width + right_pad, 0, 0, 0); continue; } src
= src_first_line + src_stride * y; if (left_pad > 0) { scaled_nearest_scanline_565_565_SRC
(dst, zero, left_pad, 0, 0, 0); } if (width > 0) { scaled_nearest_scanline_565_565_SRC
(dst + left_pad, src, width, vx, unit_x, 0); } if (right_pad
> 0) { scaled_nearest_scanline_565_565_SRC (dst + left_pad
+ width, zero, right_pad, 0, 0, 0); } } else { src = src_first_line
+ src_stride * y; scaled_nearest_scanline_565_565_SRC (dst, src
, width, vx, unit_x, max_vx); } } }
1457 scaled_nearest_scanline_565_565_SRC,
1458 uint16_t, uint16_t, PAD)
1459
1460static force_inline__inline__ __attribute__ ((__always_inline__)) uint32_t
1461fetch_nearest (pixman_repeat_t src_repeat,
1462 pixman_format_code_t format,
1463 uint32_t *src, int x, int src_width)
1464{
1465 if (repeat (src_repeat, &x, src_width))
1466 {
1467 if (format == PIXMAN_x8r8g8b8)
1468 return *(src + x) | 0xff000000;
1469 else
1470 return *(src + x);
1471 }
1472 else
1473 {
1474 return 0;
1475 }
1476}
1477
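fetch_nearest returns 0 (fully transparent) when repeat() rejects the coordinate under PIXMAN_REPEAT_NONE, and for x8r8g8b8 sources it ORs in 0xff000000 because the padding byte of that format is undefined and must read as opaque alpha before the pixel reaches an OVER combiner that uses src >> 24. A small sketch of just the NONE-repeat case (editor's illustration; fetch_or_transparent is a hypothetical helper, not a pixman function):

#include <stdint.h>

static inline uint32_t
fetch_or_transparent (const uint32_t *src, int x, int src_width, int opaque_format)
{
    if (x < 0 || x >= src_width)        /* outside the image: contributes nothing */
        return 0;
    /* force the undefined x8r8g8b8 padding byte to opaque alpha */
    return opaque_format ? (src[x] | 0xff000000) : src[x];
}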
1478static force_inline__inline__ __attribute__ ((__always_inline__)) void
1479combine_over (uint32_t s, uint32_t *dst)
1480{
1481 if (s)
1482 {
1483 uint8_t ia = 0xff - (s >> 24);
1484
1485 if (ia)
1486 UN8x4_MUL_UN8_ADD_UN8x4 (*dst, ia, s)do { uint32_t r1__, r2__, r3__, t__; r1__ = (*dst); r2__ = (s
) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((ia))
; t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
*dst) >> 8; r3__ = ((s) >> 8) & 0xff00ff; do {
t__ = ((r2__) & 0xff00ff) * ((ia)); t__ += 0x800080; r2__
= (t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__
&= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__
|= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__ = (t__
& 0xff00ff); } while (0); (*dst) = r1__ | (r2__ <<
8); } while (0)
;
1487 else
1488 *dst = s;
1489 }
1490}
1491
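combine_over implements the OVER operator: each destination channel is scaled by the inverse source alpha and the source channel is added with saturation, with two shortcuts (a zero source pixel is skipped, an opaque source is copied through). The UN8x4_MUL_UN8_ADD_UN8x4 macro does this for all four channels at once using the 0x800080 / 0x10000100 SWAR tricks; a scalar per-channel sketch of the same arithmetic (editor's illustration, assuming non-premultiplied-agnostic 8-bit channels; mul_un8 and over_scalar are hypothetical names):

#include <stdint.h>

static inline uint8_t
mul_un8 (uint8_t a, uint8_t b)                  /* round (a * b / 255) */
{
    uint16_t t = (uint16_t) a * b + 0x80;
    return (uint8_t) ((t + (t >> 8)) >> 8);
}

static inline uint32_t
over_scalar (uint32_t s, uint32_t d)
{
    uint8_t  ia = 0xff - (uint8_t) (s >> 24);   /* inverse source alpha */
    uint32_t r  = 0;
    int      shift;

    for (shift = 0; shift < 32; shift += 8)
    {
        uint32_t c = mul_un8 ((d >> shift) & 0xff, ia) + ((s >> shift) & 0xff);
        if (c > 0xff)                           /* saturate, like UN8x4_ADD_UN8x4 */
            c = 0xff;
        r |= c << shift;
    }
    return r;
}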
1492static force_inline__inline__ __attribute__ ((__always_inline__)) void
1493combine_src (uint32_t s, uint32_t *dst)
1494{
1495 *dst = s;
1496}
1497
1498static void
1499fast_composite_scaled_nearest (pixman_implementation_t *imp,
1500 pixman_op_t op,
1501 pixman_image_t * src_image,
1502 pixman_image_t * mask_image,
1503 pixman_image_t * dst_image,
1504 int32_t src_x,
1505 int32_t src_y,
1506 int32_t mask_x,
1507 int32_t mask_y,
1508 int32_t dest_x,
1509 int32_t dest_y,
1510 int32_t width,
1511 int32_t height)
1512{
1513 uint32_t *dst_line;
1514 uint32_t *src_line;
1515 int dst_stride, src_stride;
1516 int src_width, src_height;
1517 pixman_repeat_t src_repeat;
1518 pixman_fixed_t unit_x, unit_y;
1519 pixman_format_code_t src_format;
1520 pixman_vector_t v;
1521 pixman_fixed_t vy;
1522
1523 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1)do { uint32_t *__bits__; int __stride__; __bits__ = dst_image
->bits.bits; __stride__ = dst_image->bits.rowstride; (dst_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (dst_line) = ((uint32_t *) __bits__) + (dst_stride) * (dest_y
) + (1) * (dest_x); } while (0)
;
1524 /* pass in 0 instead of src_x and src_y because src_x and src_y need to be
1525 * transformed from destination space to source space
1526 */
1527 PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, src_stride, src_line, 1)do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_line) = ((uint32_t *) __bits__) + (src_stride) * (0) +
(1) * (0); } while (0)
;
1528
1529 /* reference point is the center of the pixel */
1530 v.vector[0] = pixman_int_to_fixed (src_x)((pixman_fixed_t) ((src_x) << 16)) + pixman_fixed_1(((pixman_fixed_t) ((1) << 16))) / 2;
1531 v.vector[1] = pixman_int_to_fixed (src_y)((pixman_fixed_t) ((src_y) << 16)) + pixman_fixed_1(((pixman_fixed_t) ((1) << 16))) / 2;
1532 v.vector[2] = pixman_fixed_1(((pixman_fixed_t) ((1) << 16)));
1533
1534 if (!pixman_transform_point_3d (src_image->common.transform, &v))
1535 return;
1536
1537 unit_x = src_image->common.transform->matrix[0][0];
1538 unit_y = src_image->common.transform->matrix[1][1];
1539
1540 /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */
1541 v.vector[0] -= pixman_fixed_e((pixman_fixed_t) 1);
1542 v.vector[1] -= pixman_fixed_e((pixman_fixed_t) 1);
1543
1544 src_height = src_image->bits.height;
1545 src_width = src_image->bits.width;
1546 src_repeat = src_image->common.repeat;
1547 src_format = src_image->bits.format;
1548
1549 vy = v.vector[1];
1550 while (height--)
1551 {
1552 pixman_fixed_t vx = v.vector[0];
1553 int y = pixman_fixed_to_int (vy)((int) ((vy) >> 16));
1554 uint32_t *dst = dst_line;
1555
1556 dst_line += dst_stride;
1557
1558 /* adjust the y location by a unit vector in the y direction
1559 * this is equivalent to transforming y+1 of the destination point to source space */
1560 vy += unit_y;
1561
1562 if (!repeat (src_repeat, &y, src_height))
1563 {
1564 if (op == PIXMAN_OP_SRC)
1565 memset (dst, 0, sizeof (*dst) * width);
1566 }
1567 else
1568 {
1569 int w = width;
1570
1571 uint32_t *src = src_line + y * src_stride;
1572
1573 while (w >= 2)
1574 {
1575 uint32_t s1, s2;
1576 int x1, x2;
1577
1578 x1 = pixman_fixed_to_int (vx)((int) ((vx) >> 16));
1579 vx += unit_x;
1580
1581 x2 = pixman_fixed_to_int (vx)((int) ((vx) >> 16));
1582 vx += unit_x;
1583
1584 w -= 2;
1585
1586 s1 = fetch_nearest (src_repeat, src_format, src, x1, src_width);
1587 s2 = fetch_nearest (src_repeat, src_format, src, x2, src_width);
1588
1589 if (op == PIXMAN_OP_OVER)
1590 {
1591 combine_over (s1, dst++);
1592 combine_over (s2, dst++);
1593 }
1594 else
1595 {
1596 combine_src (s1, dst++);
1597 combine_src (s2, dst++);
1598 }
1599 }
1600
1601 while (w--)
1602 {
1603 uint32_t s;
1604 int x;
1605
1606 x = pixman_fixed_to_int (vx)((int) ((vx) >> 16));
1607 vx += unit_x;
1608
1609 s = fetch_nearest (src_repeat, src_format, src, x, src_width);
1610
1611 if (op == PIXMAN_OP_OVER)
1612 combine_over (s, dst++);
1613 else
1614 combine_src (s, dst++);
1615 }
1616 }
1617 }
1618}
1619
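The coordinate setup in fast_composite_scaled_nearest samples the centre of each destination pixel (pixman_fixed_1 / 2), transforms it into source space, and then subtracts pixman_fixed_e so that a coordinate landing exactly on 0.5 rounds down rather than up when truncated by pixman_fixed_to_int. A minimal sketch of that mapping for a pure scale transform with no translation (editor's illustration; nearest_src_index is a hypothetical name and does not call any pixman API):

#include <stdint.h>

typedef int32_t fixed_16_16;                    /* stand-in for pixman_fixed_t */

static int
nearest_src_index (int dst_i, fixed_16_16 scale /* source px per dest px, 16.16 */)
{
    fixed_16_16 v = ((fixed_16_16) dst_i << 16) + 0x8000;      /* pixel centre  */
    v  = (fixed_16_16) (((int64_t) v * scale) >> 16);          /* apply scale   */
    v -= 1;                                                     /* pixman_fixed_e:
                                                                    0.5 rounds down */
    return v >> 16;                                             /* fixed_to_int */
}

With an identity scale (0x10000), destination pixel 0 maps to 0x8000 - 1 = 0x7fff, which truncates to source index 0, matching the "0.5 rounds to 0, not 1" comment above.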
1620static const pixman_fast_path_t c_fast_paths[] =
1621{
1622 PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, fast_composite_over_n_8_0565){ PIXMAN_OP_OVER, (((0) << 24) | ((1) << 16) | ((
0) << 12) | ((0) << 8) | ((0) << 4) | ((0))
), (((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((1) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 <<
0)))), PIXMAN_a8, ((PIXMAN_a8 == (((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0)))) ? 0 : ((((1 << 2) | (1 << 5) | (1 <<
1) | (1 << 6)) | ((PIXMAN_a8 == (((0) << 24) | (
(1) << 16) | ((0) << 12) | ((0) << 8) | ((0
) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 << 0
)))) | (1 << 9))), PIXMAN_r5g6b5, ((1 << 5) | (1 <<
1) | (1 << 6)), fast_composite_over_n_8_0565 }
,
1623 PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, fast_composite_over_n_8_0565){ PIXMAN_OP_OVER, (((0) << 24) | ((1) << 16) | ((
0) << 12) | ((0) << 8) | ((0) << 4) | ((0))
), (((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((1) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 <<
0)))), PIXMAN_a8, ((PIXMAN_a8 == (((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0)))) ? 0 : ((((1 << 2) | (1 << 5) | (1 <<
1) | (1 << 6)) | ((PIXMAN_a8 == (((0) << 24) | (
(1) << 16) | ((0) << 12) | ((0) << 8) | ((0
) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 << 0
)))) | (1 << 9))), PIXMAN_b5g6r5, ((1 << 5) | (1 <<
1) | (1 << 6)), fast_composite_over_n_8_0565 }
,
1624 PIXMAN_STD_FAST_PATH (OVER, solid, a8, r8g8b8, fast_composite_over_n_8_0888){ PIXMAN_OP_OVER, (((0) << 24) | ((1) << 16) | ((
0) << 12) | ((0) << 8) | ((0) << 4) | ((0))
), (((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((1) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 <<
0)))), PIXMAN_a8, ((PIXMAN_a8 == (((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0)))) ? 0 : ((((1 << 2) | (1 << 5) | (1 <<
1) | (1 << 6)) | ((PIXMAN_a8 == (((0) << 24) | (
(1) << 16) | ((0) << 12) | ((0) << 8) | ((0
) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 << 0
)))) | (1 << 9))), PIXMAN_r8g8b8, ((1 << 5) | (1 <<
1) | (1 << 6)), fast_composite_over_n_8_0888 }
,
1625 PIXMAN_STD_FAST_PATH (OVER, solid, a8, b8g8r8, fast_composite_over_n_8_0888){ PIXMAN_OP_OVER, (((0) << 24) | ((1) << 16) | ((
0) << 12) | ((0) << 8) | ((0) << 4) | ((0))
), (((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((1) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 <<
0)))), PIXMAN_a8, ((PIXMAN_a8 == (((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0)))) ? 0 : ((((1 << 2) | (1 << 5) | (1 <<
1) | (1 << 6)) | ((PIXMAN_a8 == (((0) << 24) | (
(1) << 16) | ((0) << 12) | ((0) << 8) | ((0
) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 << 0
)))) | (1 << 9))), PIXMAN_b8g8r8, ((1 << 5) | (1 <<
1) | (1 << 6)), fast_composite_over_n_8_0888 }
,
1626 PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, fast_composite_over_n_8_8888){ PIXMAN_OP_OVER, (((0) << 24) | ((1) << 16) | ((
0) << 12) | ((0) << 8) | ((0) << 4) | ((0))
), (((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((1) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 <<
0)))), PIXMAN_a8, ((PIXMAN_a8 == (((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0)))) ? 0 : ((((1 << 2) | (1 << 5) | (1 <<
1) | (1 << 6)) | ((PIXMAN_a8 == (((0) << 24) | (
(1) << 16) | ((0) << 12) | ((0) << 8) | ((0
) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 << 0
)))) | (1 << 9))), PIXMAN_a8r8g8b8, ((1 << 5) | (
1 << 1) | (1 << 6)), fast_composite_over_n_8_8888
}
,
1627 PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, fast_composite_over_n_8_8888){ PIXMAN_OP_OVER, (((0) << 24) | ((1) << 16) | ((
0) << 12) | ((0) << 8) | ((0) << 4) | ((0))
), (((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((1) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 <<
0)))), PIXMAN_a8, ((PIXMAN_a8 == (((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0)))) ? 0 : ((((1 << 2) | (1 << 5) | (1 <<
1) | (1 << 6)) | ((PIXMAN_a8 == (((0) << 24) | (
(1) << 16) | ((0) << 12) | ((0) << 8) | ((0
) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 << 0
)))) | (1 << 9))), PIXMAN_x8r8g8b8, ((1 << 5) | (
1 << 1) | (1 << 6)), fast_composite_over_n_8_8888
}
,
1628 PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, fast_composite_over_n_8_8888){ PIXMAN_OP_OVER, (((0) << 24) | ((1) << 16) | ((
0) << 12) | ((0) << 8) | ((0) << 4) | ((0))
), (((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((1) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 <<
0)))), PIXMAN_a8, ((PIXMAN_a8 == (((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0)))) ? 0 : ((((1 << 2) | (1 << 5) | (1 <<
1) | (1 << 6)) | ((PIXMAN_a8 == (((0) << 24) | (
(1) << 16) | ((0) << 12) | ((0) << 8) | ((0
) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 << 0
)))) | (1 << 9))), PIXMAN_a8b8g8r8, ((1 << 5) | (
1 << 1) | (1 << 6)), fast_composite_over_n_8_8888
}
,
1629 PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, fast_composite_over_n_8_8888){ PIXMAN_OP_OVER, (((0) << 24) | ((1) << 16) | ((
0) << 12) | ((0) << 8) | ((0) << 4) | ((0))
), (((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((1) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 <<
0)))), PIXMAN_a8, ((PIXMAN_a8 == (((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0)))) ? 0 : ((((1 << 2) | (1 << 5) | (1 <<
1) | (1 << 6)) | ((PIXMAN_a8 == (((0) << 24) | (
(1) << 16) | ((0) << 12) | ((0) << 8) | ((0
) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 << 0
)))) | (1 << 9))), PIXMAN_x8b8g8r8, ((1 << 5) | (
1 << 1) | (1 << 6)), fast_composite_over_n_8_8888
}
,
1630 PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8r8g8b8, fast_composite_over_n_1_8888){ PIXMAN_OP_OVER, (((0) << 24) | ((1) << 16) | ((
0) << 12) | ((0) << 8) | ((0) << 4) | ((0))
), (((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((1) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 <<
0)))), PIXMAN_a1, ((PIXMAN_a1 == (((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0)))) ? 0 : ((((1 << 2) | (1 << 5) | (1 <<
1) | (1 << 6)) | ((PIXMAN_a1 == (((0) << 24) | (
(1) << 16) | ((0) << 12) | ((0) << 8) | ((0
) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 << 0
)))) | (1 << 9))), PIXMAN_a8r8g8b8, ((1 << 5) | (
1 << 1) | (1 << 6)), fast_composite_over_n_1_8888
}
,
1631 PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8r8g8b8, fast_composite_over_n_1_8888){ PIXMAN_OP_OVER, (((0) << 24) | ((1) << 16) | ((
0) << 12) | ((0) << 8) | ((0) << 4) | ((0))
), (((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((1) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 <<
0)))), PIXMAN_a1, ((PIXMAN_a1 == (((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0)))) ? 0 : ((((1 << 2) | (1 << 5) | (1 <<
1) | (1 << 6)) | ((PIXMAN_a1 == (((0) << 24) | (
(1) << 16) | ((0) << 12) | ((0) << 8) | ((0
) << 4) | ((0)))) ? 0 : ((1 << 16) | (1 << 0
)))) | (1 << 9))), PIXMAN_x8r8g8b8, ((1 << 5) | (
1 << 1) | (1 << 6)), fast_composite_over_n_1_8888
}
,
1632 PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8b8g8r8, fast_composite_over_n_1_8888),
1633 PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8b8g8r8, fast_composite_over_n_1_8888),
1634 PIXMAN_STD_FAST_PATH (OVER, solid, a1, r5g6b5, fast_composite_over_n_1_0565),
1635 PIXMAN_STD_FAST_PATH (OVER, solid, a1, b5g6r5, fast_composite_over_n_1_0565),
1636 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, fast_composite_over_n_8888_8888_ca),
1637 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, fast_composite_over_n_8888_8888_ca),
1638 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, fast_composite_over_n_8888_0565_ca),
1639 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, fast_composite_over_n_8888_8888_ca),
1640 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, fast_composite_over_n_8888_8888_ca),
1641 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, fast_composite_over_n_8888_0565_ca),
1642 PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, fast_composite_over_x888_8_8888),
1643 PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, fast_composite_over_x888_8_8888),
1644 PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, fast_composite_over_x888_8_8888),
1645 PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, fast_composite_over_x888_8_8888),
1646 PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, fast_composite_over_8888_8888),
1647 PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, fast_composite_over_8888_8888),
1648 PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, fast_composite_over_8888_0565),
1649 PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, fast_composite_over_8888_8888),
1650 PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, fast_composite_over_8888_8888),
1651 PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, fast_composite_over_8888_0565),
1652 PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, fast_composite_add_8888_8888),
1653 PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, fast_composite_add_8888_8888),
1654 PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, fast_composite_add_8_8),
1655 PIXMAN_STD_FAST_PATH (ADD, a1, null, a1, fast_composite_add_1000_1000),
1656 PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, fast_composite_add_n_8888_8888_ca),
1657 PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, fast_composite_add_n_8_8),
1658 PIXMAN_STD_FAST_PATH (SRC, solid, null, a8r8g8b8, fast_composite_solid_fill),
1659 PIXMAN_STD_FAST_PATH (SRC, solid, null, x8r8g8b8, fast_composite_solid_fill),
1660 PIXMAN_STD_FAST_PATH (SRC, solid, null, a8b8g8r8, fast_composite_solid_fill),
1661 PIXMAN_STD_FAST_PATH (SRC, solid, null, x8b8g8r8, fast_composite_solid_fill),
1662 PIXMAN_STD_FAST_PATH (SRC, solid, null, a1, fast_composite_solid_fill),
1663 PIXMAN_STD_FAST_PATH (SRC, solid, null, a8, fast_composite_solid_fill),
1664 PIXMAN_STD_FAST_PATH (SRC, solid, null, r5g6b5, fast_composite_solid_fill),
1665 PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, fast_composite_src_x888_8888),
1666 PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, fast_composite_src_x888_8888),
1667 PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
1668 PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, fast_composite_src_memcpy),
1669 PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
1670 PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
1671 PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, fast_composite_src_memcpy),
1672 PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
1673 PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8x8, fast_composite_src_memcpy),
1674 PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8a8, fast_composite_src_memcpy),
1675 PIXMAN_STD_FAST_PATH (SRC, b8g8r8x8, null, b8g8r8x8, fast_composite_src_memcpy),
1676 PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, fast_composite_src_memcpy),
1677 PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, fast_composite_src_memcpy),
1678 PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, fast_composite_src_memcpy),
1679 PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, b8g8r8, fast_composite_src_memcpy),
1680 PIXMAN_STD_FAST_PATH (SRC, x1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy),
1681 PIXMAN_STD_FAST_PATH (SRC, a1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy),
1682 PIXMAN_STD_FAST_PATH (SRC, a8, null, a8, fast_composite_src_memcpy),
1683 PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, fast_composite_src_x888_0565),
1684 PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, fast_composite_src_x888_0565),
1685 PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, fast_composite_src_x888_0565),
1686 PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, fast_composite_src_x888_0565),
1687 PIXMAN_STD_FAST_PATH (IN, a8, null, a8, fast_composite_in_8_8),
1688 PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, fast_composite_in_n_8_8),
1689
1690 SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888_8888),
1691 SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888_8888),
1692 SIMPLE_NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8, 8888_8888),
1693 SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8, 8888_8888),
1694
1695 SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888_8888),
1696 SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8, 8888_8888),
1697
1698 SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, r5g6b5, 8888_565),
1699 SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, r5g6b5, 8888_565),
1700
1701 SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, r5g6b5, 565_565),
1702
1703 SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, 8888_8888){ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)) | (1
<< 16), (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_x8r8g8b8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest_8888_8888_cover_OVER
, }, { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, (((1 << 10) | (
1 << 1) | (1 << 11) | (1 << 5) | (1 <<
6)) | ((1 << 14) | (1 << 3) | (1 << 4)) | (
1 << 17)), (((0) << 24) | ((0) << 16) | ((0
) << 12) | ((0) << 8) | ((0) << 4) | ((0)))
, 0, PIXMAN_x8r8g8b8, ((1 << 5) | (1 << 1) | (1 <<
6)), fast_composite_scaled_nearest_8888_8888_none_OVER, }, {
PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, (((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)) | ((
1 << 15) | (1 << 14) | (1 << 4)) | (1 <<
17)), (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_x8r8g8b8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest_8888_8888_pad_OVER
, }, { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, (((1 << 10) | (
1 << 1) | (1 << 11) | (1 << 5) | (1 <<
6)) | ((1 << 15) | (1 << 3) | (1 << 4)) | (
1 << 17)), (((0) << 24) | ((0) << 16) | ((0
) << 12) | ((0) << 8) | ((0) << 4) | ((0)))
, 0, PIXMAN_x8r8g8b8, ((1 << 5) | (1 << 1) | (1 <<
6)), fast_composite_scaled_nearest_8888_8888_normal_OVER, }
,
1704 SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, 8888_8888){ PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)) | (1
<< 16), (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_x8b8g8r8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest_8888_8888_cover_OVER
, }, { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, (((1 << 10) | (
1 << 1) | (1 << 11) | (1 << 5) | (1 <<
6)) | ((1 << 14) | (1 << 3) | (1 << 4)) | (
1 << 17)), (((0) << 24) | ((0) << 16) | ((0
) << 12) | ((0) << 8) | ((0) << 4) | ((0)))
, 0, PIXMAN_x8b8g8r8, ((1 << 5) | (1 << 1) | (1 <<
6)), fast_composite_scaled_nearest_8888_8888_none_OVER, }, {
PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, (((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)) | ((
1 << 15) | (1 << 14) | (1 << 4)) | (1 <<
17)), (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_x8b8g8r8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest_8888_8888_pad_OVER
, }, { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, (((1 << 10) | (
1 << 1) | (1 << 11) | (1 << 5) | (1 <<
6)) | ((1 << 15) | (1 << 3) | (1 << 4)) | (
1 << 17)), (((0) << 24) | ((0) << 16) | ((0
) << 12) | ((0) << 8) | ((0) << 4) | ((0)))
, 0, PIXMAN_x8b8g8r8, ((1 << 5) | (1 << 1) | (1 <<
6)), fast_composite_scaled_nearest_8888_8888_normal_OVER, }
,
1705 SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, 8888_8888){ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)) | (1
<< 16), (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_a8r8g8b8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest_8888_8888_cover_OVER
, }, { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, (((1 << 10) | (
1 << 1) | (1 << 11) | (1 << 5) | (1 <<
6)) | ((1 << 14) | (1 << 3) | (1 << 4)) | (
1 << 17)), (((0) << 24) | ((0) << 16) | ((0
) << 12) | ((0) << 8) | ((0) << 4) | ((0)))
, 0, PIXMAN_a8r8g8b8, ((1 << 5) | (1 << 1) | (1 <<
6)), fast_composite_scaled_nearest_8888_8888_none_OVER, }, {
PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, (((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)) | ((
1 << 15) | (1 << 14) | (1 << 4)) | (1 <<
17)), (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_a8r8g8b8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest_8888_8888_pad_OVER
, }, { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, (((1 << 10) | (
1 << 1) | (1 << 11) | (1 << 5) | (1 <<
6)) | ((1 << 15) | (1 << 3) | (1 << 4)) | (
1 << 17)), (((0) << 24) | ((0) << 16) | ((0
) << 12) | ((0) << 8) | ((0) << 4) | ((0)))
, 0, PIXMAN_a8r8g8b8, ((1 << 5) | (1 << 1) | (1 <<
6)), fast_composite_scaled_nearest_8888_8888_normal_OVER, }
,
1706 SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, 8888_8888){ PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)) | (1
<< 16), (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_a8b8g8r8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest_8888_8888_cover_OVER
, }, { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, (((1 << 10) | (
1 << 1) | (1 << 11) | (1 << 5) | (1 <<
6)) | ((1 << 14) | (1 << 3) | (1 << 4)) | (
1 << 17)), (((0) << 24) | ((0) << 16) | ((0
) << 12) | ((0) << 8) | ((0) << 4) | ((0)))
, 0, PIXMAN_a8b8g8r8, ((1 << 5) | (1 << 1) | (1 <<
6)), fast_composite_scaled_nearest_8888_8888_none_OVER, }, {
PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, (((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)) | ((
1 << 15) | (1 << 14) | (1 << 4)) | (1 <<
17)), (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_a8b8g8r8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest_8888_8888_pad_OVER
, }, { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, (((1 << 10) | (
1 << 1) | (1 << 11) | (1 << 5) | (1 <<
6)) | ((1 << 15) | (1 << 3) | (1 << 4)) | (
1 << 17)), (((0) << 24) | ((0) << 16) | ((0
) << 12) | ((0) << 8) | ((0) << 4) | ((0)))
, 0, PIXMAN_a8b8g8r8, ((1 << 5) | (1 << 1) | (1 <<
6)), fast_composite_scaled_nearest_8888_8888_normal_OVER, }
,
1707
1708 SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, r5g6b5, 8888_565){ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)) | (1
<< 16), (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_r5g6b5
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest_8888_565_cover_OVER
, }, { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, (((1 << 10) | (
1 << 1) | (1 << 11) | (1 << 5) | (1 <<
6)) | ((1 << 14) | (1 << 3) | (1 << 4)) | (
1 << 17)), (((0) << 24) | ((0) << 16) | ((0
) << 12) | ((0) << 8) | ((0) << 4) | ((0)))
, 0, PIXMAN_r5g6b5, ((1 << 5) | (1 << 1) | (1 <<
6)), fast_composite_scaled_nearest_8888_565_none_OVER, }, { PIXMAN_OP_OVER
, PIXMAN_a8r8g8b8, (((1 << 10) | (1 << 1) | (1 <<
11) | (1 << 5) | (1 << 6)) | ((1 << 15) | (
1 << 14) | (1 << 4)) | (1 << 17)), (((0) <<
24) | ((0) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0))), 0, PIXMAN_r5g6b5, ((1 <<
5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest_8888_565_pad_OVER
, }, { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, (((1 << 10) | (
1 << 1) | (1 << 11) | (1 << 5) | (1 <<
6)) | ((1 << 15) | (1 << 3) | (1 << 4)) | (
1 << 17)), (((0) << 24) | ((0) << 16) | ((0
) << 12) | ((0) << 8) | ((0) << 4) | ((0)))
, 0, PIXMAN_r5g6b5, ((1 << 5) | (1 << 1) | (1 <<
6)), fast_composite_scaled_nearest_8888_565_normal_OVER, }
,
1709
1710#define NEAREST_FAST_PATH(op,s,d){ PIXMAN_OP_op, PIXMAN_s, ((1 << 10) | (1 << 1) |
(1 << 11) | (1 << 5) | (1 << 6)), (((0) <<
24) | ((0) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0))), 0, PIXMAN_d, ((1 << 5) |
(1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
\
1711 { PIXMAN_OP_ ## op, \
1712 PIXMAN_ ## s, SCALED_NEAREST_FLAGS((1 << 10) | (1 << 1) | (1 << 11) | (1 <<
5) | (1 << 6))
, \
1713 PIXMAN_null(((0) << 24) | ((0) << 16) | ((0) << 12) | (
(0) << 8) | ((0) << 4) | ((0)))
, 0, \
1714 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS((1 << 5) | (1 << 1) | (1 << 6)), \
1715 fast_composite_scaled_nearest, \
1716 }
1717
1718 NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8){ PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_x8r8g8b8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1719 NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8){ PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_x8r8g8b8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1720 NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8){ PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_x8b8g8r8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1721 NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8){ PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_x8b8g8r8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1722
1723 NEAREST_FAST_PATH (SRC, x8r8g8b8, a8r8g8b8){ PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_a8r8g8b8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1724 NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8){ PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_a8r8g8b8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1725 NEAREST_FAST_PATH (SRC, x8b8g8r8, a8b8g8r8){ PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_a8b8g8r8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1726 NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8){ PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_a8b8g8r8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1727
1728 NEAREST_FAST_PATH (OVER, x8r8g8b8, x8r8g8b8){ PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_x8r8g8b8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1729 NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8){ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_x8r8g8b8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1730 NEAREST_FAST_PATH (OVER, x8b8g8r8, x8b8g8r8){ PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_x8b8g8r8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1731 NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8){ PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_x8b8g8r8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1732
1733 NEAREST_FAST_PATH (OVER, x8r8g8b8, a8r8g8b8){ PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_a8r8g8b8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1734 NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8){ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_a8r8g8b8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1735 NEAREST_FAST_PATH (OVER, x8b8g8r8, a8b8g8r8){ PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_a8b8g8r8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1736 NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8){ PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, ((1 << 10) | (1 <<
1) | (1 << 11) | (1 << 5) | (1 << 6)), (((
0) << 24) | ((0) << 16) | ((0) << 12) | ((0
) << 8) | ((0) << 4) | ((0))), 0, PIXMAN_a8b8g8r8
, ((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_scaled_nearest
, }
,
1737
1738 { PIXMAN_OP_NONE },
1739};
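Each SIMPLE_NEAREST_FAST_PATH invocation in the table above expands to four pixman_fast_path_t initializers, one per repeat variant (cover, none, pad, normal), while NEAREST_FAST_PATH (defined on lines 1710-1716) produces a single entry. As a condensed sketch, not a verbatim quote of pixman-fast-path.h, the cover variant generated for SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, r5g6b5, 565_565) looks roughly like this (flag bit masks as expanded by the preprocessor):

    /* Cover-clip variant only; the none/pad/normal variants differ in the
     * source flag bits and in the _none/_pad/_normal function suffix. */
    {
        PIXMAN_OP_SRC,
        PIXMAN_r5g6b5,
        ((1 << 10) | (1 << 1) | (1 << 11) | (1 << 5) | (1 << 6)) | (1 << 16),
        PIXMAN_null, 0,
        PIXMAN_r5g6b5,
        ((1 << 5) | (1 << 1) | (1 << 6)),
        fast_composite_scaled_nearest_565_565_cover_SRC,
    },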
1740
1741#ifdef WORDS_BIGENDIAN
1742#define A1_FILL_MASK(n, offs) (((1 << (n)) - 1) << (32 - (offs) - (n)))
1743#else
1744#define A1_FILL_MASK(n, offs) (((1 << (n)) - 1) << (offs))
1745#endif
1746
1747static force_inline void
1748pixman_fill1_line (uint32_t *dst, int offs, int width, int v)
1749{
1750 if (offs)
1751 {
1752 int leading_pixels = 32 - offs;
1753 if (leading_pixels >= width)
1754 {
1755 if (v)
1756 *dst |= A1_FILL_MASK (width, offs);
1757 else
1758 *dst &= ~A1_FILL_MASK (width, offs);
1759 return;
1760 }
1761 else
1762 {
1763 if (v)
1764 *dst++ |= A1_FILL_MASK (leading_pixels, offs);
1765 else
1766 *dst++ &= ~A1_FILL_MASK (leading_pixels, offs);
1767 width -= leading_pixels;
1768 }
1769 }
1770 while (width >= 32)
1771 {
1772 if (v)
1773 *dst++ = 0xFFFFFFFF;
1774 else
1775 *dst++ = 0;
1776 width -= 32;
1777 }
1778 if (width > 0)
1779 {
1780 if (v)
1781 *dst |= A1_FILL_MASK (width, 0);
1782 else
1783 *dst &= ~A1_FILL_MASK (width, 0);
1784 }
1785}
1786
1787static void
1788pixman_fill1 (uint32_t *bits,
1789 int stride,
1790 int x,
1791 int y,
1792 int width,
1793 int height,
1794 uint32_t xor)
1795{
1796 uint32_t *dst = bits + y * stride + (x >> 5);
1797 int offs = x & 31;
1798
1799 if (xor & 1)
1800 {
1801 while (height--)
1802 {
1803 pixman_fill1_line (dst, offs, width, 1);
1804 dst += stride;
1805 }
1806 }
1807 else
1808 {
1809 while (height--)
1810 {
1811 pixman_fill1_line (dst, offs, width, 0);
1812 dst += stride;
1813 }
1814 }
1815}
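For a 1 bpp destination, pixman_fill1_line splits each row into a partial leading word, a run of whole 32-bit words, and a partial trailing word, building the partial-word masks with A1_FILL_MASK. A minimal standalone sketch (hypothetical numbers, little-endian A1_FILL_MASK definition, not part of pixman) of the masks used when filling 70 pixels starting at bit offset 5:

    /* offs = 5 gives 27 leading bits, then one full 32-bit word,
     * then 11 trailing bits (27 + 32 + 11 = 70). */
    #include <stdio.h>

    #define A1_FILL_MASK(n, offs) (((1u << (n)) - 1) << (offs))

    int
    main (void)
    {
        printf ("leading : 0x%08x\n", A1_FILL_MASK (27, 5));  /* 0xffffffe0 */
        printf ("middle  : 0x%08x\n", 0xffffffffu);           /* one full word */
        printf ("trailing: 0x%08x\n", A1_FILL_MASK (11, 0));  /* 0x000007ff */
        return 0;
    }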
1816
1817static void
1818pixman_fill8 (uint32_t *bits,
1819 int stride,
1820 int x,
1821 int y,
1822 int width,
1823 int height,
1824 uint32_t xor)
1825{
1826 int byte_stride = stride * (int) sizeof (uint32_t);
1827 uint8_t *dst = (uint8_t *) bits;
1828 uint8_t v = xor & 0xff;
1829 int i;
1830
1831 dst = dst + y * byte_stride + x;
1832
1833 while (height--)
1834 {
1835 for (i = 0; i < width; ++i)
1836 dst[i] = v;
1837
1838 dst += byte_stride;
1839 }
1840}
1841
1842static void
1843pixman_fill16 (uint32_t *bits,
1844 int stride,
1845 int x,
1846 int y,
1847 int width,
1848 int height,
1849 uint32_t xor)
1850{
1851 int short_stride =
1852 (stride * (int)sizeof (uint32_t)) / (int)sizeof (uint16_t);
1853 uint16_t *dst = (uint16_t *)bits;
1854 uint16_t v = xor & 0xffff;
1855 int i;
1856
1857 dst = dst + y * short_stride + x;
1858
1859 while (height--)
1860 {
1861 for (i = 0; i < width; ++i)
1862 dst[i] = v;
1863
1864 dst += short_stride;
1865 }
1866}
1867
1868static void
1869pixman_fill32 (uint32_t *bits,
1870 int stride,
1871 int x,
1872 int y,
1873 int width,
1874 int height,
1875 uint32_t xor)
1876{
1877 int i;
1878
1879 bits = bits + y * stride + x;
1880
1881 while (height--)
1882 {
1883 for (i = 0; i < width; ++i)
1884 bits[i] = xor;
1885
1886 bits += stride;
1887 }
1888}
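The stride handed to these helpers is measured in uint32_t units, which is why pixman_fill8 and pixman_fill16 first rescale it into byte and uint16_t units before indexing. A small sketch (hypothetical values only) of the address arithmetic pixman_fill8 performs:

    /* Locating the first byte touched by pixman_fill8 for stride = 25
     * uint32_t units (100 bytes per row), x = 3, y = 2.  pixman_fill16
     * applies the same pattern with uint16_t units. */
    #include <stdint.h>

    int
    main (void)
    {
        int stride      = 25;                                /* uint32_t units */
        int byte_stride = stride * (int) sizeof (uint32_t);  /* 100 bytes      */
        int x = 3, y = 2;
        int first_byte  = y * byte_stride + x;               /* 203            */

        return first_byte == 203 ? 0 : 1;
    }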
1889
1890static pixman_bool_t
1891fast_path_fill (pixman_implementation_t *imp,
1892 uint32_t * bits,
1893 int stride,
1894 int bpp,
1895 int x,
1896 int y,
1897 int width,
1898 int height,
1899 uint32_t xor)
1900{
1901 switch (bpp)
1902 {
1903 case 1:
1904 pixman_fill1 (bits, stride, x, y, width, height, xor);
1905 break;
1906
1907 case 8:
1908 pixman_fill8 (bits, stride, x, y, width, height, xor);
1909 break;
1910
1911 case 16:
1912 pixman_fill16 (bits, stride, x, y, width, height, xor);
1913 break;
1914
1915 case 32:
1916 pixman_fill32 (bits, stride, x, y, width, height, xor);
1917 break;
1918
1919 default:
1920 return _pixman_implementation_fill (
1921 imp->delegate, bits, stride, bpp, x, y, width, height, xor);
1922 break;
1923 }
1924
1925 return TRUE;
1926}
1927
1928pixman_implementation_t *
1929_pixman_implementation_create_fast_path (void)
1930{
1931 pixman_implementation_t *general = _pixman_implementation_create_general ();
1932 pixman_implementation_t *imp = _pixman_implementation_create (general, c_fast_paths);
1933
1934 imp->fill = fast_path_fill;
1935
1936 return imp;
1937}
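fast_path_fill is installed as the fill callback of this implementation, so fills routed here use pixman_fill1/8/16/32 for those depths and hand anything else to the delegate. A usage sketch of the public pixman_fill() entry point (assuming the pixman-1 API; compile with `pkg-config --cflags --libs pixman-1`); depending on which SIMD implementations are layered on top, a 32 bpp fill like this may be served by pixman_fill32() above:

    /* Solid-fill a 16x16 region at (8, 8) of a 64x64, 32 bpp buffer. */
    #include <pixman.h>
    #include <stdlib.h>

    int
    main (void)
    {
        int       width = 64, height = 64;
        int       stride = width;               /* stride is in uint32_t units */
        uint32_t *bits = calloc ((size_t) stride * height, sizeof (uint32_t));

        if (!bits)
            return 1;

        pixman_fill (bits, stride, 32, 8, 8, 16, 16, 0xffff0000);  /* opaque red */

        free (bits);
        return 0;
    }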