Bug Summary

File: pixman/pixman-fast-path.c
Location: line 1316, column 11
Description: Division by zero
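
A report of this kind is normally addressed by checking the divisor before the division is performed. A minimal, hypothetical C sketch (not taken from pixman; 'divisor' stands for whatever value the analyzer found could be zero at line 1316):

    /* hypothetical guard, for illustration only */
    if (divisor == 0)
        return;                 /* or substitute a safe fallback value */
    result = value / divisor;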

Annotated Source Code

1/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
2/*
3 * Copyright © 2000 SuSE, Inc.
4 * Copyright © 2007 Red Hat, Inc.
5 *
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that
9 * copyright notice and this permission notice appear in supporting
10 * documentation, and that the name of SuSE not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. SuSE makes no representations about the
13 * suitability of this software for any purpose. It is provided "as is"
14 * without express or implied warranty.
15 *
16 * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
18 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
20 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
21 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 *
23 * Author: Keith Packard, SuSE, Inc.
24 */
25
26#ifdef HAVE_CONFIG_H
27#include <config.h>
28#endif
29#include <string.h>
30#include <stdlib.h>
31#include "pixman-private.h"
32#include "pixman-combine32.h"
33#include "pixman-inlines.h"
34
35static force_inline uint32_t
36fetch_24 (uint8_t *a)
37{
38 if (((unsigned long)a) & 1)
39 {
40#ifdef WORDS_BIGENDIAN
41 return (*a << 16) | (*(uint16_t *)(a + 1));
42#else
43 return *a | (*(uint16_t *)(a + 1) << 8);
44#endif
45 }
46 else
47 {
48#ifdef WORDS_BIGENDIAN
49 return (*(uint16_t *)a << 8) | *(a + 2);
50#else
51 return *(uint16_t *)a | (*(a + 2) << 16);
52#endif
53 }
54}
55
56static force_inline void
57store_24 (uint8_t *a,
58 uint32_t v)
59{
60 if (((unsigned long)a) & 1)
61 {
62#ifdef WORDS_BIGENDIAN
63 *a = (uint8_t) (v >> 16);
64 *(uint16_t *)(a + 1) = (uint16_t) (v);
65#else
66 *a = (uint8_t) (v);
67 *(uint16_t *)(a + 1) = (uint16_t) (v >> 8);
68#endif
69 }
70 else
71 {
72#ifdef WORDS_BIGENDIAN
73 *(uint16_t *)a = (uint16_t)(v >> 8);
74 *(a + 2) = (uint8_t)v;
75#else
76 *(uint16_t *)a = (uint16_t)v;
77 *(a + 2) = (uint8_t)(v >> 16);
78#endif
79 }
80}
81
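The two helpers above read and write 24-bit (3-byte) pixels using a byte access plus a 16-bit access, choosing the split by address parity so that the 16-bit access always lands on an even address. An illustrative round trip on a little-endian build (values chosen here, not from the report):

    uint8_t  row[3] = { 0x11, 0x22, 0x33 };   /* three bytes of one 24-bit pixel */
    uint32_t pixel  = fetch_24 (row);         /* 0x00332211 on little-endian */
    store_24 (row, pixel);                    /* writes the same three bytes back */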
82static force_inline uint32_t
83over (uint32_t src,
84 uint32_t dest)
85{
86 uint32_t a = ~src >> 24;
87
88 UN8x4_MUL_UN8_ADD_UN8x4 (dest, a, src);
89
90 return dest;
91}
92
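over() above is the Porter-Duff OVER operator on premultiplied a8r8g8b8 pixels: each destination channel becomes src + dest * (255 - src_alpha) / 255, with UN8x4_MUL_UN8_ADD_UN8x4 evaluating the four channels as two 16-bit pairs. A scalar per-channel reference, illustrative only and not part of pixman:

    static uint8_t
    mul_un8 (uint8_t x, uint8_t a)        /* same rounding as the MUL_UN8 macro */
    {
        uint16_t t = x * a + 0x80;
        return (t + (t >> 8)) >> 8;
    }

    static uint32_t
    over_reference (uint32_t src, uint32_t dest)
    {
        uint8_t  ia = ~src >> 24;         /* 255 - source alpha */
        uint32_t result = 0;
        int      shift;

        for (shift = 0; shift < 32; shift += 8)
        {
            uint32_t c = (uint8_t) (src >> shift) + mul_un8 (dest >> shift, ia);
            if (c > 0xff)
                c = 0xff;                 /* saturate, as the UN8x4 add does */
            result |= c << shift;
        }
        return result;
    }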
93static uint32_t
94in (uint32_t x,
95 uint8_t y)
96{
97 uint16_t a = y;
98
99 UN8x4_MUL_UN8 (x, a);
100
101 return x;
102}
103
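in() above scales every channel of a premultiplied pixel by an 8-bit factor, i.e. x * y / 255 per channel with the same rounding as MUL_UN8. A scalar reference, illustrative only:

    static uint32_t
    in_reference (uint32_t x, uint8_t y)
    {
        uint32_t result = 0;
        int      shift;

        for (shift = 0; shift < 32; shift += 8)
        {
            uint16_t t = (uint8_t) (x >> shift) * y + 0x80;
            result |= (uint32_t) ((uint8_t) ((t + (t >> 8)) >> 8)) << shift;
        }
        return result;
    }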
104/*
105 * Naming convention:
106 *
107 * op_src_mask_dest
108 */
109static void
110fast_composite_over_x888_8_8888 (pixman_implementation_t *imp,
111 pixman_composite_info_t *info)
112{
113 PIXMAN_COMPOSITE_ARGS (info);
114 uint32_t *src, *src_line;
115 uint32_t *dst, *dst_line;
116 uint8_t *mask, *mask_line;
117 int src_stride, mask_stride, dst_stride;
118 uint8_t m;
119 uint32_t s, d;
120 int32_t w;
121
122 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
123 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
124 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
125
126 while (height--)
127 {
128 src = src_line;
129 src_line += src_stride;
130 dst = dst_line;
131 dst_line += dst_stride;
132 mask = mask_line;
133 mask_line += mask_stride;
134
135 w = width;
136 while (w--)
137 {
138 m = *mask++;
139 if (m)
140 {
141 s = *src | 0xff000000;
142
143 if (m == 0xff)
144 {
145 *dst = s;
146 }
147 else
148 {
149 d = in (s, m);
150 *dst = over (d, *dst);
151 }
152 }
153 src++;
154 dst++;
155 }
156 }
157}
158
159static void
160fast_composite_in_n_8_8 (pixman_implementation_t *imp,
161 pixman_composite_info_t *info)
162{
163 PIXMAN_COMPOSITE_ARGS (info);
164 uint32_t src, srca;
165 uint8_t *dst_line, *dst;
166 uint8_t *mask_line, *mask, m;
167 int dst_stride, mask_stride;
168 int32_t w;
169 uint16_t t;
170
171 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
172
173 srca = src >> 24;
174
175 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
176 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
177
178 if (srca == 0xff)
179 {
180 while (height--)
181 {
182 dst = dst_line;
183 dst_line += dst_stride;
184 mask = mask_line;
185 mask_line += mask_stride;
186 w = width;
187
188 while (w--)
189 {
190 m = *mask++;
191
192 if (m == 0)
193 *dst = 0;
194 else if (m != 0xff)
195 *dst = MUL_UN8 (m, *dst, t);
196
197 dst++;
198 }
199 }
200 }
201 else
202 {
203 while (height--)
204 {
205 dst = dst_line;
206 dst_line += dst_stride;
207 mask = mask_line;
208 mask_line += mask_stride;
209 w = width;
210
211 while (w--)
212 {
213 m = *mask++;
214 m = MUL_UN8 (m, srca, t);
215
216 if (m == 0)
217 *dst = 0;
218 else if (m != 0xff)
219 *dst = MUL_UN8 (m, *dst, t);
220
221 dst++;
222 }
223 }
224 }
225}
226
227static void
228fast_composite_in_8_8 (pixman_implementation_t *imp,
229 pixman_composite_info_t *info)
230{
231 PIXMAN_COMPOSITE_ARGS (info);
232 uint8_t *dst_line, *dst;
233 uint8_t *src_line, *src;
234 int dst_stride, src_stride;
235 int32_t w;
236 uint8_t s;
237 uint16_t t;
238
239 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
240 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
241
242 while (height--)
243 {
244 dst = dst_line;
245 dst_line += dst_stride;
246 src = src_line;
247 src_line += src_stride;
248 w = width;
249
250 while (w--)
251 {
252 s = *src++;
253
254 if (s == 0)
255 *dst = 0;
256 else if (s != 0xff)
257 *dst = MUL_UN8 (s, *dst, t);
258
259 dst++;
260 }
261 }
262}
263
264static void
265fast_composite_over_n_8_8888 (pixman_implementation_t *imp,
266 pixman_composite_info_t *info)
267{
268 PIXMAN_COMPOSITE_ARGS (info);
269 uint32_t src, srca;
270 uint32_t *dst_line, *dst, d;
271 uint8_t *mask_line, *mask, m;
272 int dst_stride, mask_stride;
273 int32_t w;
274
275 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
276
277 srca = src >> 24;
278 if (src == 0)
279 return;
280
281 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
282 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
283
284 while (height--)
285 {
286 dst = dst_line;
287 dst_line += dst_stride;
288 mask = mask_line;
289 mask_line += mask_stride;
290 w = width;
291
292 while (w--)
293 {
294 m = *mask++;
295 if (m == 0xff)
296 {
297 if (srca == 0xff)
298 *dst = src;
299 else
300 *dst = over (src, *dst);
301 }
302 else if (m)
303 {
304 d = in (src, m);
305 *dst = over (d, *dst);
306 }
307 dst++;
308 }
309 }
310}
311
312static void
313fast_composite_add_n_8888_8888_ca (pixman_implementation_t *imp,
314 pixman_composite_info_t *info)
315{
316 PIXMAN_COMPOSITE_ARGS (info);
317 uint32_t src, s;
318 uint32_t *dst_line, *dst, d;
319 uint32_t *mask_line, *mask, ma;
320 int dst_stride, mask_stride;
321 int32_t w;
322
323 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
324
325 if (src == 0)
326 return;
327
328 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
329 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
330
331 while (height--)
332 {
333 dst = dst_line;
334 dst_line += dst_stride;
335 mask = mask_line;
336 mask_line += mask_stride;
337 w = width;
338
339 while (w--)
340 {
341 ma = *mask++;
342
343 if (ma)
344 {
345 d = *dst;
346 s = src;
347
348 UN8x4_MUL_UN8x4_ADD_UN8x4 (s, ma, d);
349
350 *dst = s;
351 }
352
353 dst++;
354 }
355 }
356}
357
358static void
359fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
360 pixman_composite_info_t *info)
361{
362 PIXMAN_COMPOSITE_ARGS (info);
363 uint32_t src, srca, s;
364 uint32_t *dst_line, *dst, d;
365 uint32_t *mask_line, *mask, ma;
366 int dst_stride, mask_stride;
367 int32_t w;
368
369 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
370
371 srca = src >> 24;
372 if (src == 0)
373 return;
374
375 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
376 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
377
378 while (height--)
379 {
380 dst = dst_line;
381 dst_line += dst_stride;
382 mask = mask_line;
383 mask_line += mask_stride;
384 w = width;
385
386 while (w--)
387 {
388 ma = *mask++;
389 if (ma == 0xffffffff)
390 {
391 if (srca == 0xff)
392 *dst = src;
393 else
394 *dst = over (src, *dst);
395 }
396 else if (ma)
397 {
398 d = *dst;
399 s = src;
400
401 UN8x4_MUL_UN8x4 (s, ma);
402 UN8x4_MUL_UN8 (ma, srca);
403 ma = ~ma;
404 UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);
405
406 *dst = d;
407 }
408
409 dst++;
410 }
411 }
412}
413
414static void
415fast_composite_over_n_8_0888 (pixman_implementation_t *imp,
416 pixman_composite_info_t *info)
417{
418 PIXMAN_COMPOSITE_ARGS (info);
419 uint32_t src, srca;
420 uint8_t *dst_line, *dst;
421 uint32_t d;
422 uint8_t *mask_line, *mask, m;
423 int dst_stride, mask_stride;
424 int32_t w;
425
426 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
427
428 srca = src >> 24;
429 if (src == 0)
430 return;
431
432 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
433 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
434
435 while (height--)
436 {
437 dst = dst_line;
438 dst_line += dst_stride;
439 mask = mask_line;
440 mask_line += mask_stride;
441 w = width;
442
443 while (w--)
444 {
445 m = *mask++;
446 if (m == 0xff)
447 {
448 if (srca == 0xff)
449 {
450 d = src;
451 }
452 else
453 {
454 d = fetch_24 (dst);
455 d = over (src, d);
456 }
457 store_24 (dst, d);
458 }
459 else if (m)
460 {
461 d = over (in (src, m), fetch_24 (dst));
462 store_24 (dst, d);
463 }
464 dst += 3;
465 }
466 }
467}
468
469static void
470fast_composite_over_n_8_0565 (pixman_implementation_t *imp,
471 pixman_composite_info_t *info)
472{
473 PIXMAN_COMPOSITE_ARGS (info);
474 uint32_t src, srca;
475 uint16_t *dst_line, *dst;
476 uint32_t d;
477 uint8_t *mask_line, *mask, m;
478 int dst_stride, mask_stride;
479 int32_t w;
480
481 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
482
483 srca = src >> 24;
484 if (src == 0)
485 return;
486
487 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
488 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
489
490 while (height--)
491 {
492 dst = dst_line;
493 dst_line += dst_stride;
494 mask = mask_line;
495 mask_line += mask_stride;
496 w = width;
497
498 while (w--)
499 {
500 m = *mask++;
501 if (m == 0xff)
502 {
503 if (srca == 0xff)
504 {
505 d = src;
506 }
507 else
508 {
509 d = *dst;
510 d = over (src, CONVERT_0565_TO_0888 (d));
511 }
512 *dst = CONVERT_8888_TO_0565 (d);
513 }
514 else if (m)
515 {
516 d = *dst;
517 d = over (in (src, m), CONVERT_0565_TO_0888 (d));
518 *dst = CONVERT_8888_TO_0565 (d);
519 }
520 dst++;
521 }
522 }
523}
524
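The CONVERT_0565_TO_0888 / CONVERT_8888_TO_0565 pair used above expands each 5- or 6-bit field to 8 bits by replicating its high bits into the freed low bits, and packs by dropping the low bits again. A worked example (values chosen here for illustration):

    uint16_t g565 = 0x07e0;                          /* pure green in r5g6b5 */
    uint32_t g888 = CONVERT_0565_TO_0888 (g565);     /* 0x0000ff00 */
    uint16_t back = CONVERT_8888_TO_0565 (g888);     /* 0x07e0 again */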
525static void
526fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
527 pixman_composite_info_t *info)
528{
529 PIXMAN_COMPOSITE_ARGS (info);
530 uint32_t src, srca, s;
531 uint16_t src16;
532 uint16_t *dst_line, *dst;
533 uint32_t d;
534 uint32_t *mask_line, *mask, ma;
535 int dst_stride, mask_stride;
536 int32_t w;
537
538 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
539
540 srca = src >> 24;
541 if (src == 0)
542 return;
543
544 src16 = CONVERT_8888_TO_0565 (src);
545
546 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
547 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
548
549 while (height--)
550 {
551 dst = dst_line;
552 dst_line += dst_stride;
553 mask = mask_line;
554 mask_line += mask_stride;
555 w = width;
556
557 while (w--)
558 {
559 ma = *mask++;
560 if (ma == 0xffffffff)
561 {
562 if (srca == 0xff)
563 {
564 *dst = src16;
565 }
566 else
567 {
568 d = *dst;
569 d = over (src, CONVERT_0565_TO_0888 (d));
570 *dst = CONVERT_8888_TO_0565 (d);
571 }
572 }
573 else if (ma)
574 {
575 d = *dst;
576 d = CONVERT_0565_TO_0888 (d);
577
578 s = src;
579
580 UN8x4_MUL_UN8x4 (s, ma);
581 UN8x4_MUL_UN8 (ma, srca);
582 ma = ~ma;
583 UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);
584
585 *dst = CONVERT_8888_TO_0565 (d);
586 }
587 dst++;
588 }
589 }
590}
591
592static void
593fast_composite_over_8888_8888 (pixman_implementation_t *imp,
594 pixman_composite_info_t *info)
595{
596 PIXMAN_COMPOSITE_ARGS (info);
597 uint32_t *dst_line, *dst;
598 uint32_t *src_line, *src, s;
599 int dst_stride, src_stride;
600 uint8_t a;
601 int32_t w;
602
603 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
604 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
605
606 while (height--)
607 {
608 dst = dst_line;
609 dst_line += dst_stride;
610 src = src_line;
611 src_line += src_stride;
612 w = width;
613
614 while (w--)
615 {
616 s = *src++;
617 a = s >> 24;
618 if (a == 0xff)
619 *dst = s;
620 else if (s)
621 *dst = over (s, *dst);
622 dst++;
623 }
624 }
625}
626
627static void
628fast_composite_src_x888_8888 (pixman_implementation_t *imp,
629 pixman_composite_info_t *info)
630{
631 PIXMAN_COMPOSITE_ARGS (info);
632 uint32_t *dst_line, *dst;
633 uint32_t *src_line, *src;
634 int dst_stride, src_stride;
635 int32_t w;
636
637 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
638 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
639
640 while (height--)
641 {
642 dst = dst_line;
643 dst_line += dst_stride;
644 src = src_line;
645 src_line += src_stride;
646 w = width;
647
648 while (w--)
649 *dst++ = (*src++) | 0xff000000;
650 }
651}
652
653#if 0
654static void
655fast_composite_over_8888_0888 (pixman_implementation_t *imp,
656 pixman_composite_info_t *info)
657{
658 PIXMAN_COMPOSITE_ARGS (info);
659 uint8_t *dst_line, *dst;
660 uint32_t d;
661 uint32_t *src_line, *src, s;
662 uint8_t a;
663 int dst_stride, src_stride;
664 int32_t w;
665
666 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
667 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
668
669 while (height--)
670 {
671 dst = dst_line;
672 dst_line += dst_stride;
673 src = src_line;
674 src_line += src_stride;
675 w = width;
676
677 while (w--)
678 {
679 s = *src++;
680 a = s >> 24;
681 if (a)
682 {
683 if (a == 0xff)
684 d = s;
685 else
686 d = over (s, fetch_24 (dst));
687
688 store_24 (dst, d);
689 }
690 dst += 3;
691 }
692 }
693}
694#endif
695
696static void
697fast_composite_over_8888_0565 (pixman_implementation_t *imp,
698 pixman_composite_info_t *info)
699{
700 PIXMAN_COMPOSITE_ARGS (info);
701 uint16_t *dst_line, *dst;
702 uint32_t d;
703 uint32_t *src_line, *src, s;
704 uint8_t a;
705 int dst_stride, src_stride;
706 int32_t w;
707
708 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
709 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
710
711 while (height--)
712 {
713 dst = dst_line;
714 dst_line += dst_stride;
715 src = src_line;
716 src_line += src_stride;
717 w = width;
718
719 while (w--)
720 {
721 s = *src++;
722 a = s >> 24;
723 if (s)
724 {
725 if (a == 0xff)
726 {
727 d = s;
728 }
729 else
730 {
731 d = *dst;
732 d = over (s, CONVERT_0565_TO_0888 (d));
733 }
734 *dst = CONVERT_8888_TO_0565 (d);
735 }
736 dst++;
737 }
738 }
739}
740
741static void
742fast_composite_src_x888_0565 (pixman_implementation_t *imp,
743 pixman_composite_info_t *info)
744{
745 PIXMAN_COMPOSITE_ARGS (info);
746 uint16_t *dst_line, *dst;
747 uint32_t *src_line, *src, s;
748 int dst_stride, src_stride;
749 int32_t w;
750
751 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
752 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
753
754 while (height--)
755 {
756 dst = dst_line;
757 dst_line += dst_stride;
758 src = src_line;
759 src_line += src_stride;
760 w = width;
761
762 while (w--)
763 {
764 s = *src++;
765 *dst = CONVERT_8888_TO_0565 (s);
766 dst++;
767 }
768 }
769}
770
771static void
772fast_composite_add_8_8 (pixman_implementation_t *imp,
773 pixman_composite_info_t *info)
774{
775 PIXMAN_COMPOSITE_ARGS (info);
776 uint8_t *dst_line, *dst;
777 uint8_t *src_line, *src;
778 int dst_stride, src_stride;
779 int32_t w;
780 uint8_t s, d;
781 uint16_t t;
782
783 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
784 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
785
786 while (height--)
787 {
788 dst = dst_line;
789 dst_line += dst_stride;
790 src = src_line;
791 src_line += src_stride;
792 w = width;
793
794 while (w--)
795 {
796 s = *src++;
797 if (s)
798 {
799 if (s != 0xff)
800 {
801 d = *dst;
802 t = d + s;
803 s = t | (0 - (t >> 8));
804 }
805 *dst = s;
806 }
807 dst++;
808 }
809 }
810}
811
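The expression at line 803, s = t | (0 - (t >> 8)), is a branch-free saturating add: t holds the 9-bit sum of two 8-bit values, t >> 8 is the carry, and 0 - carry is either 0x00 (no clamping) or an all-ones mask that forces the stored byte to 0xff. A small illustrative check:

    uint16_t t = (uint16_t) (200 + 100);      /* 300, carry set            */
    uint8_t  s = t | (0 - (t >> 8));          /* all-ones mask -> s == 0xff */

    uint16_t u = (uint16_t) (20 + 30);        /* 50, no carry               */
    uint8_t  r = u | (0 - (u >> 8));          /* mask is zero  -> r == 50   */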
812static void
813fast_composite_add_0565_0565 (pixman_implementation_t *imp,
814 pixman_composite_info_t *info)
815{
816 PIXMAN_COMPOSITE_ARGS (info);
817 uint16_t *dst_line, *dst;
818 uint32_t d;
819 uint16_t *src_line, *src;
820 uint32_t s;
821 int dst_stride, src_stride;
822 int32_t w;
823
824 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, src_stride, src_line, 1);
825 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
826
827 while (height--)
828 {
829 dst = dst_line;
830 dst_line += dst_stride;
831 src = src_line;
832 src_line += src_stride;
833 w = width;
834
835 while (w--)
836 {
837 s = *src++;
838 if (s)
839 {
840 d = *dst;
841 s = CONVERT_0565_TO_8888 (s);
842 if (d)
843 {
844 d = CONVERT_0565_TO_8888 (d);
845 UN8x4_ADD_UN8x4 (s, d);
846 }
847 *dst = CONVERT_8888_TO_0565 (s);
848 }
849 dst++;
850 }
851 }
852}
853
854static void
855fast_composite_add_8888_8888 (pixman_implementation_t *imp,
856 pixman_composite_info_t *info)
857{
858 PIXMAN_COMPOSITE_ARGS (info);
859 uint32_t *dst_line, *dst;
860 uint32_t *src_line, *src;
861 int dst_stride, src_stride;
862 int32_t w;
863 uint32_t s, d;
864
865 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
866 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
867
868 while (height--)
869 {
870 dst = dst_line;
871 dst_line += dst_stride;
872 src = src_line;
873 src_line += src_stride;
874 w = width;
875
876 while (w--)
877 {
878 s = *src++;
879 if (s)
880 {
881 if (s != 0xffffffff)
882 {
883 d = *dst;
884 if (d)
885 UN8x4_ADD_UN8x4 (s, d);
886 }
887 *dst = s;
888 }
889 dst++;
890 }
891 }
892}
893
894static void
895fast_composite_add_n_8_8 (pixman_implementation_t *imp,
896 pixman_composite_info_t *info)
897{
898 PIXMAN_COMPOSITE_ARGS (info);
899 uint8_t *dst_line, *dst;
900 uint8_t *mask_line, *mask;
901 int dst_stride, mask_stride;
902 int32_t w;
903 uint32_t src;
904 uint8_t sa;
905
906 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
907 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
908 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
909 sa = (src >> 24);
910
911 while (height--)
912 {
913 dst = dst_line;
914 dst_line += dst_stride;
915 mask = mask_line;
916 mask_line += mask_stride;
917 w = width;
918
919 while (w--)
920 {
921 uint16_t tmp;
922 uint16_t a;
923 uint32_t m, d;
924 uint32_t r;
925
926 a = *mask++;
927 d = *dst;
928
929 m = MUL_UN8 (sa, a, tmp);
930 r = ADD_UN8 (m, d, tmp);
931
932 *dst++ = r;
933 }
934 }
935}
936
937#ifdef WORDS_BIGENDIAN
938#define CREATE_BITMASK(n) (0x80000000 >> (n))
939#define UPDATE_BITMASK(n) ((n) >> 1)
940#else
941#define CREATE_BITMASK(n) (1 << (n))
942#define UPDATE_BITMASK(n) ((n) << 1)
943#endif
944
945#define TEST_BIT(p, n) \
946 (*((p) + ((n) >> 5)) & CREATE_BITMASK ((n) & 31))
947#define SET_BIT(p, n) \
948 do { *((p) + ((n) >> 5)) |= CREATE_BITMASK ((n) & 31); } while (0);
949
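CREATE_BITMASK, TEST_BIT and SET_BIT above address individual pixels of a 1-bpp (a1) image stored as 32-bit words: pixel n lives in word n >> 5, and whether the first pixel maps to the most- or least-significant bit of the word depends on WORDS_BIGENDIAN; UPDATE_BITMASK steps through a word one pixel at a time in the matching direction. A usage sketch (illustrative only, values chosen here):

    uint32_t bits[2] = { 0, 0 };    /* room for 64 a1 pixels */

    SET_BIT (bits, 37);             /* mark pixel 37: pixel 5 of word 1 */
    if (TEST_BIT (bits, 37))
    {
        SET_BIT (bits, 38);         /* pixel 37 is set, so mark its neighbour too */
    }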
950static void
951fast_composite_add_1_1 (pixman_implementation_t *imp,
952 pixman_composite_info_t *info)
953{
954 PIXMAN_COMPOSITE_ARGS (info);
955 uint32_t *dst_line, *dst;
956 uint32_t *src_line, *src;
957 int dst_stride, src_stride;
958 int32_t w;
959
960 PIXMAN_IMAGE_GET_LINE (src_image, 0, src_y, uint32_t,
961 src_stride, src_line, 1);
962 PIXMAN_IMAGE_GET_LINE (dest_image, 0, dest_y, uint32_t,
963 dst_stride, dst_line, 1);
964
965 while (height--)
966 {
967 dst = dst_line;
968 dst_line += dst_stride;
969 src = src_line;
970 src_line += src_stride;
971 w = width;
972
973 while (w--)
974 {
975 /*
976 * TODO: improve performance by processing uint32_t data instead
977 * of individual bits
978 */
979 if (TEST_BIT (src, src_x + w))
980 SET_BIT (dst, dest_x + w);
981 }
982 }
983}
984
985static void
986fast_composite_over_n_1_8888 (pixman_implementation_t *imp,
987 pixman_composite_info_t *info)
988{
989 PIXMAN_COMPOSITE_ARGS (info);
990 uint32_t src, srca;
991 uint32_t *dst, *dst_line;
992 uint32_t *mask, *mask_line;
993 int mask_stride, dst_stride;
994 uint32_t bitcache, bitmask;
995 int32_t w;
996
997 if (width <= 0)
998 return;
999
1000 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
1001 srca = src >> 24;
1002 if (src == 0)
1003 return;
1004
1005 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t,
1006 dst_stride, dst_line, 1);
1007 PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
1008 mask_stride, mask_line, 1);
1009 mask_line += mask_x >> 5;
1010
1011 if (srca == 0xff)
1012 {
1013 while (height--)
1014 {
1015 dst = dst_line;
1016 dst_line += dst_stride;
1017 mask = mask_line;
1018 mask_line += mask_stride;
1019 w = width;
1020
1021 bitcache = *mask++;
1022 bitmask = CREATE_BITMASK (mask_x & 31)(0x80000000 >> (mask_x & 31));
1023
1024 while (w--)
1025 {
1026 if (bitmask == 0)
1027 {
1028 bitcache = *mask++;
1029 bitmask = CREATE_BITMASK (0)(0x80000000 >> (0));
1030 }
1031 if (bitcache & bitmask)
1032 *dst = src;
1033 bitmask = UPDATE_BITMASK (bitmask)((bitmask) >> 1);
1034 dst++;
1035 }
1036 }
1037 }
1038 else
1039 {
1040 while (height--)
1041 {
1042 dst = dst_line;
1043 dst_line += dst_stride;
1044 mask = mask_line;
1045 mask_line += mask_stride;
1046 w = width;
1047
1048 bitcache = *mask++;
1049 bitmask = CREATE_BITMASK (mask_x & 31)(0x80000000 >> (mask_x & 31));
1050
1051 while (w--)
1052 {
1053 if (bitmask == 0)
1054 {
1055 bitcache = *mask++;
1056 bitmask = CREATE_BITMASK (0)(0x80000000 >> (0));
1057 }
1058 if (bitcache & bitmask)
1059 *dst = over (src, *dst);
1060 bitmask = UPDATE_BITMASK (bitmask)((bitmask) >> 1);
1061 dst++;
1062 }
1063 }
1064 }
1065}
1066
1067static void
1068fast_composite_over_n_1_0565 (pixman_implementation_t *imp,
1069 pixman_composite_info_t *info)
1070{
1071 PIXMAN_COMPOSITE_ARGS (info)__attribute__((unused)) pixman_op_t op = info->op; __attribute__
((unused)) pixman_image_t * src_image = info->src_image; __attribute__
((unused)) pixman_image_t * mask_image = info->mask_image;
__attribute__((unused)) pixman_image_t * dest_image = info->
dest_image; __attribute__((unused)) int32_t src_x = info->
src_x; __attribute__((unused)) int32_t src_y = info->src_y
; __attribute__((unused)) int32_t mask_x = info->mask_x; __attribute__
((unused)) int32_t mask_y = info->mask_y; __attribute__((unused
)) int32_t dest_x = info->dest_x; __attribute__((unused)) int32_t
dest_y = info->dest_y; __attribute__((unused)) int32_t width
= info->width; __attribute__((unused)) int32_t height = info
->height
;
1072 uint32_t src, srca;
1073 uint16_t *dst, *dst_line;
1074 uint32_t *mask, *mask_line;
1075 int mask_stride, dst_stride;
1076 uint32_t bitcache, bitmask;
1077 int32_t w;
1078 uint32_t d;
1079 uint16_t src565;
1080
1081 if (width <= 0)
1082 return;
1083
1084 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
1085 srca = src >> 24;
1086 if (src == 0)
1087 return;
1088
1089 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t,do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0)
1090 dst_stride, dst_line, 1)do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0)
;
1091 PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,do { uint32_t *__bits__; int __stride__; __bits__ = mask_image
->bits.bits; __stride__ = mask_image->bits.rowstride; (
mask_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (mask_line) = ((uint32_t *) __bits__) + (mask_stride
) * (mask_y) + (1) * (0); } while (0)
1092 mask_stride, mask_line, 1)do { uint32_t *__bits__; int __stride__; __bits__ = mask_image
->bits.bits; __stride__ = mask_image->bits.rowstride; (
mask_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (mask_line) = ((uint32_t *) __bits__) + (mask_stride
) * (mask_y) + (1) * (0); } while (0)
;
1093 mask_line += mask_x >> 5;
1094
1095 if (srca == 0xff)
1096 {
1097 src565 = CONVERT_8888_TO_0565 (src)((((src) >> 3) & 0x001f) | (((src) >> 5) &
0x07e0) | (((src) >> 8) & 0xf800))
;
1098 while (height--)
1099 {
1100 dst = dst_line;
1101 dst_line += dst_stride;
1102 mask = mask_line;
1103 mask_line += mask_stride;
1104 w = width;
1105
1106 bitcache = *mask++;
1107 bitmask = CREATE_BITMASK (mask_x & 31)(0x80000000 >> (mask_x & 31));
1108
1109 while (w--)
1110 {
1111 if (bitmask == 0)
1112 {
1113 bitcache = *mask++;
1114 bitmask = CREATE_BITMASK (0)(0x80000000 >> (0));
1115 }
1116 if (bitcache & bitmask)
1117 *dst = src565;
1118 bitmask = UPDATE_BITMASK (bitmask)((bitmask) >> 1);
1119 dst++;
1120 }
1121 }
1122 }
1123 else
1124 {
1125 while (height--)
1126 {
1127 dst = dst_line;
1128 dst_line += dst_stride;
1129 mask = mask_line;
1130 mask_line += mask_stride;
1131 w = width;
1132
1133 bitcache = *mask++;
1134 bitmask = CREATE_BITMASK (mask_x & 31)(0x80000000 >> (mask_x & 31));
1135
1136 while (w--)
1137 {
1138 if (bitmask == 0)
1139 {
1140 bitcache = *mask++;
1141 bitmask = CREATE_BITMASK (0)(0x80000000 >> (0));
1142 }
1143 if (bitcache & bitmask)
1144 {
1145 d = over (src, CONVERT_0565_TO_0888 (*dst)(((((*dst) << 3) & 0xf8) | (((*dst) >> 2) &
0x7)) | ((((*dst) << 5) & 0xfc00) | (((*dst) >>
1) & 0x300)) | ((((*dst) << 8) & 0xf80000) | (
((*dst) << 3) & 0x70000)))
);
1146 *dst = CONVERT_8888_TO_0565 (d)((((d) >> 3) & 0x001f) | (((d) >> 5) & 0x07e0
) | (((d) >> 8) & 0xf800))
;
1147 }
1148 bitmask = UPDATE_BITMASK (bitmask)((bitmask) >> 1);
1149 dst++;
1150 }
1151 }
1152 }
1153}
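
In the non-opaque branch above, each masked r5g6b5 pixel is widened to 888, blended with over (), and packed back down. Both conversions are pure shift-and-mask operations; the following restates the macros whose expansions appear in the listing as plain functions, purely for readability, and is not part of pixman-fast-path.c:

#include <stdint.h>

static uint16_t
pack_8888_to_0565 (uint32_t s)
{
    return (uint16_t) (((s >> 3) & 0x001f) |   /* blue:  keep top 5 of 8 bits */
                       ((s >> 5) & 0x07e0) |   /* green: keep top 6 of 8 bits */
                       ((s >> 8) & 0xf800));   /* red:   keep top 5 of 8 bits */
}

static uint32_t
unpack_0565_to_0888 (uint16_t s)
{
    /* widen each field and replicate its high bits into the low bits,
     * so a full 5-bit or 6-bit channel expands to 0xff */
    return (((((uint32_t) s << 3) & 0xf8)     | (((uint32_t) s >> 2) & 0x07))  |
            ((((uint32_t) s << 5) & 0xfc00)   | (((uint32_t) s >> 1) & 0x300)) |
            ((((uint32_t) s << 8) & 0xf80000) | (((uint32_t) s << 3) & 0x70000)));
}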
1154
1155/*
1156 * Simple bitblt
1157 */
1158
1159static void
1160fast_composite_solid_fill (pixman_implementation_t *imp,
1161 pixman_composite_info_t *info)
1162{
1163 PIXMAN_COMPOSITE_ARGS (info)__attribute__((unused)) pixman_op_t op = info->op; __attribute__
((unused)) pixman_image_t * src_image = info->src_image; __attribute__
((unused)) pixman_image_t * mask_image = info->mask_image;
__attribute__((unused)) pixman_image_t * dest_image = info->
dest_image; __attribute__((unused)) int32_t src_x = info->
src_x; __attribute__((unused)) int32_t src_y = info->src_y
; __attribute__((unused)) int32_t mask_x = info->mask_x; __attribute__
((unused)) int32_t mask_y = info->mask_y; __attribute__((unused
)) int32_t dest_x = info->dest_x; __attribute__((unused)) int32_t
dest_y = info->dest_y; __attribute__((unused)) int32_t width
= info->width; __attribute__((unused)) int32_t height = info
->height
;
1164 uint32_t src;
1165
1166 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
1167
1168 if (dest_image->bits.format == PIXMAN_a1)
1169 {
1170 src = src >> 31;
1171 }
1172 else if (dest_image->bits.format == PIXMAN_a8)
1173 {
1174 src = src >> 24;
1175 }
1176 else if (dest_image->bits.format == PIXMAN_r5g6b5 ||
1177 dest_image->bits.format == PIXMAN_b5g6r5)
1178 {
1179 src = CONVERT_8888_TO_0565 (src)((((src) >> 3) & 0x001f) | (((src) >> 5) &
0x07e0) | (((src) >> 8) & 0xf800))
;
1180 }
1181
1182 pixman_fill (dest_image->bits.bits, dest_image->bits.rowstride,
1183 PIXMAN_FORMAT_BPP (dest_image->bits.format)(((dest_image->bits.format) >> 24) ),
1184 dest_x, dest_y,
1185 width, height,
1186 src);
1187}
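
fast_composite_solid_fill only narrows the solid source colour to the destination's depth (1, 8, 16 or 32 bpp) and then hands the rectangle off to the public pixman_fill () entry point. A minimal usage sketch of that entry point, with a made-up buffer, stride and rectangle purely for illustration:

#include <stdlib.h>
#include "pixman.h"

static void
fill_example (void)
{
    int       width  = 64, height = 64;
    int       stride = width;           /* rowstride, in uint32_t units */
    uint32_t *bits   = calloc ((size_t) width * height, sizeof (uint32_t));

    /* 32 bpp fill of the 16x16 rectangle at (8, 8) with opaque red */
    pixman_fill (bits, stride, 32, 8, 8, 16, 16, 0xffff0000);

    free (bits);
}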
1188
1189static void
1190fast_composite_src_memcpy (pixman_implementation_t *imp,
1191 pixman_composite_info_t *info)
1192{
1193 PIXMAN_COMPOSITE_ARGS (info)__attribute__((unused)) pixman_op_t op = info->op; __attribute__
((unused)) pixman_image_t * src_image = info->src_image; __attribute__
((unused)) pixman_image_t * mask_image = info->mask_image;
__attribute__((unused)) pixman_image_t * dest_image = info->
dest_image; __attribute__((unused)) int32_t src_x = info->
src_x; __attribute__((unused)) int32_t src_y = info->src_y
; __attribute__((unused)) int32_t mask_x = info->mask_x; __attribute__
((unused)) int32_t mask_y = info->mask_y; __attribute__((unused
)) int32_t dest_x = info->dest_x; __attribute__((unused)) int32_t
dest_y = info->dest_y; __attribute__((unused)) int32_t width
= info->width; __attribute__((unused)) int32_t height = info
->height
;
1194 int bpp = PIXMAN_FORMAT_BPP (dest_image->bits.format)(((dest_image->bits.format) >> 24) ) / 8;
1195 uint32_t n_bytes = width * bpp;
1196 int dst_stride, src_stride;
1197 uint8_t *dst;
1198 uint8_t *src;
1199
1200 src_stride = src_image->bits.rowstride * 4;
1201 dst_stride = dest_image->bits.rowstride * 4;
1202
1203 src = (uint8_t *)src_image->bits.bits + src_y * src_stride + src_x * bpp;
1204 dst = (uint8_t *)dest_image->bits.bits + dest_y * dst_stride + dest_x * bpp;
1205
1206 while (height--)
1207 {
1208 memcpy (dst, src, n_bytes);
1209
1210 dst += dst_stride;
1211 src += src_stride;
1212 }
1213}
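
The memcpy path never interprets pixels: rowstride is stored in uint32_t units, so multiplying by 4 gives the byte stride, the top-left corner of the rectangle is located by byte arithmetic, and each row is copied with a single memcpy. A small sketch of just that addressing step, not part of the pixman sources and only valid for whole-byte formats:

#include <stdint.h>

static uint8_t *
pixel_address (uint32_t *bits, int rowstride, int bpp, int x, int y)
{
    int stride_bytes = rowstride * 4;   /* rowstride is counted in uint32_t */
    int bytes_per_px = bpp / 8;         /* 8, 16, 24 or 32 bpp only */

    return (uint8_t *) bits + y * stride_bytes + x * bytes_per_px;
}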
1214
1215FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, SRC, COVER)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_8888_cover_SRC
(uint32_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (-1
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (-1 ==
PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = (s1); } else if (s1) { d = (*dst); s1 = (s1); a1 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; if (a2 == 0xff) { *dst = (s2); } else if (
s2) { d = (*dst); s2 = (s2); a2 ^= 0xff; do { uint32_t r1__, r2__
, r3__, t__; r1__ = (d); r2__ = (s2) & 0xff00ff; do { t__
= ((r1__) & 0xff00ff) * ((a2)); t__ += 0x800080; r1__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r1__ &=
0xff00ff; } while (0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff
); } while (0); r2__ = (d) >> 8; r3__ = ((s2) >> 8
) & 0xff00ff; do { t__ = ((r2__) & 0xff00ff) * ((a2))
; t__ += 0x800080; r2__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r2__ &= 0xff00ff; } while (0); do { t__ = (
(r2__) + (r3__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r2__ = (t__ & 0xff00ff); } while (0); (d) = r1__
| (r2__ << 8); } while (0); *dst = (d); } dst++; } else
{ *dst++ = (s1); *dst++ = (s2); } } if (w & 1) { x1 = vx
>> 16; s1 = src[x1]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER
) { a1 = ((s1) >> 24); if (a1 == 0xff) { *dst = (s1); }
else if (s1) { d = (*dst); s1 = (s1); a1 ^= 0xff; do { uint32_t
r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s1) & 0xff00ff
; do { t__ = ((r1__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (r2__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r1__
= (t__ & 0xff00ff); } while (0); r2__ = (d) >> 8; r3__
= ((s1) >> 8) & 0xff00ff; do { t__ = ((r2__) &
0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff; } while
(0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff); } while
(0); (d) = r1__ | (r2__ << 8); } while (0); *dst = (d)
; } dst++; } else { *dst++ = (s1); } } } static __inline__ __attribute__
((__always_inline__)) void scaled_nearest_scanline_8888_8888_cover_SRC_8888_8888_cover_SRC_wrapper
( const uint8_t *mask, uint32_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_8888_cover_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_8888_cover_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint32_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint32_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (dst_line) = ((uint32_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (-1 == PIXMAN_REPEAT_NORMAL) { max_vx = src_image
->bits.width << 16; max_vy = src_image->bits.height
<< 16; repeat (PIXMAN_REPEAT_NORMAL, &vx, max_vx);
repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); } if (-1 == PIXMAN_REPEAT_PAD
|| -1 == PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds
(src_image->bits.width, vx, unit_x, &width, &left_pad
, &right_pad); vx += left_pad * unit_x; } while (--height
>= 0) { dst = dst_line; dst_line += dst_stride; if (0 &&
!0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (-1 == PIXMAN_REPEAT_NORMAL) repeat (PIXMAN_REPEAT_NORMAL
, &vy, max_vy); if (-1 == PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD
, &y, src_image->bits.height); src = src_first_line + src_stride
* y; if (left_pad > 0) { scaled_nearest_scanline_8888_8888_cover_SRC_8888_8888_cover_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_8888_cover_SRC_8888_8888_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_cover_SRC_8888_8888_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (-1 == PIXMAN_REPEAT_NONE) { static const uint32_t zero[1
] = { 0 }; if (y < 0 || y >= src_image->bits.height)
{ scaled_nearest_scanline_8888_8888_cover_SRC_8888_8888_cover_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_8888_cover_SRC_8888_8888_cover_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_8888_cover_SRC_8888_8888_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_cover_SRC_8888_8888_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_8888_cover_SRC_8888_8888_cover_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
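
Each FAST_NEAREST instantiation above and below expands into a scanline routine that steps through the source in 16.16 fixed point: vx accumulates unit_x once per destination pixel, the sample index is simply vx >> 16, and the repeat mode decides how out-of-range indices are handled. A minimal sketch of that inner loop with the macro machinery, blending and repeat handling stripped away (the typedef is a stand-in for pixman_fixed_t, not part of pixman-fast-path.c):

#include <stdint.h>

typedef int32_t fixed_16_16;            /* 16.16 fixed point, like pixman_fixed_t */

static void
scale_row_nearest (uint32_t *dst, const uint32_t *src, int32_t w,
                   fixed_16_16 vx, fixed_16_16 unit_x)
{
    while (w--)
    {
        *dst++ = src[vx >> 16];         /* nearest source pixel for this dst pixel */
        vx += unit_x;                   /* advance the source coordinate */
    }
}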
1216FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, SRC, NONE)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_8888_none_SRC
(uint32_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = (s1); } else if (s1) { d = (*dst); s1 = (s1); a1 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; if (a2 == 0xff) { *dst = (s2); } else if (
s2) { d = (*dst); s2 = (s2); a2 ^= 0xff; do { uint32_t r1__, r2__
, r3__, t__; r1__ = (d); r2__ = (s2) & 0xff00ff; do { t__
= ((r1__) & 0xff00ff) * ((a2)); t__ += 0x800080; r1__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r1__ &=
0xff00ff; } while (0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff
); } while (0); r2__ = (d) >> 8; r3__ = ((s2) >> 8
) & 0xff00ff; do { t__ = ((r2__) & 0xff00ff) * ((a2))
; t__ += 0x800080; r2__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r2__ &= 0xff00ff; } while (0); do { t__ = (
(r2__) + (r3__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r2__ = (t__ & 0xff00ff); } while (0); (d) = r1__
| (r2__ << 8); } while (0); *dst = (d); } dst++; } else
{ *dst++ = (s1); *dst++ = (s2); } } if (w & 1) { x1 = vx
>> 16; s1 = src[x1]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER
) { a1 = ((s1) >> 24); if (a1 == 0xff) { *dst = (s1); }
else if (s1) { d = (*dst); s1 = (s1); a1 ^= 0xff; do { uint32_t
r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s1) & 0xff00ff
; do { t__ = ((r1__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (r2__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r1__
= (t__ & 0xff00ff); } while (0); r2__ = (d) >> 8; r3__
= ((s1) >> 8) & 0xff00ff; do { t__ = ((r2__) &
0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff; } while
(0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff); } while
(0); (d) = r1__ | (r2__ << 8); } while (0); *dst = (d)
; } dst++; } else { *dst++ = (s1); } } } static __inline__ __attribute__
((__always_inline__)) void scaled_nearest_scanline_8888_8888_none_SRC_8888_8888_none_SRC_wrapper
( const uint8_t *mask, uint32_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_8888_none_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_8888_none_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint32_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint32_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (dst_line) = ((uint32_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_8888_8888_none_SRC_8888_8888_none_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_8888_none_SRC_8888_8888_none_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_none_SRC_8888_8888_none_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NONE) { static const
uint32_t zero[1] = { 0 }; if (y < 0 || y >= src_image->
bits.height) { scaled_nearest_scanline_8888_8888_none_SRC_8888_8888_none_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_8888_none_SRC_8888_8888_none_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_8888_none_SRC_8888_8888_none_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_none_SRC_8888_8888_none_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_8888_none_SRC_8888_8888_none_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
1217FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, SRC, PAD)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_8888_pad_SRC
(uint32_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = (s1); } else if (s1) { d = (*dst); s1 = (s1); a1 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; if (a2 == 0xff) { *dst = (s2); } else if (
s2) { d = (*dst); s2 = (s2); a2 ^= 0xff; do { uint32_t r1__, r2__
, r3__, t__; r1__ = (d); r2__ = (s2) & 0xff00ff; do { t__
= ((r1__) & 0xff00ff) * ((a2)); t__ += 0x800080; r1__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r1__ &=
0xff00ff; } while (0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff
); } while (0); r2__ = (d) >> 8; r3__ = ((s2) >> 8
) & 0xff00ff; do { t__ = ((r2__) & 0xff00ff) * ((a2))
; t__ += 0x800080; r2__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r2__ &= 0xff00ff; } while (0); do { t__ = (
(r2__) + (r3__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r2__ = (t__ & 0xff00ff); } while (0); (d) = r1__
| (r2__ << 8); } while (0); *dst = (d); } dst++; } else
{ *dst++ = (s1); *dst++ = (s2); } } if (w & 1) { x1 = vx
>> 16; s1 = src[x1]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER
) { a1 = ((s1) >> 24); if (a1 == 0xff) { *dst = (s1); }
else if (s1) { d = (*dst); s1 = (s1); a1 ^= 0xff; do { uint32_t
r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s1) & 0xff00ff
; do { t__ = ((r1__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (r2__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r1__
= (t__ & 0xff00ff); } while (0); r2__ = (d) >> 8; r3__
= ((s1) >> 8) & 0xff00ff; do { t__ = ((r2__) &
0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff; } while
(0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff); } while
(0); (d) = r1__ | (r2__ << 8); } while (0); *dst = (d)
; } dst++; } else { *dst++ = (s1); } } } static __inline__ __attribute__
((__always_inline__)) void scaled_nearest_scanline_8888_8888_pad_SRC_8888_8888_pad_SRC_wrapper
( const uint8_t *mask, uint32_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_8888_pad_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_8888_pad_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint32_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint32_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (dst_line) = ((uint32_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_8888_8888_pad_SRC_8888_8888_pad_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_8888_pad_SRC_8888_8888_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_pad_SRC_8888_8888_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NONE) { static const uint32_t
zero[1] = { 0 }; if (y < 0 || y >= src_image->bits.
height) { scaled_nearest_scanline_8888_8888_pad_SRC_8888_8888_pad_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_8888_pad_SRC_8888_8888_pad_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_8888_pad_SRC_8888_8888_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_pad_SRC_8888_8888_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_8888_pad_SRC_8888_8888_pad_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
1218FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, SRC, NORMAL)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_8888_normal_SRC
(uint32_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = (s1); } else if (s1) { d = (*dst); s1 = (s1); a1 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; if (a2 == 0xff) { *dst = (s2); } else if (
s2) { d = (*dst); s2 = (s2); a2 ^= 0xff; do { uint32_t r1__, r2__
, r3__, t__; r1__ = (d); r2__ = (s2) & 0xff00ff; do { t__
= ((r1__) & 0xff00ff) * ((a2)); t__ += 0x800080; r1__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r1__ &=
0xff00ff; } while (0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff
); } while (0); r2__ = (d) >> 8; r3__ = ((s2) >> 8
) & 0xff00ff; do { t__ = ((r2__) & 0xff00ff) * ((a2))
; t__ += 0x800080; r2__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r2__ &= 0xff00ff; } while (0); do { t__ = (
(r2__) + (r3__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r2__ = (t__ & 0xff00ff); } while (0); (d) = r1__
| (r2__ << 8); } while (0); *dst = (d); } dst++; } else
{ *dst++ = (s1); *dst++ = (s2); } } if (w & 1) { x1 = vx
>> 16; s1 = src[x1]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER
) { a1 = ((s1) >> 24); if (a1 == 0xff) { *dst = (s1); }
else if (s1) { d = (*dst); s1 = (s1); a1 ^= 0xff; do { uint32_t
r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s1) & 0xff00ff
; do { t__ = ((r1__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (r2__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r1__
= (t__ & 0xff00ff); } while (0); r2__ = (d) >> 8; r3__
= ((s1) >> 8) & 0xff00ff; do { t__ = ((r2__) &
0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff; } while
(0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff); } while
(0); (d) = r1__ | (r2__ << 8); } while (0); *dst = (d)
; } dst++; } else { *dst++ = (s1); } } } static __inline__ __attribute__
((__always_inline__)) void scaled_nearest_scanline_8888_8888_normal_SRC_8888_8888_normal_SRC_wrapper
( const uint8_t *mask, uint32_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_8888_normal_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_8888_normal_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint32_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint32_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (dst_line) = ((uint32_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_8888_8888_normal_SRC_8888_8888_normal_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_8888_normal_SRC_8888_8888_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_normal_SRC_8888_8888_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NONE) { static const
uint32_t zero[1] = { 0 }; if (y < 0 || y >= src_image->
bits.height) { scaled_nearest_scanline_8888_8888_normal_SRC_8888_8888_normal_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_8888_normal_SRC_8888_8888_normal_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_8888_normal_SRC_8888_8888_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_normal_SRC_8888_8888_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_8888_normal_SRC_8888_8888_normal_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
1219FAST_NEAREST (x888_8888_cover, x888, 8888, uint32_t, uint32_t, SRC, COVER)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_x888_8888_cover_SRC
(uint32_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (-1
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (-1 ==
PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
0xff; a2 = 0xff; if (a1 == 0xff) { *dst = ((s1) | 0xff000000
); } else if (s1) { d = (*dst); s1 = ((s1) | 0xff000000); a1 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; if (a2 == 0xff) { *dst = ((s2) | 0xff000000
); } else if (s2) { d = (*dst); s2 = ((s2) | 0xff000000); a2 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s2) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a2)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; } else { *dst++ = ((s1) | 0xff000000); *dst
++ = ((s2) | 0xff000000); } } if (w & 1) { x1 = vx >>
16; s1 = src[x1]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
0xff; if (a1 == 0xff) { *dst = ((s1) | 0xff000000); } else if
(s1) { d = (*dst); s1 = ((s1) | 0xff000000); a1 ^= 0xff; do {
uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s1) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a1)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; } else { *dst++ = ((s1) | 0xff000000); } }
} static __inline__ __attribute__ ((__always_inline__)) void
scaled_nearest_scanline_x888_8888_cover_SRC_x888_8888_cover_SRC_wrapper
( const uint8_t *mask, uint32_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_x888_8888_cover_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_x888_8888_cover_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint32_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint32_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (dst_line) = ((uint32_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (-1 == PIXMAN_REPEAT_NORMAL) { max_vx = src_image
->bits.width << 16; max_vy = src_image->bits.height
<< 16; repeat (PIXMAN_REPEAT_NORMAL, &vx, max_vx);
repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); } if (-1 == PIXMAN_REPEAT_PAD
|| -1 == PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds
(src_image->bits.width, vx, unit_x, &width, &left_pad
, &right_pad); vx += left_pad * unit_x; } while (--height
>= 0) { dst = dst_line; dst_line += dst_stride; if (0 &&
!0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (-1 == PIXMAN_REPEAT_NORMAL) repeat (PIXMAN_REPEAT_NORMAL
, &vy, max_vy); if (-1 == PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD
, &y, src_image->bits.height); src = src_first_line + src_stride
* y; if (left_pad > 0) { scaled_nearest_scanline_x888_8888_cover_SRC_x888_8888_cover_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_x888_8888_cover_SRC_x888_8888_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_x888_8888_cover_SRC_x888_8888_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (-1 == PIXMAN_REPEAT_NONE) { static const uint32_t zero[1
] = { 0 }; if (y < 0 || y >= src_image->bits.height)
{ scaled_nearest_scanline_x888_8888_cover_SRC_x888_8888_cover_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_x888_8888_cover_SRC_x888_8888_cover_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_x888_8888_cover_SRC_x888_8888_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_x888_8888_cover_SRC_x888_8888_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_x888_8888_cover_SRC_x888_8888_cover_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
1220FAST_NEAREST (x888_8888_pad, x888, 8888, uint32_t, uint32_t, SRC, PAD)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_x888_8888_pad_SRC
(uint32_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
0xff; a2 = 0xff; if (a1 == 0xff) { *dst = ((s1) | 0xff000000
); } else if (s1) { d = (*dst); s1 = ((s1) | 0xff000000); a1 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; if (a2 == 0xff) { *dst = ((s2) | 0xff000000
); } else if (s2) { d = (*dst); s2 = ((s2) | 0xff000000); a2 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s2) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a2)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; } else { *dst++ = ((s1) | 0xff000000); *dst
++ = ((s2) | 0xff000000); } } if (w & 1) { x1 = vx >>
16; s1 = src[x1]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
0xff; if (a1 == 0xff) { *dst = ((s1) | 0xff000000); } else if
(s1) { d = (*dst); s1 = ((s1) | 0xff000000); a1 ^= 0xff; do {
uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s1) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a1)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; } else { *dst++ = ((s1) | 0xff000000); } }
} static __inline__ __attribute__ ((__always_inline__)) void
scaled_nearest_scanline_x888_8888_pad_SRC_x888_8888_pad_SRC_wrapper
( const uint8_t *mask, uint32_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_x888_8888_pad_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_x888_8888_pad_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint32_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint32_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (dst_line) = ((uint32_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_x888_8888_pad_SRC_x888_8888_pad_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_x888_8888_pad_SRC_x888_8888_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_x888_8888_pad_SRC_x888_8888_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NONE) { static const uint32_t
zero[1] = { 0 }; if (y < 0 || y >= src_image->bits.
height) { scaled_nearest_scanline_x888_8888_pad_SRC_x888_8888_pad_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_x888_8888_pad_SRC_x888_8888_pad_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_x888_8888_pad_SRC_x888_8888_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_x888_8888_pad_SRC_x888_8888_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_x888_8888_pad_SRC_x888_8888_pad_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
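/*
 * Illustrative sketch, not part of pixman: as the expansions above and
 * below show, each FAST_NEAREST line in this file generates a scanline
 * function, a wrapper that also accepts a mask pointer, and a
 * fast_composite_scaled_nearest_* entry point.  The scanline functions
 * all step a 16.16 fixed-point source coordinate across the destination
 * row and fetch the nearest source pixel.  A minimal scalar form of that
 * inner loop, assuming the COVER case (no repeat handling), would be:
 */
static void
nearest_scanline_sketch (uint32_t       *dst,
                         const uint32_t *src,
                         int32_t         w,
                         pixman_fixed_t  vx,     /* 16.16 source x coordinate */
                         pixman_fixed_t  unit_x) /* 16.16 step per dest pixel */
{
    while (w-- > 0)
    {
	*dst++ = src[vx >> 16]; /* integer part picks the source pixel */
	vx += unit_x;           /* advance to the next destination pixel */
    }
}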
1221FAST_NEAREST (x888_8888_normal, x888, 8888, uint32_t, uint32_t, SRC, NORMAL)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_x888_8888_normal_SRC
(uint32_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
0xff; a2 = 0xff; if (a1 == 0xff) { *dst = ((s1) | 0xff000000
); } else if (s1) { d = (*dst); s1 = ((s1) | 0xff000000); a1 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; if (a2 == 0xff) { *dst = ((s2) | 0xff000000
); } else if (s2) { d = (*dst); s2 = ((s2) | 0xff000000); a2 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s2) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a2)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; } else { *dst++ = ((s1) | 0xff000000); *dst
++ = ((s2) | 0xff000000); } } if (w & 1) { x1 = vx >>
16; s1 = src[x1]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
0xff; if (a1 == 0xff) { *dst = ((s1) | 0xff000000); } else if
(s1) { d = (*dst); s1 = ((s1) | 0xff000000); a1 ^= 0xff; do {
uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s1) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a1)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; } else { *dst++ = ((s1) | 0xff000000); } }
} static __inline__ __attribute__ ((__always_inline__)) void
scaled_nearest_scanline_x888_8888_normal_SRC_x888_8888_normal_SRC_wrapper
( const uint8_t *mask, uint32_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_x888_8888_normal_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_x888_8888_normal_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint32_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint32_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (dst_line) = ((uint32_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_x888_8888_normal_SRC_x888_8888_normal_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_x888_8888_normal_SRC_x888_8888_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_x888_8888_normal_SRC_x888_8888_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NONE) { static const
uint32_t zero[1] = { 0 }; if (y < 0 || y >= src_image->
bits.height) { scaled_nearest_scanline_x888_8888_normal_SRC_x888_8888_normal_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_x888_8888_normal_SRC_x888_8888_normal_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_x888_8888_normal_SRC_x888_8888_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_x888_8888_normal_SRC_x888_8888_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_x888_8888_normal_SRC_x888_8888_normal_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
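/*
 * Illustrative sketch, not part of pixman: for NORMAL repeat the
 * expansion above keeps vx inside [0, max_vx), where max_vx is the
 * source width in 16.16 fixed point (width << 16), by repeatedly
 * subtracting max_vx, so the source image tiles horizontally.  In
 * isolation (assuming vx was already wrapped into range once by the
 * caller, so it cannot be negative):
 */
static pixman_fixed_t
repeat_normal_sketch (pixman_fixed_t vx, pixman_fixed_t max_vx)
{
    while (vx >= max_vx)
	vx -= max_vx;

    return vx;
}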
1222FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, OVER, COVER)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_8888_cover_OVER
(uint32_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_OVER != PIXMAN_OP_SRC
&& PIXMAN_OP_OVER != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (-1
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (-1 ==
PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = (s1); } else if (s1) { d = (*dst); s1 = (s1); a1 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; if (a2 == 0xff) { *dst = (s2); } else if (
s2) { d = (*dst); s2 = (s2); a2 ^= 0xff; do { uint32_t r1__, r2__
, r3__, t__; r1__ = (d); r2__ = (s2) & 0xff00ff; do { t__
= ((r1__) & 0xff00ff) * ((a2)); t__ += 0x800080; r1__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r1__ &=
0xff00ff; } while (0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff
); } while (0); r2__ = (d) >> 8; r3__ = ((s2) >> 8
) & 0xff00ff; do { t__ = ((r2__) & 0xff00ff) * ((a2))
; t__ += 0x800080; r2__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r2__ &= 0xff00ff; } while (0); do { t__ = (
(r2__) + (r3__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r2__ = (t__ & 0xff00ff); } while (0); (d) = r1__
| (r2__ << 8); } while (0); *dst = (d); } dst++; } else
{ *dst++ = (s1); *dst++ = (s2); } } if (w & 1) { x1 = vx
>> 16; s1 = src[x1]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER
) { a1 = ((s1) >> 24); if (a1 == 0xff) { *dst = (s1); }
else if (s1) { d = (*dst); s1 = (s1); a1 ^= 0xff; do { uint32_t
r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s1) & 0xff00ff
; do { t__ = ((r1__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (r2__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r1__
= (t__ & 0xff00ff); } while (0); r2__ = (d) >> 8; r3__
= ((s1) >> 8) & 0xff00ff; do { t__ = ((r2__) &
0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff; } while
(0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff); } while
(0); (d) = r1__ | (r2__ << 8); } while (0); *dst = (d)
; } dst++; } else { *dst++ = (s1); } } } static __inline__ __attribute__
((__always_inline__)) void scaled_nearest_scanline_8888_8888_cover_OVER_8888_8888_cover_OVER_wrapper
( const uint8_t *mask, uint32_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_8888_cover_OVER
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_8888_cover_OVER (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint32_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint32_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (dst_line) = ((uint32_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (-1 == PIXMAN_REPEAT_NORMAL) { max_vx = src_image
->bits.width << 16; max_vy = src_image->bits.height
<< 16; repeat (PIXMAN_REPEAT_NORMAL, &vx, max_vx);
repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); } if (-1 == PIXMAN_REPEAT_PAD
|| -1 == PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds
(src_image->bits.width, vx, unit_x, &width, &left_pad
, &right_pad); vx += left_pad * unit_x; } while (--height
>= 0) { dst = dst_line; dst_line += dst_stride; if (0 &&
!0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (-1 == PIXMAN_REPEAT_NORMAL) repeat (PIXMAN_REPEAT_NORMAL
, &vy, max_vy); if (-1 == PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD
, &y, src_image->bits.height); src = src_first_line + src_stride
* y; if (left_pad > 0) { scaled_nearest_scanline_8888_8888_cover_OVER_8888_8888_cover_OVER_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_8888_cover_OVER_8888_8888_cover_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_cover_OVER_8888_8888_cover_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (-1 == PIXMAN_REPEAT_NONE) { static const uint32_t zero[1
] = { 0 }; if (y < 0 || y >= src_image->bits.height)
{ scaled_nearest_scanline_8888_8888_cover_OVER_8888_8888_cover_OVER_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_8888_cover_OVER_8888_8888_cover_OVER_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_8888_cover_OVER_8888_8888_cover_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_cover_OVER_8888_8888_cover_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_8888_cover_OVER_8888_8888_cover_OVER_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
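/*
 * Illustrative sketch, not part of pixman: the OVER expansions above
 * compute dest = src + dest * (255 - src_alpha) / 255 on premultiplied
 * pixels.  "a1 ^= 0xff" forms 255 - alpha, the 0xff00ff masks let one
 * 32-bit multiply handle two 8-bit channels at once, and the
 * 0x800080 / shift sequence is the exact divide-by-255 trick.  One
 * channel of the same arithmetic, including the saturation that the
 * packed form also applies on the final add:
 */
static uint8_t
over_channel_sketch (uint8_t src_chan, uint8_t src_alpha, uint8_t dst_chan)
{
    uint32_t t = dst_chan * (255u - src_alpha) + 0x80;
    uint32_t sum;

    /* with the +0x80 bias, (t + (t >> 8)) >> 8 is
     * dst_chan * (255 - src_alpha) / 255, correctly rounded */
    sum = src_chan + ((t + (t >> 8)) >> 8);

    return (uint8_t) (sum > 0xff ? 0xff : sum);
}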
1223FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, OVER, NONE)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_8888_none_OVER
(uint32_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_OVER != PIXMAN_OP_SRC
&& PIXMAN_OP_OVER != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = (s1); } else if (s1) { d = (*dst); s1 = (s1); a1 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; if (a2 == 0xff) { *dst = (s2); } else if (
s2) { d = (*dst); s2 = (s2); a2 ^= 0xff; do { uint32_t r1__, r2__
, r3__, t__; r1__ = (d); r2__ = (s2) & 0xff00ff; do { t__
= ((r1__) & 0xff00ff) * ((a2)); t__ += 0x800080; r1__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r1__ &=
0xff00ff; } while (0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff
); } while (0); r2__ = (d) >> 8; r3__ = ((s2) >> 8
) & 0xff00ff; do { t__ = ((r2__) & 0xff00ff) * ((a2))
; t__ += 0x800080; r2__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r2__ &= 0xff00ff; } while (0); do { t__ = (
(r2__) + (r3__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r2__ = (t__ & 0xff00ff); } while (0); (d) = r1__
| (r2__ << 8); } while (0); *dst = (d); } dst++; } else
{ *dst++ = (s1); *dst++ = (s2); } } if (w & 1) { x1 = vx
>> 16; s1 = src[x1]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER
) { a1 = ((s1) >> 24); if (a1 == 0xff) { *dst = (s1); }
else if (s1) { d = (*dst); s1 = (s1); a1 ^= 0xff; do { uint32_t
r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s1) & 0xff00ff
; do { t__ = ((r1__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (r2__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r1__
= (t__ & 0xff00ff); } while (0); r2__ = (d) >> 8; r3__
= ((s1) >> 8) & 0xff00ff; do { t__ = ((r2__) &
0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff; } while
(0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff); } while
(0); (d) = r1__ | (r2__ << 8); } while (0); *dst = (d)
; } dst++; } else { *dst++ = (s1); } } } static __inline__ __attribute__
((__always_inline__)) void scaled_nearest_scanline_8888_8888_none_OVER_8888_8888_none_OVER_wrapper
( const uint8_t *mask, uint32_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_8888_none_OVER
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_8888_none_OVER (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint32_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint32_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (dst_line) = ((uint32_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_8888_8888_none_OVER_8888_8888_none_OVER_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_8888_none_OVER_8888_8888_none_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_none_OVER_8888_8888_none_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NONE) { static const
uint32_t zero[1] = { 0 }; if (y < 0 || y >= src_image->
bits.height) { scaled_nearest_scanline_8888_8888_none_OVER_8888_8888_none_OVER_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_8888_none_OVER_8888_8888_none_OVER_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_8888_none_OVER_8888_8888_none_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_none_OVER_8888_8888_none_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_8888_none_OVER_8888_8888_none_OVER_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
1224FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, OVER, PAD)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_8888_pad_OVER
(uint32_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_OVER != PIXMAN_OP_SRC
&& PIXMAN_OP_OVER != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = (s1); } else if (s1) { d = (*dst); s1 = (s1); a1 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; if (a2 == 0xff) { *dst = (s2); } else if (
s2) { d = (*dst); s2 = (s2); a2 ^= 0xff; do { uint32_t r1__, r2__
, r3__, t__; r1__ = (d); r2__ = (s2) & 0xff00ff; do { t__
= ((r1__) & 0xff00ff) * ((a2)); t__ += 0x800080; r1__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r1__ &=
0xff00ff; } while (0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff
); } while (0); r2__ = (d) >> 8; r3__ = ((s2) >> 8
) & 0xff00ff; do { t__ = ((r2__) & 0xff00ff) * ((a2))
; t__ += 0x800080; r2__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r2__ &= 0xff00ff; } while (0); do { t__ = (
(r2__) + (r3__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r2__ = (t__ & 0xff00ff); } while (0); (d) = r1__
| (r2__ << 8); } while (0); *dst = (d); } dst++; } else
{ *dst++ = (s1); *dst++ = (s2); } } if (w & 1) { x1 = vx
>> 16; s1 = src[x1]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER
) { a1 = ((s1) >> 24); if (a1 == 0xff) { *dst = (s1); }
else if (s1) { d = (*dst); s1 = (s1); a1 ^= 0xff; do { uint32_t
r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s1) & 0xff00ff
; do { t__ = ((r1__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (r2__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r1__
= (t__ & 0xff00ff); } while (0); r2__ = (d) >> 8; r3__
= ((s1) >> 8) & 0xff00ff; do { t__ = ((r2__) &
0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff; } while
(0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff); } while
(0); (d) = r1__ | (r2__ << 8); } while (0); *dst = (d)
; } dst++; } else { *dst++ = (s1); } } } static __inline__ __attribute__
((__always_inline__)) void scaled_nearest_scanline_8888_8888_pad_OVER_8888_8888_pad_OVER_wrapper
( const uint8_t *mask, uint32_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_8888_pad_OVER
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_8888_pad_OVER (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint32_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint32_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (dst_line) = ((uint32_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_8888_8888_pad_OVER_8888_8888_pad_OVER_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_8888_pad_OVER_8888_8888_pad_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_pad_OVER_8888_8888_pad_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NONE) { static const uint32_t
zero[1] = { 0 }; if (y < 0 || y >= src_image->bits.
height) { scaled_nearest_scanline_8888_8888_pad_OVER_8888_8888_pad_OVER_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_8888_pad_OVER_8888_8888_pad_OVER_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_8888_pad_OVER_8888_8888_pad_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_pad_OVER_8888_8888_pad_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_8888_pad_OVER_8888_8888_pad_OVER_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
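/*
 * Illustrative sketch, not part of pixman: the PAD variants above clamp
 * the source row index into [0, height - 1] and reuse the edge pixels
 * for the left_pad / right_pad ranges, while the NONE variants feed a
 * one-pixel zero scanline to those ranges (and to rows that fall
 * entirely outside the source).  The PAD clamp by itself amounts to:
 */
static int
repeat_pad_sketch (int y, int height)
{
    if (y < 0)
	return 0;
    if (y >= height)
	return height - 1;

    return y;
}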
1225FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, OVER, NORMAL)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_8888_normal_OVER
(uint32_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_OVER != PIXMAN_OP_SRC
&& PIXMAN_OP_OVER != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = (s1); } else if (s1) { d = (*dst); s1 = (s1); a1 ^=
0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ =
(s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((
a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__
= ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__ &=
0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = (d); } dst++; if (a2 == 0xff) { *dst = (s2); } else if (
s2) { d = (*dst); s2 = (s2); a2 ^= 0xff; do { uint32_t r1__, r2__
, r3__, t__; r1__ = (d); r2__ = (s2) & 0xff00ff; do { t__
= ((r1__) & 0xff00ff) * ((a2)); t__ += 0x800080; r1__ = (
t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r1__ &=
0xff00ff; } while (0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff
); } while (0); r2__ = (d) >> 8; r3__ = ((s2) >> 8
) & 0xff00ff; do { t__ = ((r2__) & 0xff00ff) * ((a2))
; t__ += 0x800080; r2__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r2__ &= 0xff00ff; } while (0); do { t__ = (
(r2__) + (r3__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r2__ = (t__ & 0xff00ff); } while (0); (d) = r1__
| (r2__ << 8); } while (0); *dst = (d); } dst++; } else
{ *dst++ = (s1); *dst++ = (s2); } } if (w & 1) { x1 = vx
>> 16; s1 = src[x1]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER
) { a1 = ((s1) >> 24); if (a1 == 0xff) { *dst = (s1); }
else if (s1) { d = (*dst); s1 = (s1); a1 ^= 0xff; do { uint32_t
r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s1) & 0xff00ff
; do { t__ = ((r1__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (r2__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r1__
= (t__ & 0xff00ff); } while (0); r2__ = (d) >> 8; r3__
= ((s1) >> 8) & 0xff00ff; do { t__ = ((r2__) &
0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff; } while
(0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff); } while
(0); (d) = r1__ | (r2__ << 8); } while (0); *dst = (d)
; } dst++; } else { *dst++ = (s1); } } } static __inline__ __attribute__
((__always_inline__)) void scaled_nearest_scanline_8888_8888_normal_OVER_8888_8888_normal_OVER_wrapper
( const uint8_t *mask, uint32_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_8888_normal_OVER
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_8888_normal_OVER (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint32_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint32_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (dst_line) = ((uint32_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_8888_8888_normal_OVER_8888_8888_normal_OVER_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_8888_normal_OVER_8888_8888_normal_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_normal_OVER_8888_8888_normal_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NONE) { static const
uint32_t zero[1] = { 0 }; if (y < 0 || y >= src_image->
bits.height) { scaled_nearest_scanline_8888_8888_normal_OVER_8888_8888_normal_OVER_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_8888_normal_OVER_8888_8888_normal_OVER_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_8888_normal_OVER_8888_8888_normal_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_8888_normal_OVER_8888_8888_normal_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_8888_normal_OVER_8888_8888_normal_OVER_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
1226FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, SRC, COVER)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_565_cover_SRC
(uint16_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (-1
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (-1 ==
PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else
if (s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst)
>> 2) & 0x7)) | ((((*dst) << 5) & 0xfc00
) | (((*dst) >> 1) & 0x300)) | ((((*dst) << 8
) & 0xf80000) | (((*dst) << 3) & 0x70000))) | 0xff000000
); s1 = (s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__
; r1__ = (d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) &
0xff00ff) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r1__ &= 0xff00ff; } while
(0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while
(0); r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; if (a2 == 0xff) { *dst = ((((s2) >> 3) &
0x001f) | (((s2) >> 5) & 0x07e0) | (((s2) >>
8) & 0xf800)); } else if (s2) { d = ((((((*dst) <<
3) & 0xf8) | (((*dst) >> 2) & 0x7)) | ((((*dst
) << 5) & 0xfc00) | (((*dst) >> 1) & 0x300
)) | ((((*dst) << 8) & 0xf80000) | (((*dst) <<
3) & 0x70000))) | 0xff000000); s2 = (s2); a2 ^= 0xff; do
{ uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s2) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a2)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = ((((d) >> 3) & 0x001f) | (((d) >> 5) &
0x07e0) | (((d) >> 8) & 0xf800)); } dst++; } else {
*dst++ = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); *dst++
= ((((s2) >> 3) & 0x001f) | (((s2) >> 5) &
0x07e0) | (((s2) >> 8) & 0xf800)); } } if (w &
1) { x1 = vx >> 16; s1 = src[x1]; if (PIXMAN_OP_SRC ==
PIXMAN_OP_OVER) { a1 = ((s1) >> 24); if (a1 == 0xff) {
*dst = ((((s1) >> 3) & 0x001f) | (((s1) >> 5
) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else if
(s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst) >>
2) & 0x7)) | ((((*dst) << 5) & 0xfc00) | (((*dst
) >> 1) & 0x300)) | ((((*dst) << 8) & 0xf80000
) | (((*dst) << 3) & 0x70000))) | 0xff000000); s1 =
(s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ =
(d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff
) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) &
0xff00ff)) >> 8; r1__ &= 0xff00ff; } while (0); do
{ t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >>
8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while (0)
; r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; } else { *dst++ = ((((s1) >> 3) & 0x001f
) | (((s1) >> 5) & 0x07e0) | (((s1) >> 8) &
0xf800)); } } } static __inline__ __attribute__ ((__always_inline__
)) void scaled_nearest_scanline_8888_565_cover_SRC_8888_565_cover_SRC_wrapper
( const uint8_t *mask, uint16_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_565_cover_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_565_cover_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint16_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint16_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (-1 == PIXMAN_REPEAT_NORMAL) { max_vx = src_image
->bits.width << 16; max_vy = src_image->bits.height
<< 16; repeat (PIXMAN_REPEAT_NORMAL, &vx, max_vx);
repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); } if (-1 == PIXMAN_REPEAT_PAD
|| -1 == PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds
(src_image->bits.width, vx, unit_x, &width, &left_pad
, &right_pad); vx += left_pad * unit_x; } while (--height
>= 0) { dst = dst_line; dst_line += dst_stride; if (0 &&
!0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (-1 == PIXMAN_REPEAT_NORMAL) repeat (PIXMAN_REPEAT_NORMAL
, &vy, max_vy); if (-1 == PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD
, &y, src_image->bits.height); src = src_first_line + src_stride
* y; if (left_pad > 0) { scaled_nearest_scanline_8888_565_cover_SRC_8888_565_cover_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_565_cover_SRC_8888_565_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_cover_SRC_8888_565_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (-1 == PIXMAN_REPEAT_NONE) { static const uint32_t zero[1
] = { 0 }; if (y < 0 || y >= src_image->bits.height)
{ scaled_nearest_scanline_8888_565_cover_SRC_8888_565_cover_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_565_cover_SRC_8888_565_cover_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_565_cover_SRC_8888_565_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_cover_SRC_8888_565_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_565_cover_SRC_8888_565_cover_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
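/*
 * Editor's sketch, not part of pixman-fast-path.c: every store in the
 * expansions above packs an a8r8g8b8 value into r5g6b5 with the expression
 * ((((s) >> 3) & 0x001f) | (((s) >> 5) & 0x07e0) | (((s) >> 8) & 0xf800)),
 * i.e. it keeps the top 5 bits of blue, top 6 of green and top 5 of red.
 * The hypothetical helper below restates that packing as a plain function.
 */
#include <stdint.h>

static inline uint16_t
sketch_8888_to_0565 (uint32_t s)
{
    uint16_t b = (s >> 3) & 0x001f;   /* blue  bits  7..3 -> 565 bits  4..0  */
    uint16_t g = (s >> 5) & 0x07e0;   /* green bits 15..10 -> 565 bits 10..5  */
    uint16_t r = (s >> 8) & 0xf800;   /* red   bits 23..19 -> 565 bits 15..11 */
    return (uint16_t) (r | g | b);
}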
1227FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, SRC, NONE)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_565_none_SRC
(uint16_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else
if (s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst)
>> 2) & 0x7)) | ((((*dst) << 5) & 0xfc00
) | (((*dst) >> 1) & 0x300)) | ((((*dst) << 8
) & 0xf80000) | (((*dst) << 3) & 0x70000))) | 0xff000000
); s1 = (s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__
; r1__ = (d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) &
0xff00ff) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r1__ &= 0xff00ff; } while
(0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while
(0); r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; if (a2 == 0xff) { *dst = ((((s2) >> 3) &
0x001f) | (((s2) >> 5) & 0x07e0) | (((s2) >>
8) & 0xf800)); } else if (s2) { d = ((((((*dst) <<
3) & 0xf8) | (((*dst) >> 2) & 0x7)) | ((((*dst
) << 5) & 0xfc00) | (((*dst) >> 1) & 0x300
)) | ((((*dst) << 8) & 0xf80000) | (((*dst) <<
3) & 0x70000))) | 0xff000000); s2 = (s2); a2 ^= 0xff; do
{ uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s2) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a2)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = ((((d) >> 3) & 0x001f) | (((d) >> 5) &
0x07e0) | (((d) >> 8) & 0xf800)); } dst++; } else {
*dst++ = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); *dst++
= ((((s2) >> 3) & 0x001f) | (((s2) >> 5) &
0x07e0) | (((s2) >> 8) & 0xf800)); } } if (w &
1) { x1 = vx >> 16; s1 = src[x1]; if (PIXMAN_OP_SRC ==
PIXMAN_OP_OVER) { a1 = ((s1) >> 24); if (a1 == 0xff) {
*dst = ((((s1) >> 3) & 0x001f) | (((s1) >> 5
) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else if
(s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst) >>
2) & 0x7)) | ((((*dst) << 5) & 0xfc00) | (((*dst
) >> 1) & 0x300)) | ((((*dst) << 8) & 0xf80000
) | (((*dst) << 3) & 0x70000))) | 0xff000000); s1 =
(s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ =
(d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff
) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) &
0xff00ff)) >> 8; r1__ &= 0xff00ff; } while (0); do
{ t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >>
8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while (0)
; r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; } else { *dst++ = ((((s1) >> 3) & 0x001f
) | (((s1) >> 5) & 0x07e0) | (((s1) >> 8) &
0xf800)); } } } static __inline__ __attribute__ ((__always_inline__
)) void scaled_nearest_scanline_8888_565_none_SRC_8888_565_none_SRC_wrapper
( const uint8_t *mask, uint16_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_565_none_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_565_none_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint16_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint16_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_8888_565_none_SRC_8888_565_none_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_565_none_SRC_8888_565_none_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_none_SRC_8888_565_none_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NONE) { static const
uint32_t zero[1] = { 0 }; if (y < 0 || y >= src_image->
bits.height) { scaled_nearest_scanline_8888_565_none_SRC_8888_565_none_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_565_none_SRC_8888_565_none_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_565_none_SRC_8888_565_none_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_none_SRC_8888_565_none_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_565_none_SRC_8888_565_none_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
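/*
 * Editor's sketch, not part of pixman-fast-path.c: with PIXMAN_REPEAT_NONE
 * the composite above splits each destination scanline into three runs --
 * left_pad pixels that map before the source image, width pixels that map
 * inside it, and right_pad pixels that map past it -- and feeds the pad runs
 * a single transparent "zero" pixel with vx and unit_x forced to 0.  The
 * helper and scanline_fn type below are hypothetical simplifications (mask
 * handling and the fully_transparent flag are omitted).
 */
#include <stdint.h>

typedef void (*scanline_fn) (uint16_t *dst, const uint32_t *src, int32_t w,
                             int32_t vx, int32_t unit_x);

static void
sketch_none_repeat_row (scanline_fn scanline, uint16_t *dst,
                        const uint32_t *src_row, int32_t left_pad,
                        int32_t width, int32_t right_pad,
                        int32_t vx, int32_t unit_x)
{
    static const uint32_t zero[1] = { 0 };

    if (left_pad > 0)            /* destination left of the source image  */
        scanline (dst, zero, left_pad, 0, 0);
    if (width > 0)               /* the part that actually samples src    */
        scanline (dst + left_pad, src_row, width, vx, unit_x);
    if (right_pad > 0)           /* destination right of the source image */
        scanline (dst + left_pad + width, zero, right_pad, 0, 0);
}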
1228FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, SRC, PAD)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_565_pad_SRC
(uint16_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else
if (s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst)
>> 2) & 0x7)) | ((((*dst) << 5) & 0xfc00
) | (((*dst) >> 1) & 0x300)) | ((((*dst) << 8
) & 0xf80000) | (((*dst) << 3) & 0x70000))) | 0xff000000
); s1 = (s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__
; r1__ = (d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) &
0xff00ff) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r1__ &= 0xff00ff; } while
(0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while
(0); r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; if (a2 == 0xff) { *dst = ((((s2) >> 3) &
0x001f) | (((s2) >> 5) & 0x07e0) | (((s2) >>
8) & 0xf800)); } else if (s2) { d = ((((((*dst) <<
3) & 0xf8) | (((*dst) >> 2) & 0x7)) | ((((*dst
) << 5) & 0xfc00) | (((*dst) >> 1) & 0x300
)) | ((((*dst) << 8) & 0xf80000) | (((*dst) <<
3) & 0x70000))) | 0xff000000); s2 = (s2); a2 ^= 0xff; do
{ uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s2) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a2)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = ((((d) >> 3) & 0x001f) | (((d) >> 5) &
0x07e0) | (((d) >> 8) & 0xf800)); } dst++; } else {
*dst++ = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); *dst++
= ((((s2) >> 3) & 0x001f) | (((s2) >> 5) &
0x07e0) | (((s2) >> 8) & 0xf800)); } } if (w &
1) { x1 = vx >> 16; s1 = src[x1]; if (PIXMAN_OP_SRC ==
PIXMAN_OP_OVER) { a1 = ((s1) >> 24); if (a1 == 0xff) {
*dst = ((((s1) >> 3) & 0x001f) | (((s1) >> 5
) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else if
(s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst) >>
2) & 0x7)) | ((((*dst) << 5) & 0xfc00) | (((*dst
) >> 1) & 0x300)) | ((((*dst) << 8) & 0xf80000
) | (((*dst) << 3) & 0x70000))) | 0xff000000); s1 =
(s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ =
(d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff
) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) &
0xff00ff)) >> 8; r1__ &= 0xff00ff; } while (0); do
{ t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >>
8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while (0)
; r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; } else { *dst++ = ((((s1) >> 3) & 0x001f
) | (((s1) >> 5) & 0x07e0) | (((s1) >> 8) &
0xf800)); } } } static __inline__ __attribute__ ((__always_inline__
)) void scaled_nearest_scanline_8888_565_pad_SRC_8888_565_pad_SRC_wrapper
( const uint8_t *mask, uint16_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_565_pad_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_565_pad_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint16_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint16_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_8888_565_pad_SRC_8888_565_pad_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_565_pad_SRC_8888_565_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_pad_SRC_8888_565_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NONE) { static const uint32_t
zero[1] = { 0 }; if (y < 0 || y >= src_image->bits.
height) { scaled_nearest_scanline_8888_565_pad_SRC_8888_565_pad_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_565_pad_SRC_8888_565_pad_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_565_pad_SRC_8888_565_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_pad_SRC_8888_565_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_565_pad_SRC_8888_565_pad_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
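/*
 * Editor's sketch, not part of pixman-fast-path.c: for PIXMAN_REPEAT_PAD the
 * repeat() calls in the expansion above amount to clamping the sample
 * coordinate into the valid range, so the edge pixels are replicated outward.
 * The helper name below is illustrative only.
 */
static inline int
sketch_repeat_pad (int coord, int size)
{
    if (coord < 0)
        return 0;            /* before the first pixel -> use the first */
    if (coord >= size)
        return size - 1;     /* past the last pixel    -> use the last  */
    return coord;
}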
1229FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, SRC, NORMAL)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_565_normal_SRC
(uint16_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else
if (s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst)
>> 2) & 0x7)) | ((((*dst) << 5) & 0xfc00
) | (((*dst) >> 1) & 0x300)) | ((((*dst) << 8
) & 0xf80000) | (((*dst) << 3) & 0x70000))) | 0xff000000
); s1 = (s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__
; r1__ = (d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) &
0xff00ff) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r1__ &= 0xff00ff; } while
(0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while
(0); r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; if (a2 == 0xff) { *dst = ((((s2) >> 3) &
0x001f) | (((s2) >> 5) & 0x07e0) | (((s2) >>
8) & 0xf800)); } else if (s2) { d = ((((((*dst) <<
3) & 0xf8) | (((*dst) >> 2) & 0x7)) | ((((*dst
) << 5) & 0xfc00) | (((*dst) >> 1) & 0x300
)) | ((((*dst) << 8) & 0xf80000) | (((*dst) <<
3) & 0x70000))) | 0xff000000); s2 = (s2); a2 ^= 0xff; do
{ uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s2) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a2)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = ((((d) >> 3) & 0x001f) | (((d) >> 5) &
0x07e0) | (((d) >> 8) & 0xf800)); } dst++; } else {
*dst++ = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); *dst++
= ((((s2) >> 3) & 0x001f) | (((s2) >> 5) &
0x07e0) | (((s2) >> 8) & 0xf800)); } } if (w &
1) { x1 = vx >> 16; s1 = src[x1]; if (PIXMAN_OP_SRC ==
PIXMAN_OP_OVER) { a1 = ((s1) >> 24); if (a1 == 0xff) {
*dst = ((((s1) >> 3) & 0x001f) | (((s1) >> 5
) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else if
(s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst) >>
2) & 0x7)) | ((((*dst) << 5) & 0xfc00) | (((*dst
) >> 1) & 0x300)) | ((((*dst) << 8) & 0xf80000
) | (((*dst) << 3) & 0x70000))) | 0xff000000); s1 =
(s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ =
(d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff
) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) &
0xff00ff)) >> 8; r1__ &= 0xff00ff; } while (0); do
{ t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >>
8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while (0)
; r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; } else { *dst++ = ((((s1) >> 3) & 0x001f
) | (((s1) >> 5) & 0x07e0) | (((s1) >> 8) &
0xf800)); } } } static __inline__ __attribute__ ((__always_inline__
)) void scaled_nearest_scanline_8888_565_normal_SRC_8888_565_normal_SRC_wrapper
( const uint8_t *mask, uint16_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_565_normal_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_565_normal_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint16_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint16_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_8888_565_normal_SRC_8888_565_normal_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_565_normal_SRC_8888_565_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_normal_SRC_8888_565_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NONE) { static const
uint32_t zero[1] = { 0 }; if (y < 0 || y >= src_image->
bits.height) { scaled_nearest_scanline_8888_565_normal_SRC_8888_565_normal_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_565_normal_SRC_8888_565_normal_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_565_normal_SRC_8888_565_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_normal_SRC_8888_565_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_565_normal_SRC_8888_565_normal_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
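/*
 * Editor's sketch, not part of pixman-fast-path.c: with PIXMAN_REPEAT_NORMAL
 * the scanlines above keep the 16.16 fixed-point source position vx inside
 * [0, width << 16) by repeated subtraction and advance it by unit_x per
 * destination pixel; the integer part (vx >> 16) selects the nearest source
 * column.  A hypothetical, simplified row loop (no mask, SRC only):
 */
#include <stdint.h>

static void
sketch_nearest_normal_row (uint16_t *dst, const uint32_t *src, int32_t w,
                           int32_t vx, int32_t unit_x, int32_t src_width)
{
    int32_t max_vx = src_width << 16;     /* one full source width in 16.16 */

    while (w-- > 0)
    {
        while (vx >= max_vx)              /* wrap back into the image       */
            vx -= max_vx;
        uint32_t s = src[vx >> 16];       /* nearest-neighbour sample       */
        *dst++ = (uint16_t) (((s >> 3) & 0x001f) |
                             ((s >> 5) & 0x07e0) |
                             ((s >> 8) & 0xf800));
        vx += unit_x;                     /* step by one destination pixel  */
    }
}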
1230FAST_NEAREST (565_565_normal, 0565, 0565, uint16_t, uint16_t, SRC, NORMAL)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_565_565_normal_SRC
(uint16_t *dst, const uint16_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint16_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_SRC != PIXMAN_OP_SRC
&& PIXMAN_OP_SRC != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 =
0xff; a2 = 0xff; if (a1 == 0xff) { *dst = (s1); } else if (s1
) { d = ((((((*dst) << 3) & 0xf8) | (((*dst) >>
2) & 0x7)) | ((((*dst) << 5) & 0xfc00) | (((*dst
) >> 1) & 0x300)) | ((((*dst) << 8) & 0xf80000
) | (((*dst) << 3) & 0x70000))) | 0xff000000); s1 =
((((((s1) << 3) & 0xf8) | (((s1) >> 2) &
0x7)) | ((((s1) << 5) & 0xfc00) | (((s1) >> 1
) & 0x300)) | ((((s1) << 8) & 0xf80000) | (((s1
) << 3) & 0x70000))) | 0xff000000); a1 ^= 0xff; do {
uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s1) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a1)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s1) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a1)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = ((((d) >> 3) & 0x001f) | (((d) >> 5) &
0x07e0) | (((d) >> 8) & 0xf800)); } dst++; if (a2 ==
0xff) { *dst = (s2); } else if (s2) { d = ((((((*dst) <<
3) & 0xf8) | (((*dst) >> 2) & 0x7)) | ((((*dst
) << 5) & 0xfc00) | (((*dst) >> 1) & 0x300
)) | ((((*dst) << 8) & 0xf80000) | (((*dst) <<
3) & 0x70000))) | 0xff000000); s2 = ((((((s2) << 3
) & 0xf8) | (((s2) >> 2) & 0x7)) | ((((s2) <<
5) & 0xfc00) | (((s2) >> 1) & 0x300)) | ((((s2
) << 8) & 0xf80000) | (((s2) << 3) & 0x70000
))) | 0xff000000); a2 ^= 0xff; do { uint32_t r1__, r2__, r3__
, t__; r1__ = (d); r2__ = (s2) & 0xff00ff; do { t__ = ((r1__
) & 0xff00ff) * ((a2)); t__ += 0x800080; r1__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r1__ &= 0xff00ff
; } while (0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff
); } while (0); r2__ = (d) >> 8; r3__ = ((s2) >> 8
) & 0xff00ff; do { t__ = ((r2__) & 0xff00ff) * ((a2))
; t__ += 0x800080; r2__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r2__ &= 0xff00ff; } while (0); do { t__ = (
(r2__) + (r3__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r2__ = (t__ & 0xff00ff); } while (0); (d) = r1__
| (r2__ << 8); } while (0); *dst = ((((d) >> 3) &
0x001f) | (((d) >> 5) & 0x07e0) | (((d) >> 8
) & 0xf800)); } dst++; } else { *dst++ = (s1); *dst++ = (
s2); } } if (w & 1) { x1 = vx >> 16; s1 = src[x1]; if
(PIXMAN_OP_SRC == PIXMAN_OP_OVER) { a1 = 0xff; if (a1 == 0xff
) { *dst = (s1); } else if (s1) { d = ((((((*dst) << 3)
& 0xf8) | (((*dst) >> 2) & 0x7)) | ((((*dst) <<
5) & 0xfc00) | (((*dst) >> 1) & 0x300)) | ((((
*dst) << 8) & 0xf80000) | (((*dst) << 3) &
0x70000))) | 0xff000000); s1 = ((((((s1) << 3) & 0xf8
) | (((s1) >> 2) & 0x7)) | ((((s1) << 5) &
0xfc00) | (((s1) >> 1) & 0x300)) | ((((s1) <<
8) & 0xf80000) | (((s1) << 3) & 0x70000))) | 0xff000000
); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ = (d
); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff
) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) &
0xff00ff)) >> 8; r1__ &= 0xff00ff; } while (0); do
{ t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >>
8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while (0)
; r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; } else { *dst++ = (s1); } } } static __inline__ __attribute__
((__always_inline__)) void scaled_nearest_scanline_565_565_normal_SRC_565_565_normal_SRC_wrapper
( const uint8_t *mask, uint16_t *dst, const uint16_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_565_565_normal_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_565_565_normal_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint16_t *dst_line; uint8_t
*mask_line; uint16_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint16_t *src; uint16_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint16_t
); (src_first_line) = ((uint16_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_565_565_normal_SRC_565_565_normal_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_565_565_normal_SRC_565_565_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_565_565_normal_SRC_565_565_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NONE) { static const
uint16_t zero[1] = { 0 }; if (y < 0 || y >= src_image->
bits.height) { scaled_nearest_scanline_565_565_normal_SRC_565_565_normal_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_565_565_normal_SRC_565_565_normal_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_565_565_normal_SRC_565_565_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_565_565_normal_SRC_565_565_normal_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_565_565_normal_SRC_565_565_normal_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
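/*
 * Editor's sketch, not part of pixman-fast-path.c: when the destination has
 * to be read back for blending, the expansions above widen r5g6b5 to
 * a8r8g8b8 by shifting each field up and replicating its top bits into the
 * freed low bits, then forcing alpha to 0xff.  The hypothetical helper below
 * restates that widening as a plain function.
 */
#include <stdint.h>

static inline uint32_t
sketch_0565_to_8888 (uint16_t d)
{
    uint32_t b = ((d << 3) & 0xf8)   | ((d >> 2) & 0x07);    /* 5 -> 8 bits */
    uint32_t g = ((d << 5) & 0xfc00) | ((d >> 1) & 0x300);   /* 6 -> 8 bits */
    uint32_t r = (((uint32_t) d << 8) & 0xf80000) |
                 (((uint32_t) d << 3) & 0x70000);            /* 5 -> 8 bits */
    return 0xff000000u | r | g | b;                          /* opaque alpha */
}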
1231FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, OVER, COVER)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_565_cover_OVER
(uint16_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_OVER != PIXMAN_OP_SRC
&& PIXMAN_OP_OVER != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (-1
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (-1 ==
PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else
if (s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst)
>> 2) & 0x7)) | ((((*dst) << 5) & 0xfc00
) | (((*dst) >> 1) & 0x300)) | ((((*dst) << 8
) & 0xf80000) | (((*dst) << 3) & 0x70000))) | 0xff000000
); s1 = (s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__
; r1__ = (d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) &
0xff00ff) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r1__ &= 0xff00ff; } while
(0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while
(0); r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; if (a2 == 0xff) { *dst = ((((s2) >> 3) &
0x001f) | (((s2) >> 5) & 0x07e0) | (((s2) >>
8) & 0xf800)); } else if (s2) { d = ((((((*dst) <<
3) & 0xf8) | (((*dst) >> 2) & 0x7)) | ((((*dst
) << 5) & 0xfc00) | (((*dst) >> 1) & 0x300
)) | ((((*dst) << 8) & 0xf80000) | (((*dst) <<
3) & 0x70000))) | 0xff000000); s2 = (s2); a2 ^= 0xff; do
{ uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s2) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a2)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = ((((d) >> 3) & 0x001f) | (((d) >> 5) &
0x07e0) | (((d) >> 8) & 0xf800)); } dst++; } else {
*dst++ = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); *dst++
= ((((s2) >> 3) & 0x001f) | (((s2) >> 5) &
0x07e0) | (((s2) >> 8) & 0xf800)); } } if (w &
1) { x1 = vx >> 16; s1 = src[x1]; if (PIXMAN_OP_OVER ==
PIXMAN_OP_OVER) { a1 = ((s1) >> 24); if (a1 == 0xff) {
*dst = ((((s1) >> 3) & 0x001f) | (((s1) >> 5
) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else if
(s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst) >>
2) & 0x7)) | ((((*dst) << 5) & 0xfc00) | (((*dst
) >> 1) & 0x300)) | ((((*dst) << 8) & 0xf80000
) | (((*dst) << 3) & 0x70000))) | 0xff000000); s1 =
(s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ =
(d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff
) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) &
0xff00ff)) >> 8; r1__ &= 0xff00ff; } while (0); do
{ t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >>
8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while (0)
; r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; } else { *dst++ = ((((s1) >> 3) & 0x001f
) | (((s1) >> 5) & 0x07e0) | (((s1) >> 8) &
0xf800)); } } } static __inline__ __attribute__ ((__always_inline__
)) void scaled_nearest_scanline_8888_565_cover_OVER_8888_565_cover_OVER_wrapper
( const uint8_t *mask, uint16_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_565_cover_OVER
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_565_cover_OVER (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint16_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint16_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (-1 == PIXMAN_REPEAT_NORMAL) { max_vx = src_image
->bits.width << 16; max_vy = src_image->bits.height
<< 16; repeat (PIXMAN_REPEAT_NORMAL, &vx, max_vx);
repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); } if (-1 == PIXMAN_REPEAT_PAD
|| -1 == PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds
(src_image->bits.width, vx, unit_x, &width, &left_pad
, &right_pad); vx += left_pad * unit_x; } while (--height
>= 0) { dst = dst_line; dst_line += dst_stride; if (0 &&
!0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (-1 == PIXMAN_REPEAT_NORMAL) repeat (PIXMAN_REPEAT_NORMAL
, &vy, max_vy); if (-1 == PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD
, &y, src_image->bits.height); src = src_first_line + src_stride
* y; if (left_pad > 0) { scaled_nearest_scanline_8888_565_cover_OVER_8888_565_cover_OVER_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_565_cover_OVER_8888_565_cover_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_cover_OVER_8888_565_cover_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (-1 == PIXMAN_REPEAT_NONE) { static const uint32_t zero[1
] = { 0 }; if (y < 0 || y >= src_image->bits.height)
{ scaled_nearest_scanline_8888_565_cover_OVER_8888_565_cover_OVER_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_565_cover_OVER_8888_565_cover_OVER_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_565_cover_OVER_8888_565_cover_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_cover_OVER_8888_565_cover_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_565_cover_OVER_8888_565_cover_OVER_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
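/*
 * Editor's sketch, not part of pixman-fast-path.c: the OVER scanlines above
 * special-case opaque sources (a == 0xff: plain store) and fully transparent
 * ones (s == 0: skip); otherwise they widen *dst to a8r8g8b8, compute
 * dst = src + dst * (255 - alpha) per channel on premultiplied values, and
 * narrow the result back to r5g6b5.  The hypothetical per-channel helper
 * below shows the rounding divide-by-255 and the saturating add used by the
 * UN8x4 macros, in simplified form.
 */
#include <stdint.h>

static inline uint8_t
sketch_over_channel (uint8_t s, uint8_t a, uint8_t d)
{
    /* d * (255 - a) / 255 with rounding, as in the (t + (t >> 8)) >> 8 trick */
    uint32_t t = (uint32_t) d * (255 - a) + 0x80;
    t = (t + (t >> 8)) >> 8;

    uint32_t r = t + s;                       /* add premultiplied source    */
    return (uint8_t) (r > 255 ? 255 : r);     /* saturate, as the macros do  */
}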
1232FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, OVER, NONE)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_565_none_OVER
(uint16_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_OVER != PIXMAN_OP_SRC
&& PIXMAN_OP_OVER != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else
if (s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst)
>> 2) & 0x7)) | ((((*dst) << 5) & 0xfc00
) | (((*dst) >> 1) & 0x300)) | ((((*dst) << 8
) & 0xf80000) | (((*dst) << 3) & 0x70000))) | 0xff000000
); s1 = (s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__
; r1__ = (d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) &
0xff00ff) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r1__ &= 0xff00ff; } while
(0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while
(0); r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; if (a2 == 0xff) { *dst = ((((s2) >> 3) &
0x001f) | (((s2) >> 5) & 0x07e0) | (((s2) >>
8) & 0xf800)); } else if (s2) { d = ((((((*dst) <<
3) & 0xf8) | (((*dst) >> 2) & 0x7)) | ((((*dst
) << 5) & 0xfc00) | (((*dst) >> 1) & 0x300
)) | ((((*dst) << 8) & 0xf80000) | (((*dst) <<
3) & 0x70000))) | 0xff000000); s2 = (s2); a2 ^= 0xff; do
{ uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s2) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a2)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = ((((d) >> 3) & 0x001f) | (((d) >> 5) &
0x07e0) | (((d) >> 8) & 0xf800)); } dst++; } else {
*dst++ = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); *dst++
= ((((s2) >> 3) & 0x001f) | (((s2) >> 5) &
0x07e0) | (((s2) >> 8) & 0xf800)); } } if (w &
1) { x1 = vx >> 16; s1 = src[x1]; if (PIXMAN_OP_OVER ==
PIXMAN_OP_OVER) { a1 = ((s1) >> 24); if (a1 == 0xff) {
*dst = ((((s1) >> 3) & 0x001f) | (((s1) >> 5
) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else if
(s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst) >>
2) & 0x7)) | ((((*dst) << 5) & 0xfc00) | (((*dst
) >> 1) & 0x300)) | ((((*dst) << 8) & 0xf80000
) | (((*dst) << 3) & 0x70000))) | 0xff000000); s1 =
(s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ =
(d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff
) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) &
0xff00ff)) >> 8; r1__ &= 0xff00ff; } while (0); do
{ t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >>
8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while (0)
; r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; } else { *dst++ = ((((s1) >> 3) & 0x001f
) | (((s1) >> 5) & 0x07e0) | (((s1) >> 8) &
0xf800)); } } } static __inline__ __attribute__ ((__always_inline__
)) void scaled_nearest_scanline_8888_565_none_OVER_8888_565_none_OVER_wrapper
( const uint8_t *mask, uint16_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_565_none_OVER
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_565_none_OVER (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint16_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint16_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_8888_565_none_OVER_8888_565_none_OVER_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_565_none_OVER_8888_565_none_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_none_OVER_8888_565_none_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NONE) { static const
uint32_t zero[1] = { 0 }; if (y < 0 || y >= src_image->
bits.height) { scaled_nearest_scanline_8888_565_none_OVER_8888_565_none_OVER_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_565_none_OVER_8888_565_none_OVER_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_565_none_OVER_8888_565_none_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_none_OVER_8888_565_none_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_565_none_OVER_8888_565_none_OVER_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
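
The stores and loads in the expansions above convert between a8r8g8b8 and r5g6b5 on every pixel: a 32-bit source value is packed down to 16 bits by keeping the top 5/6/5 bits of each channel, and a 16-bit destination value is widened back to 32 bits with bit replication and a forced 0xff alpha before blending. The following is only a sketch of those two conversions; pixman has its own helpers for this, and the function names below are illustrative, not pixman's.

#include <stdint.h>

/* a8r8g8b8 -> r5g6b5: keep the top 5/6/5 bits of each colour channel,
 * using the same shifts and masks as the stores in the listing above. */
uint16_t
pack_8888_to_0565 (uint32_t s)
{
    return (uint16_t) (((s >> 3) & 0x001f) |   /* blue  */
                       ((s >> 5) & 0x07e0) |   /* green */
                       ((s >> 8) & 0xf800));   /* red   */
}

/* r5g6b5 -> a8r8g8b8: replicate the high bits of each channel into the
 * low bits and force alpha to 0xff, as the destination loads above do. */
uint32_t
unpack_0565_to_8888 (uint16_t d)
{
    uint32_t v = d;

    return (((v << 3) & 0x0000f8) | ((v >> 2) & 0x000007) |   /* blue  */
            ((v << 5) & 0x00fc00) | ((v >> 1) & 0x000300) |   /* green */
            ((v << 8) & 0xf80000) | ((v << 3) & 0x070000)) |  /* red   */
           0xff000000;                                        /* alpha */
}
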
1233FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, OVER, PAD)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_565_pad_OVER
(uint16_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_OVER != PIXMAN_OP_SRC
&& PIXMAN_OP_OVER != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else
if (s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst)
>> 2) & 0x7)) | ((((*dst) << 5) & 0xfc00
) | (((*dst) >> 1) & 0x300)) | ((((*dst) << 8
) & 0xf80000) | (((*dst) << 3) & 0x70000))) | 0xff000000
); s1 = (s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__
; r1__ = (d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) &
0xff00ff) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r1__ &= 0xff00ff; } while
(0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while
(0); r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; if (a2 == 0xff) { *dst = ((((s2) >> 3) &
0x001f) | (((s2) >> 5) & 0x07e0) | (((s2) >>
8) & 0xf800)); } else if (s2) { d = ((((((*dst) <<
3) & 0xf8) | (((*dst) >> 2) & 0x7)) | ((((*dst
) << 5) & 0xfc00) | (((*dst) >> 1) & 0x300
)) | ((((*dst) << 8) & 0xf80000) | (((*dst) <<
3) & 0x70000))) | 0xff000000); s2 = (s2); a2 ^= 0xff; do
{ uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s2) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a2)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = ((((d) >> 3) & 0x001f) | (((d) >> 5) &
0x07e0) | (((d) >> 8) & 0xf800)); } dst++; } else {
*dst++ = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); *dst++
= ((((s2) >> 3) & 0x001f) | (((s2) >> 5) &
0x07e0) | (((s2) >> 8) & 0xf800)); } } if (w &
1) { x1 = vx >> 16; s1 = src[x1]; if (PIXMAN_OP_OVER ==
PIXMAN_OP_OVER) { a1 = ((s1) >> 24); if (a1 == 0xff) {
*dst = ((((s1) >> 3) & 0x001f) | (((s1) >> 5
) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else if
(s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst) >>
2) & 0x7)) | ((((*dst) << 5) & 0xfc00) | (((*dst
) >> 1) & 0x300)) | ((((*dst) << 8) & 0xf80000
) | (((*dst) << 3) & 0x70000))) | 0xff000000); s1 =
(s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ =
(d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff
) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) &
0xff00ff)) >> 8; r1__ &= 0xff00ff; } while (0); do
{ t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >>
8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while (0)
; r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; } else { *dst++ = ((((s1) >> 3) & 0x001f
) | (((s1) >> 5) & 0x07e0) | (((s1) >> 8) &
0xf800)); } } } static __inline__ __attribute__ ((__always_inline__
)) void scaled_nearest_scanline_8888_565_pad_OVER_8888_565_pad_OVER_wrapper
( const uint8_t *mask, uint16_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_565_pad_OVER
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_565_pad_OVER (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint16_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint16_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_8888_565_pad_OVER_8888_565_pad_OVER_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_565_pad_OVER_8888_565_pad_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_pad_OVER_8888_565_pad_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NONE) { static const uint32_t
zero[1] = { 0 }; if (y < 0 || y >= src_image->bits.
height) { scaled_nearest_scanline_8888_565_pad_OVER_8888_565_pad_OVER_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_565_pad_OVER_8888_565_pad_OVER_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_565_pad_OVER_8888_565_pad_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_pad_OVER_8888_565_pad_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_565_pad_OVER_8888_565_pad_OVER_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
1234FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, OVER, NORMAL)static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_8888_565_normal_OVER
(uint16_t *dst, const uint32_t *src, int32_t w, pixman_fixed_t
vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t
fully_transparent_src) { uint32_t d; uint32_t s1, s2; uint8_t
a1, a2; int x1, x2; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER &&
fully_transparent_src) return; if (PIXMAN_OP_OVER != PIXMAN_OP_SRC
&& PIXMAN_OP_OVER != PIXMAN_OP_OVER) abort(); while (
(w -= 2) >= 0) { x1 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s1 = src[x1]; x2 = vx >> 16; vx += unit_x; if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NORMAL) { while (vx >= max_vx) vx -= max_vx
; } s2 = src[x2]; if (PIXMAN_OP_OVER == PIXMAN_OP_OVER) { a1 =
((s1) >> 24); a2 = ((s2) >> 24); if (a1 == 0xff)
{ *dst = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else
if (s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst)
>> 2) & 0x7)) | ((((*dst) << 5) & 0xfc00
) | (((*dst) >> 1) & 0x300)) | ((((*dst) << 8
) & 0xf80000) | (((*dst) << 3) & 0x70000))) | 0xff000000
); s1 = (s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__
; r1__ = (d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) &
0xff00ff) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >>
8) & 0xff00ff)) >> 8; r1__ &= 0xff00ff; } while
(0); do { t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__
>> 8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while
(0); r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; if (a2 == 0xff) { *dst = ((((s2) >> 3) &
0x001f) | (((s2) >> 5) & 0x07e0) | (((s2) >>
8) & 0xf800)); } else if (s2) { d = ((((((*dst) <<
3) & 0xf8) | (((*dst) >> 2) & 0x7)) | ((((*dst
) << 5) & 0xfc00) | (((*dst) >> 1) & 0x300
)) | ((((*dst) << 8) & 0xf80000) | (((*dst) <<
3) & 0x70000))) | 0xff000000); s2 = (s2); a2 ^= 0xff; do
{ uint32_t r1__, r2__, r3__, t__; r1__ = (d); r2__ = (s2) &
0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((a2)); t__ +=
0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff)) >>
8; r1__ &= 0xff00ff; } while (0); do { t__ = ((r1__) + (
r2__)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)
; r1__ = (t__ & 0xff00ff); } while (0); r2__ = (d) >>
8; r3__ = ((s2) >> 8) & 0xff00ff; do { t__ = ((r2__
) & 0xff00ff) * ((a2)); t__ += 0x800080; r2__ = (t__ + ((
t__ >> 8) & 0xff00ff)) >> 8; r2__ &= 0xff00ff
; } while (0); do { t__ = ((r2__) + (r3__)); t__ |= 0x10000100
- ((t__ >> 8) & 0xff00ff); r2__ = (t__ & 0xff00ff
); } while (0); (d) = r1__ | (r2__ << 8); } while (0); *
dst = ((((d) >> 3) & 0x001f) | (((d) >> 5) &
0x07e0) | (((d) >> 8) & 0xf800)); } dst++; } else {
*dst++ = ((((s1) >> 3) & 0x001f) | (((s1) >>
5) & 0x07e0) | (((s1) >> 8) & 0xf800)); *dst++
= ((((s2) >> 3) & 0x001f) | (((s2) >> 5) &
0x07e0) | (((s2) >> 8) & 0xf800)); } } if (w &
1) { x1 = vx >> 16; s1 = src[x1]; if (PIXMAN_OP_OVER ==
PIXMAN_OP_OVER) { a1 = ((s1) >> 24); if (a1 == 0xff) {
*dst = ((((s1) >> 3) & 0x001f) | (((s1) >> 5
) & 0x07e0) | (((s1) >> 8) & 0xf800)); } else if
(s1) { d = ((((((*dst) << 3) & 0xf8) | (((*dst) >>
2) & 0x7)) | ((((*dst) << 5) & 0xfc00) | (((*dst
) >> 1) & 0x300)) | ((((*dst) << 8) & 0xf80000
) | (((*dst) << 3) & 0x70000))) | 0xff000000); s1 =
(s1); a1 ^= 0xff; do { uint32_t r1__, r2__, r3__, t__; r1__ =
(d); r2__ = (s1) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff
) * ((a1)); t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) &
0xff00ff)) >> 8; r1__ &= 0xff00ff; } while (0); do
{ t__ = ((r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >>
8) & 0xff00ff); r1__ = (t__ & 0xff00ff); } while (0)
; r2__ = (d) >> 8; r3__ = ((s1) >> 8) & 0xff00ff
; do { t__ = ((r2__) & 0xff00ff) * ((a1)); t__ += 0x800080
; r2__ = (t__ + ((t__ >> 8) & 0xff00ff)) >> 8
; r2__ &= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__
)); t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__
= (t__ & 0xff00ff); } while (0); (d) = r1__ | (r2__ <<
8); } while (0); *dst = ((((d) >> 3) & 0x001f) | (
((d) >> 5) & 0x07e0) | (((d) >> 8) & 0xf800
)); } dst++; } else { *dst++ = ((((s1) >> 3) & 0x001f
) | (((s1) >> 5) & 0x07e0) | (((s1) >> 8) &
0xf800)); } } } static __inline__ __attribute__ ((__always_inline__
)) void scaled_nearest_scanline_8888_565_normal_OVER_8888_565_normal_OVER_wrapper
( const uint8_t *mask, uint16_t *dst, const uint32_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_8888_565_normal_OVER
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_8888_565_normal_OVER (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint16_t *dst_line; uint8_t
*mask_line; uint32_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint32_t *src; uint16_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_first_line) = ((uint32_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_NORMAL
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_8888_565_normal_OVER_8888_565_normal_OVER_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_8888_565_normal_OVER_8888_565_normal_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_normal_OVER_8888_565_normal_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_NORMAL == PIXMAN_REPEAT_NONE) { static const
uint32_t zero[1] = { 0 }; if (y < 0 || y >= src_image->
bits.height) { scaled_nearest_scanline_8888_565_normal_OVER_8888_565_normal_OVER_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_8888_565_normal_OVER_8888_565_normal_OVER_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_8888_565_normal_OVER_8888_565_normal_OVER_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_8888_565_normal_OVER_8888_565_normal_OVER_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_8888_565_normal_OVER_8888_565_normal_OVER_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
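
When a source pixel is neither fully opaque nor fully transparent, the OVER main loops above blend it into the widened destination with the repeated do { ... } while (0) blocks: two packed 8-bit channels are multiplied by (255 - source alpha), the source channels are added with saturation, and the result is repacked to r5g6b5. The sketch below restates that arithmetic one channel at a time; it is illustrative only, since the real code operates on two channels per 32-bit word.

#include <stdint.h>

/* Approximately (x * a) / 255 with rounding: the scalar form of the
 * "t__ += 0x800080; ... (t__ + ((t__ >> 8) & 0xff00ff)) >> 8" steps. */
uint8_t
mul_un8 (uint8_t x, uint8_t a)
{
    uint32_t t = (uint32_t) x * a + 0x80;

    return (uint8_t) ((t + (t >> 8)) >> 8);
}

/* Saturating add: the scalar form of
 * "t__ |= 0x10000100 - ((t__ >> 8) & 0xff00ff)". */
uint8_t
add_un8 (uint8_t x, uint8_t y)
{
    uint32_t t = (uint32_t) x + y;

    return (uint8_t) (t > 0xff ? 0xff : t);
}

/* One colour channel of the blend applied to non-opaque source pixels:
 * dest = src + dest * (255 - src_alpha). */
uint8_t
over_un8 (uint8_t dest, uint8_t src, uint8_t src_alpha)
{
    return add_un8 (mul_un8 (dest, 0xff ^ src_alpha), src);
}
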
1235
1236#define REPEAT_MIN_WIDTH32 32
1237
1238static void
1239fast_composite_tiled_repeat (pixman_implementation_t *imp,
1240 pixman_composite_info_t *info)
1241{
1242 PIXMAN_COMPOSITE_ARGS (info)__attribute__((unused)) pixman_op_t op = info->op; __attribute__
((unused)) pixman_image_t * src_image = info->src_image; __attribute__
((unused)) pixman_image_t * mask_image = info->mask_image;
__attribute__((unused)) pixman_image_t * dest_image = info->
dest_image; __attribute__((unused)) int32_t src_x = info->
src_x; __attribute__((unused)) int32_t src_y = info->src_y
; __attribute__((unused)) int32_t mask_x = info->mask_x; __attribute__
((unused)) int32_t mask_y = info->mask_y; __attribute__((unused
)) int32_t dest_x = info->dest_x; __attribute__((unused)) int32_t
dest_y = info->dest_y; __attribute__((unused)) int32_t width
= info->width; __attribute__((unused)) int32_t height = info
->height
;
1243 pixman_composite_func_t func;
1244 pixman_format_code_t mask_format;
1245 uint32_t src_flags, mask_flags;
1246
1247 src_flags = (info->src_flags & ~FAST_PATH_NORMAL_REPEAT((1 << 15) | (1 << 3) | (1 << 4))) |
1248 FAST_PATH_SAMPLES_COVER_CLIP_NEAREST(1 << 23);
1249
1250 if (mask_image)
1. Assuming 'mask_image' is null
2. Taking false branch
1251 {
1252 mask_format = mask_image->common.extended_format_code;
1253 mask_flags = info->mask_flags;
1254 }
1255 else
1256 {
1257 mask_format = PIXMAN_null(((0) << 24) | ((0) << 16) | ((0) << 12) | (
(0) << 8) | ((0) << 4) | ((0)))
;
1258 mask_flags = FAST_PATH_IS_OPAQUE(1 << 13);
1259 }
1260
1261 if (_pixman_lookup_composite_function (
3. Taking true branch
1262 imp->toplevel, info->op,
1263 src_image->common.extended_format_code, src_flags,
1264 mask_format, mask_flags,
1265 dest_image->common.extended_format_code, info->dest_flags,
1266 &imp, &func))
1267 {
1268 int32_t sx, sy;
1269 int32_t width_remain;
1270 int32_t num_pixels;
1271 int32_t src_width;
1272 int32_t i, j;
1273 pixman_image_t extended_src_image;
1274 uint32_t extended_src[REPEAT_MIN_WIDTH32 * 2];
1275 pixman_bool_t need_src_extension;
1276 uint32_t *src_line;
1277 int32_t src_stride;
1278 int32_t src_bpp;
1279 pixman_composite_info_t info2 = *info;
1280
1281 src_bpp = PIXMAN_FORMAT_BPP (src_image->bits.format)(((src_image->bits.format) >> 24) );
1282
1283 if (src_image->bits.width < REPEAT_MIN_WIDTH32 &&
7. Taking true branch
1284 (src_bpp == 32 || src_bpp == 16 || src_bpp == 8) &&
4. Assuming 'src_bpp' is not equal to 32
5. Assuming 'src_bpp' is not equal to 16
6. Assuming 'src_bpp' is equal to 8
1285 !src_image->bits.indexed)
1286 {
1287 sx = src_x;
1288 sx = MOD (sx, src_image->bits.width)((sx) < 0 ? ((src_image->bits.width) - ((-(sx) - 1) % (
src_image->bits.width))) - 1 : (sx) % (src_image->bits.
width))
;
8. Within the expansion of the macro 'MOD':
   a. Assuming 'sx' is >= 0
1289 sx += width;
1290 src_width = 0;
1291
1292 while (src_width < REPEAT_MIN_WIDTH32 && src_width <= sx)
9. Assuming 'src_width' is > 'sx'
10. Loop condition is false. Execution continues on line 1295
1293 src_width += src_image->bits.width;
1294
1295 src_stride = (src_width * (src_bpp >> 3) + 3) / (int) sizeof (uint32_t);
1296
1297 /* Initialize/validate stack-allocated temporary image */
1298 _pixman_bits_image_init (&extended_src_image, src_image->bits.format,
1299 src_width, 1, &extended_src[0], src_stride);
1300 _pixman_image_validate (&extended_src_image);
1301
1302 info2.src_image = &extended_src_image;
1303 need_src_extension = TRUE1;
1304 }
1305 else
1306 {
1307 src_width = src_image->bits.width;
1308 need_src_extension = FALSE0;
1309 }
1310
1311 sx = src_x;
1312 sy = src_y;
1313
1314 while (--height >= 0)
11. Loop condition is true. Entering loop body
1315 {
1316 sx = MOD (sx, src_width)((sx) < 0 ? ((src_width) - ((-(sx) - 1) % (src_width))) - 1
: (sx) % (src_width))
;
12. Within the expansion of the macro 'MOD':
    a. Division by zero
1317 sy = MOD (sy, src_image->bits.height)((sy) < 0 ? ((src_image->bits.height) - ((-(sy) - 1) % (
src_image->bits.height))) - 1 : (sy) % (src_image->bits
.height))
;
1318
1319 if (need_src_extension)
1320 {
1321 if (src_bpp == 32)
1322 {
1323 PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint32_t, src_stride, src_line, 1)do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_line) = ((uint32_t *) __bits__) + (src_stride) * (sy)
+ (1) * (0); } while (0)
;
1324
1325 for (i = 0; i < src_width; )
1326 {
1327 for (j = 0; j < src_image->bits.width; j++, i++)
1328 extended_src[i] = src_line[j];
1329 }
1330 }
1331 else if (src_bpp == 16)
1332 {
1333 uint16_t *src_line_16;
1334
1335 PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint16_t, src_stride,do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint16_t
); (src_line_16) = ((uint16_t *) __bits__) + (src_stride) * (
sy) + (1) * (0); } while (0)
1336 src_line_16, 1)do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint16_t
); (src_line_16) = ((uint16_t *) __bits__) + (src_stride) * (
sy) + (1) * (0); } while (0)
;
1337 src_line = (uint32_t*)src_line_16;
1338
1339 for (i = 0; i < src_width; )
1340 {
1341 for (j = 0; j < src_image->bits.width; j++, i++)
1342 ((uint16_t*)extended_src)[i] = ((uint16_t*)src_line)[j];
1343 }
1344 }
1345 else if (src_bpp == 8)
1346 {
1347 uint8_t *src_line_8;
1348
1349 PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint8_t, src_stride,do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint8_t
); (src_line_8) = ((uint8_t *) __bits__) + (src_stride) * (sy
) + (1) * (0); } while (0)
1350 src_line_8, 1)do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint8_t
); (src_line_8) = ((uint8_t *) __bits__) + (src_stride) * (sy
) + (1) * (0); } while (0)
;
1351 src_line = (uint32_t*)src_line_8;
1352
1353 for (i = 0; i < src_width; )
1354 {
1355 for (j = 0; j < src_image->bits.width; j++, i++)
1356 ((uint8_t*)extended_src)[i] = ((uint8_t*)src_line)[j];
1357 }
1358 }
1359
1360 info2.src_y = 0;
1361 }
1362 else
1363 {
1364 info2.src_y = sy;
1365 }
1366
1367 width_remain = width;
1368
1369 while (width_remain > 0)
1370 {
1371 num_pixels = src_width - sx;
1372
1373 if (num_pixels > width_remain)
1374 num_pixels = width_remain;
1375
1376 info2.src_x = sx;
1377 info2.width = num_pixels;
1378 info2.height = 1;
1379
1380 func (imp, &info2);
1381
1382 width_remain -= num_pixels;
1383 info2.mask_x += num_pixels;
1384 info2.dest_x += num_pixels;
1385 sx = 0;
1386 }
1387
1388 sx = src_x;
1389 sy++;
1390 info2.mask_x = info->mask_x;
1391 info2.mask_y++;
1392 info2.dest_x = info->dest_x;
1393 info2.dest_y++;
1394 }
1395
1396 if (need_src_extension)
1397 _pixman_image_fini (&extended_src_image);
1398 }
1399 else
1400 {
1401 _pixman_log_error (FUNC, "Didn't find a suitable function ")do { } while (0);
1402 }
1403}
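
The reported path runs through fast_composite_tiled_repeat above: with mask_image null, a composite function found, src_bpp equal to 8 and a source narrower than REPEAT_MIN_WIDTH, the analyzer assumes the widening loop at line 1292 never executes, so src_width is still 0 when line 1316 evaluates MOD (sx, src_width), whose expansion divides by its second argument. Whether the loop can really be skipped depends on sx: at that point sx is MOD (src_x, bits.width) plus width, so it takes a negative width (which the analyzer does not constrain here) to leave src_width at 0. The following is a minimal, self-contained sketch of the arithmetic the report complains about, using hypothetical input values that do not come from the report.

#include <stdio.h>
#include <stdint.h>

/* Same shape as the MOD expansion in the listing: an always-positive
 * remainder.  It divides by its second argument, so it must never be
 * called with b == 0. */
#define MOD(a, b) ((a) < 0 ? ((b) - ((-(a) - 1) % (b))) - 1 : (a) % (b))

int
main (void)
{
    /* Hypothetical inputs. */
    int32_t bits_width = 8;     /* source narrower than REPEAT_MIN_WIDTH (32) */
    int32_t width      = -16;   /* only a negative width leaves the loop below idle */
    int32_t sx         = MOD (3, bits_width) + width;   /* lines 1287-1289 */
    int32_t src_width  = 0;

    /* Widening loop from line 1292: skipped entirely when sx is negative. */
    while (src_width < 32 && src_width <= sx)
        src_width += bits_width;

    if (src_width == 0)
    {
        printf ("src_width == 0: MOD (sx, src_width) at line 1316 would divide by zero\n");
        return 1;
    }

    printf ("sx -> %d\n", MOD (sx, src_width));
    return 0;
}
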
1404
1405/* Use more unrolling for src_0565_0565 because it is typically CPU bound */
1406static force_inline__inline__ __attribute__ ((__always_inline__)) void
1407scaled_nearest_scanline_565_565_SRC (uint16_t * dst,
1408 const uint16_t * src,
1409 int32_t w,
1410 pixman_fixed_t vx,
1411 pixman_fixed_t unit_x,
1412 pixman_fixed_t max_vx,
1413 pixman_bool_t fully_transparent_src)
1414{
1415 uint16_t tmp1, tmp2, tmp3, tmp4;
1416 while ((w -= 4) >= 0)
1417 {
1418 tmp1 = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1419 vx += unit_x;
1420 tmp2 = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1421 vx += unit_x;
1422 tmp3 = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1423 vx += unit_x;
1424 tmp4 = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1425 vx += unit_x;
1426 *dst++ = tmp1;
1427 *dst++ = tmp2;
1428 *dst++ = tmp3;
1429 *dst++ = tmp4;
1430 }
1431 if (w & 2)
1432 {
1433 tmp1 = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1434 vx += unit_x;
1435 tmp2 = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1436 vx += unit_x;
1437 *dst++ = tmp1;
1438 *dst++ = tmp2;
1439 }
1440 if (w & 1)
1441 *dst++ = src[pixman_fixed_to_int (vx)((int) ((vx) >> 16))];
1442}
1443
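
Each scanline walker above advances a 16.16 fixed-point source coordinate vx by unit_x per destination pixel and fetches the nearest source pixel with vx >> 16, which is what pixman_fixed_to_int expands to in the listing; the 4x/2x/1x tail of scaled_nearest_scanline_565_565_SRC only unrolls that walk. A stripped-down sketch of the stepping, assuming 16.16 fixed point (the type and function names here are illustrative):

#include <stdint.h>

typedef int32_t fixed_16_16;   /* stands in for pixman_fixed_t here */

/* Nearest-neighbour horizontal scaling of one r5g6b5 scanline: vx is a
 * 16.16 source coordinate, unit_x the per-destination-pixel increment. */
void
scale_row_565 (uint16_t *dst, const uint16_t *src, int w,
               fixed_16_16 vx, fixed_16_16 unit_x)
{
    while (w-- > 0)
    {
        *dst++ = src[vx >> 16];   /* pixman_fixed_to_int (vx) */
        vx += unit_x;
    }
}

Halving a row, for example, corresponds to unit_x = 2 << 16 with vx starting half a source pixel in, matching the (((pixman_fixed_t) ((1) << 16))) / 2 offset added to v.vector[0] in the main loops above.
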
1444FAST_NEAREST_MAINLOOP (565_565_cover_SRC,static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_565_565_SRC_565_565_cover_SRC_wrapper
( const uint8_t *mask, uint16_t *dst, const uint16_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_565_565_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_565_565_cover_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint16_t *dst_line; uint8_t
*mask_line; uint16_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint16_t *src; uint16_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint16_t
); (src_first_line) = ((uint16_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (-1 == PIXMAN_REPEAT_NORMAL) { max_vx = src_image
->bits.width << 16; max_vy = src_image->bits.height
<< 16; repeat (PIXMAN_REPEAT_NORMAL, &vx, max_vx);
repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); } if (-1 == PIXMAN_REPEAT_PAD
|| -1 == PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds
(src_image->bits.width, vx, unit_x, &width, &left_pad
, &right_pad); vx += left_pad * unit_x; } while (--height
>= 0) { dst = dst_line; dst_line += dst_stride; if (0 &&
!0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (-1 == PIXMAN_REPEAT_NORMAL) repeat (PIXMAN_REPEAT_NORMAL
, &vy, max_vy); if (-1 == PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD
, &y, src_image->bits.height); src = src_first_line + src_stride
* y; if (left_pad > 0) { scaled_nearest_scanline_565_565_SRC_565_565_cover_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_565_565_SRC_565_565_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_565_565_SRC_565_565_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (-1 == PIXMAN_REPEAT_NONE) { static const uint16_t zero[1
] = { 0 }; if (y < 0 || y >= src_image->bits.height)
{ scaled_nearest_scanline_565_565_SRC_565_565_cover_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_565_565_SRC_565_565_cover_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_565_565_SRC_565_565_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_565_565_SRC_565_565_cover_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_565_565_SRC_565_565_cover_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
1445 scaled_nearest_scanline_565_565_SRC,
1446 uint16_t, uint16_t, COVER)
1447FAST_NEAREST_MAINLOOP (565_565_none_SRC,static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_565_565_SRC_565_565_none_SRC_wrapper
( const uint8_t *mask, uint16_t *dst, const uint16_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_565_565_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_565_565_none_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint16_t *dst_line; uint8_t
*mask_line; uint16_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint16_t *src; uint16_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint16_t
); (src_first_line) = ((uint16_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_NONE
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_565_565_SRC_565_565_none_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_565_565_SRC_565_565_none_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_565_565_SRC_565_565_none_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_NONE == PIXMAN_REPEAT_NONE) { static const
uint16_t zero[1] = { 0 }; if (y < 0 || y >= src_image->
bits.height) { scaled_nearest_scanline_565_565_SRC_565_565_none_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_565_565_SRC_565_565_none_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_565_565_SRC_565_565_none_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_565_565_SRC_565_565_none_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_565_565_SRC_565_565_none_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
1448 scaled_nearest_scanline_565_565_SRC,
1449 uint16_t, uint16_t, NONE)
1450FAST_NEAREST_MAINLOOP (565_565_pad_SRC,static __inline__ __attribute__ ((__always_inline__)) void scaled_nearest_scanline_565_565_SRC_565_565_pad_SRC_wrapper
( const uint8_t *mask, uint16_t *dst, const uint16_t *src, int32_t
w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx
, pixman_bool_t fully_transparent_src) { scaled_nearest_scanline_565_565_SRC
(dst, src, w, vx, unit_x, max_vx, fully_transparent_src); } static
void fast_composite_scaled_nearest_565_565_pad_SRC (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint16_t *dst_line; uint8_t
*mask_line; uint16_t *src_first_line; int y; pixman_fixed_t max_vx
= (2147483647); pixman_fixed_t max_vy; pixman_vector_t v; pixman_fixed_t
vx, vy; pixman_fixed_t unit_x, unit_y; int32_t left_pad, right_pad
; uint16_t *src; uint16_t *dst; uint8_t solid_mask; const uint8_t
*mask = &solid_mask; int src_stride, mask_stride, dst_stride
; do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint16_t); (dst_line) = ((uint16_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); if (0) { if (0) solid_mask
= _pixman_image_get_solid (imp, mask_image, dest_image->bits
.format); else do { uint32_t *__bits__; int __stride__; __bits__
= mask_image->bits.bits; __stride__ = mask_image->bits
.rowstride; (mask_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (mask_line) = ((uint8_t *) __bits__
) + (mask_stride) * (mask_y) + (1) * (mask_x); } while (0); }
do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint16_t
); (src_first_line) = ((uint16_t *) __bits__) + (src_stride) *
(0) + (1) * (0); } while (0); v.vector[0] = ((pixman_fixed_t
) ((src_x) << 16)) + (((pixman_fixed_t) ((1) << 16
))) / 2; v.vector[1] = ((pixman_fixed_t) ((src_y) << 16
)) + (((pixman_fixed_t) ((1) << 16))) / 2; v.vector[2] =
(((pixman_fixed_t) ((1) << 16))); if (!pixman_transform_point_3d
(src_image->common.transform, &v)) return; unit_x = src_image
->common.transform->matrix[0][0]; unit_y = src_image->
common.transform->matrix[1][1]; v.vector[0] -= ((pixman_fixed_t
) 1); v.vector[1] -= ((pixman_fixed_t) 1); vx = v.vector[0]; vy
= v.vector[1]; if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL
) { max_vx = src_image->bits.width << 16; max_vy = src_image
->bits.height << 16; repeat (PIXMAN_REPEAT_NORMAL, &
vx, max_vx); repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); }
if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_PAD || PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_NONE) { pad_repeat_get_scanline_bounds (src_image
->bits.width, vx, unit_x, &width, &left_pad, &
right_pad); vx += left_pad * unit_x; } while (--height >= 0
) { dst = dst_line; dst_line += dst_stride; if (0 && !
0) { mask = mask_line; mask_line += mask_stride; } y = vy >>
16; vy += unit_y; if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NORMAL
) repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); if (PIXMAN_REPEAT_PAD
== PIXMAN_REPEAT_PAD) { repeat (PIXMAN_REPEAT_PAD, &y, src_image
->bits.height); src = src_first_line + src_stride * y; if (
left_pad > 0) { scaled_nearest_scanline_565_565_SRC_565_565_pad_SRC_wrapper
(mask, dst, src, left_pad, 0, 0, 0, 0); } if (width > 0) {
scaled_nearest_scanline_565_565_SRC_565_565_pad_SRC_wrapper (
mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_565_565_SRC_565_565_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, src
+ src_image->bits.width - 1, right_pad, 0, 0, 0, 0); } } else
if (PIXMAN_REPEAT_PAD == PIXMAN_REPEAT_NONE) { static const uint16_t
zero[1] = { 0 }; if (y < 0 || y >= src_image->bits.
height) { scaled_nearest_scanline_565_565_SRC_565_565_pad_SRC_wrapper
(mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, 1);
continue; } src = src_first_line + src_stride * y; if (left_pad
> 0) { scaled_nearest_scanline_565_565_SRC_565_565_pad_SRC_wrapper
(mask, dst, zero, left_pad, 0, 0, 0, 1); } if (width > 0)
{ scaled_nearest_scanline_565_565_SRC_565_565_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad), dst + left_pad, src, width, vx, unit_x
, 0, 0); } if (right_pad > 0) { scaled_nearest_scanline_565_565_SRC_565_565_pad_SRC_wrapper
(mask + (0 ? 0 : left_pad + width), dst + left_pad + width, zero
, right_pad, 0, 0, 0, 1); } } else { src = src_first_line + src_stride
* y; scaled_nearest_scanline_565_565_SRC_565_565_pad_SRC_wrapper
(mask, dst, src, width, vx, unit_x, max_vx, 0); } } }
1451 scaled_nearest_scanline_565_565_SRC,
1452 uint16_t, uint16_t, PAD)
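For the PAD fast paths generated above, the repeat (PIXMAN_REPEAT_PAD, &y, ...) call clamps the source row index into the valid range before the scanline is fetched. A hedged sketch of that clamping (the helper below is hypothetical, not part of pixman):

/* Hypothetical sketch of repeat (PIXMAN_REPEAT_PAD, &y, height): clamp the
 * coordinate into [0, height - 1]; PAD repeat always yields a valid index. */
static int
repeat_pad_sketch (int *y, int height)
{
    if (*y < 0)
        *y = 0;
    else if (*y >= height)
        *y = height - 1;
    return 1;
}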
1453
1454static force_inline__inline__ __attribute__ ((__always_inline__)) uint32_t
1455fetch_nearest (pixman_repeat_t src_repeat,
1456 pixman_format_code_t format,
1457 uint32_t *src, int x, int src_width)
1458{
1459 if (repeat (src_repeat, &x, src_width))
1460 {
1461 if (format == PIXMAN_x8r8g8b8)
1462 return *(src + x) | 0xff000000;
1463 else
1464 return *(src + x);
1465 }
1466 else
1467 {
1468 return 0;
1469 }
1470}
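fetch_nearest () folds the repeat handling and the x8r8g8b8 alpha fix-up into a single fetch. As a hedged illustration of the two cases for a REPEAT_NONE x8r8g8b8 source (the function below is hypothetical, written only to spell them out):

/* Hypothetical expansion of fetch_nearest () for src_repeat == PIXMAN_REPEAT_NONE
 * and format == PIXMAN_x8r8g8b8: out-of-range samples are transparent black,
 * in-range samples have their unused alpha byte forced to 0xff. */
static uint32_t
fetch_nearest_none_x8r8g8b8_sketch (const uint32_t *src, int x, int src_width)
{
    if (x < 0 || x >= src_width)
        return 0;
    return src[x] | 0xff000000;
}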
1471
1472static force_inline__inline__ __attribute__ ((__always_inline__)) void
1473combine_over (uint32_t s, uint32_t *dst)
1474{
1475 if (s)
1476 {
1477 uint8_t ia = 0xff - (s >> 24);
1478
1479 if (ia)
1480 UN8x4_MUL_UN8_ADD_UN8x4 (*dst, ia, s)do { uint32_t r1__, r2__, r3__, t__; r1__ = (*dst); r2__ = (s
) & 0xff00ff; do { t__ = ((r1__) & 0xff00ff) * ((ia))
; t__ += 0x800080; r1__ = (t__ + ((t__ >> 8) & 0xff00ff
)) >> 8; r1__ &= 0xff00ff; } while (0); do { t__ = (
(r1__) + (r2__)); t__ |= 0x10000100 - ((t__ >> 8) &
0xff00ff); r1__ = (t__ & 0xff00ff); } while (0); r2__ = (
*dst) >> 8; r3__ = ((s) >> 8) & 0xff00ff; do {
t__ = ((r2__) & 0xff00ff) * ((ia)); t__ += 0x800080; r2__
= (t__ + ((t__ >> 8) & 0xff00ff)) >> 8; r2__
&= 0xff00ff; } while (0); do { t__ = ((r2__) + (r3__)); t__
|= 0x10000100 - ((t__ >> 8) & 0xff00ff); r2__ = (t__
& 0xff00ff); } while (0); (*dst) = r1__ | (r2__ <<
8); } while (0)
;
1481 else
1482 *dst = s;
1483 }
1484}
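combine_over () implements the premultiplied OVER operator, dst = src + (1 - alpha(src)) * dst, with shortcuts for fully transparent and fully opaque sources. The UN8x4_MUL_UN8_ADD_UN8x4 macro performs this two channels per 32-bit word; a hedged scalar sketch of the same per-channel arithmetic (hypothetical helper, not pixman code):

/* Hypothetical per-channel version of the math done by UN8x4_MUL_UN8_ADD_UN8x4:
 * d = s + d * ia / 255, using the usual rounding trick for the division by 255
 * and a saturating add. */
static uint8_t
over_channel_sketch (uint8_t s, uint8_t d, uint8_t ia)   /* ia = 255 - alpha(s) */
{
    unsigned t = d * ia + 0x80;
    t = (t + (t >> 8)) >> 8;       /* ~ d * ia / 255, rounded */
    t += s;
    return t > 0xff ? 0xff : (uint8_t) t;
}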
1485
1486static force_inline__inline__ __attribute__ ((__always_inline__)) void
1487combine_src (uint32_t s, uint32_t *dst)
1488{
1489 *dst = s;
1490}
1491
1492static void
1493fast_composite_scaled_nearest (pixman_implementation_t *imp,
1494 pixman_composite_info_t *info)
1495{
1496 PIXMAN_COMPOSITE_ARGS (info)__attribute__((unused)) pixman_op_t op = info->op; __attribute__
((unused)) pixman_image_t * src_image = info->src_image; __attribute__
((unused)) pixman_image_t * mask_image = info->mask_image;
__attribute__((unused)) pixman_image_t * dest_image = info->
dest_image; __attribute__((unused)) int32_t src_x = info->
src_x; __attribute__((unused)) int32_t src_y = info->src_y
; __attribute__((unused)) int32_t mask_x = info->mask_x; __attribute__
((unused)) int32_t mask_y = info->mask_y; __attribute__((unused
)) int32_t dest_x = info->dest_x; __attribute__((unused)) int32_t
dest_y = info->dest_y; __attribute__((unused)) int32_t width
= info->width; __attribute__((unused)) int32_t height = info
->height
;
1497 uint32_t *dst_line;
1498 uint32_t *src_line;
1499 int dst_stride, src_stride;
1500 int src_width, src_height;
1501 pixman_repeat_t src_repeat;
1502 pixman_fixed_t unit_x, unit_y;
1503 pixman_format_code_t src_format;
1504 pixman_vector_t v;
1505 pixman_fixed_t vy;
1506
1507 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1)do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint32_t); (dst_line) = ((uint32_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0)
;
1508 /* pass in 0 instead of src_x and src_y because src_x and src_y need to be
1509 * transformed from destination space to source space
1510 */
1511 PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, src_stride, src_line, 1)do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint32_t
); (src_line) = ((uint32_t *) __bits__) + (src_stride) * (0) +
(1) * (0); } while (0)
;
1512
1513 /* reference point is the center of the pixel */
1514 v.vector[0] = pixman_int_to_fixed (src_x)((pixman_fixed_t) ((src_x) << 16)) + pixman_fixed_1(((pixman_fixed_t) ((1) << 16))) / 2;
1515 v.vector[1] = pixman_int_to_fixed (src_y)((pixman_fixed_t) ((src_y) << 16)) + pixman_fixed_1(((pixman_fixed_t) ((1) << 16))) / 2;
1516 v.vector[2] = pixman_fixed_1(((pixman_fixed_t) ((1) << 16)));
1517
1518 if (!pixman_transform_point_3d (src_image->common.transform, &v))
1519 return;
1520
1521 unit_x = src_image->common.transform->matrix[0][0];
1522 unit_y = src_image->common.transform->matrix[1][1];
1523
1524 /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */
1525 v.vector[0] -= pixman_fixed_e((pixman_fixed_t) 1);
1526 v.vector[1] -= pixman_fixed_e((pixman_fixed_t) 1);
1527
1528 src_height = src_image->bits.height;
1529 src_width = src_image->bits.width;
1530 src_repeat = src_image->common.repeat;
1531 src_format = src_image->bits.format;
1532
1533 vy = v.vector[1];
1534 while (height--)
1535 {
1536 pixman_fixed_t vx = v.vector[0];
1537 int y = pixman_fixed_to_int (vy)((int) ((vy) >> 16));
1538 uint32_t *dst = dst_line;
1539
1540 dst_line += dst_stride;
1541
1542 /* adjust the y location by a unit vector in the y direction
1543 * this is equivalent to transforming y+1 of the destination point to source space */
1544 vy += unit_y;
1545
1546 if (!repeat (src_repeat, &y, src_height))
1547 {
1548 if (op == PIXMAN_OP_SRC)
1549 memset (dst, 0, sizeof (*dst) * width);
1550 }
1551 else
1552 {
1553 int w = width;
1554
1555 uint32_t *src = src_line + y * src_stride;
1556
1557 while (w >= 2)
1558 {
1559 uint32_t s1, s2;
1560 int x1, x2;
1561
1562 x1 = pixman_fixed_to_int (vx)((int) ((vx) >> 16));
1563 vx += unit_x;
1564
1565 x2 = pixman_fixed_to_int (vx)((int) ((vx) >> 16));
1566 vx += unit_x;
1567
1568 w -= 2;
1569
1570 s1 = fetch_nearest (src_repeat, src_format, src, x1, src_width);
1571 s2 = fetch_nearest (src_repeat, src_format, src, x2, src_width);
1572
1573 if (op == PIXMAN_OP_OVER)
1574 {
1575 combine_over (s1, dst++);
1576 combine_over (s2, dst++);
1577 }
1578 else
1579 {
1580 combine_src (s1, dst++);
1581 combine_src (s2, dst++);
1582 }
1583 }
1584
1585 while (w--)
1586 {
1587 uint32_t s;
1588 int x;
1589
1590 x = pixman_fixed_to_int (vx)((int) ((vx) >> 16));
1591 vx += unit_x;
1592
1593 s = fetch_nearest (src_repeat, src_format, src, x, src_width);
1594
1595 if (op == PIXMAN_OP_OVER)
1596 combine_over (s, dst++);
1597 else
1598 combine_src (s, dst++);
1599 }
1600 }
1601 }
1602}
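The loop above walks the destination row in 16.16 fixed point: the center of destination pixel 0 is transformed into source space, pixman_fixed_e is subtracted so that an exact .5 rounds down rather than up, and each destination pixel advances vx by unit_x. Under the assumption of a pure scale with no translation, a hedged sketch of that mapping (names and helper below are hypothetical):

/* Hypothetical sketch of the 16.16 stepping used by fast_composite_scaled_nearest:
 * for a pure scale, destination pixel i samples source column
 * (unit_x / 2 - e + i * unit_x) >> 16, where e == pixman_fixed_e == 1. */
static void
map_dest_row_sketch (int32_t unit_x, int dst_width, int *src_cols)
{
    int32_t vx = unit_x / 2 - 1;    /* transformed pixel center, minus pixman_fixed_e */
    int i;

    for (i = 0; i < dst_width; i++)
    {
        src_cols[i] = vx >> 16;     /* nearest source column */
        vx += unit_x;
    }
}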
1603
1604#define CACHE_LINE_SIZE64 64
1605
1606#define FAST_SIMPLE_ROTATE(suffix, pix_type)static void blt_rotated_90_trivial_suffix (pix_type *dst, int
dst_stride, const pix_type *src, int src_stride, int w, int h
) { int x, y; for (y = 0; y < h; y++) { const pix_type *s =
src + (h - y - 1); pix_type *d = dst + dst_stride * y; for (
x = 0; x < w; x++) { *d++ = *s; s += src_stride; } } } static
void blt_rotated_270_trivial_suffix (pix_type *dst, int dst_stride
, const pix_type *src, int src_stride, int w, int h) { int x,
y; for (y = 0; y < h; y++) { const pix_type *s = src + src_stride
* (w - 1) + y; pix_type *d = dst + dst_stride * y; for (x = 0
; x < w; x++) { *d++ = *s; s -= src_stride; } } } static void
blt_rotated_90_suffix (pix_type *dst, int dst_stride, const pix_type
*src, int src_stride, int W, int H) { int x; int leading_pixels
= 0, trailing_pixels = 0; const int TILE_SIZE = 64 / sizeof(
pix_type); if ((uintptr_t)dst & (64 - 1)) { leading_pixels
= TILE_SIZE - (((uintptr_t)dst & (64 - 1)) / sizeof(pix_type
)); if (leading_pixels > W) leading_pixels = W; blt_rotated_90_trivial_suffix
( dst, dst_stride, src, src_stride, leading_pixels, H); dst +=
leading_pixels; src += leading_pixels * src_stride; W -= leading_pixels
; } if ((uintptr_t)(dst + W) & (64 - 1)) { trailing_pixels
= (((uintptr_t)(dst + W) & (64 - 1)) / sizeof(pix_type))
; if (trailing_pixels > W) trailing_pixels = W; W -= trailing_pixels
; } for (x = 0; x < W; x += TILE_SIZE) { blt_rotated_90_trivial_suffix
( dst + x, dst_stride, src + src_stride * x, src_stride, TILE_SIZE
, H); } if (trailing_pixels) { blt_rotated_90_trivial_suffix (
dst + W, dst_stride, src + W * src_stride, src_stride, trailing_pixels
, H); } } static void blt_rotated_270_suffix (pix_type *dst, int
dst_stride, const pix_type *src, int src_stride, int W, int H
) { int x; int leading_pixels = 0, trailing_pixels = 0; const
int TILE_SIZE = 64 / sizeof(pix_type); if ((uintptr_t)dst &
(64 - 1)) { leading_pixels = TILE_SIZE - (((uintptr_t)dst &
(64 - 1)) / sizeof(pix_type)); if (leading_pixels > W) leading_pixels
= W; blt_rotated_270_trivial_suffix ( dst, dst_stride, src +
src_stride * (W - leading_pixels), src_stride, leading_pixels
, H); dst += leading_pixels; W -= leading_pixels; } if ((uintptr_t
)(dst + W) & (64 - 1)) { trailing_pixels = (((uintptr_t)(
dst + W) & (64 - 1)) / sizeof(pix_type)); if (trailing_pixels
> W) trailing_pixels = W; W -= trailing_pixels; src += trailing_pixels
* src_stride; } for (x = 0; x < W; x += TILE_SIZE) { blt_rotated_270_trivial_suffix
( dst + x, dst_stride, src + src_stride * (W - x - TILE_SIZE
), src_stride, TILE_SIZE, H); } if (trailing_pixels) { blt_rotated_270_trivial_suffix
( dst + W, dst_stride, src - trailing_pixels * src_stride, src_stride
, trailing_pixels, H); } } static void fast_composite_rotate_90_suffix
(pixman_implementation_t *imp, pixman_composite_info_t *info
) { __attribute__((unused)) pixman_op_t op = info->op; __attribute__
((unused)) pixman_image_t * src_image = info->src_image; __attribute__
((unused)) pixman_image_t * mask_image = info->mask_image;
__attribute__((unused)) pixman_image_t * dest_image = info->
dest_image; __attribute__((unused)) int32_t src_x = info->
src_x; __attribute__((unused)) int32_t src_y = info->src_y
; __attribute__((unused)) int32_t mask_x = info->mask_x; __attribute__
((unused)) int32_t mask_y = info->mask_y; __attribute__((unused
)) int32_t dest_x = info->dest_x; __attribute__((unused)) int32_t
dest_y = info->dest_y; __attribute__((unused)) int32_t width
= info->width; __attribute__((unused)) int32_t height = info
->height; pix_type *dst_line; pix_type *src_line; int dst_stride
, src_stride; int src_x_t, src_y_t; do { uint32_t *__bits__; int
__stride__; __bits__ = dest_image->bits.bits; __stride__ =
dest_image->bits.rowstride; (dst_stride) = __stride__ * (
int) sizeof (uint32_t) / (int) sizeof (pix_type); (dst_line) =
((pix_type *) __bits__) + (dst_stride) * (dest_y) + (1) * (dest_x
); } while (0); src_x_t = -src_y + ((int) ((src_image->common
.transform->matrix[0][2] + (((pixman_fixed_t) ((1) <<
16))) / 2 - ((pixman_fixed_t) 1)) >> 16)) - height; src_y_t
= src_x + ((int) ((src_image->common.transform->matrix
[1][2] + (((pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t
) 1)) >> 16)); do { uint32_t *__bits__; int __stride__;
__bits__ = src_image->bits.bits; __stride__ = src_image->
bits.rowstride; (src_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (pix_type); (src_line) = ((pix_type *) __bits__
) + (src_stride) * (src_y_t) + (1) * (src_x_t); } while (0); blt_rotated_90_suffix
(dst_line, dst_stride, src_line, src_stride, width, height);
} static void fast_composite_rotate_270_suffix (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; pix_type *dst_line; pix_type
*src_line; int dst_stride, src_stride; int src_x_t, src_y_t;
do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(pix_type); (dst_line) = ((pix_type *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); src_x_t = src_y +
((int) ((src_image->common.transform->matrix[0][2] + (
((pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t)
1)) >> 16)); src_y_t = -src_x + ((int) ((src_image->
common.transform->matrix[1][2] + (((pixman_fixed_t) ((1) <<
16))) / 2 - ((pixman_fixed_t) 1)) >> 16)) - width; do {
uint32_t *__bits__; int __stride__; __bits__ = src_image->
bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (pix_type
); (src_line) = ((pix_type *) __bits__) + (src_stride) * (src_y_t
) + (1) * (src_x_t); } while (0); blt_rotated_270_suffix (dst_line
, dst_stride, src_line, src_stride, width, height); }
\
1607 \
1608static void \
1609blt_rotated_90_trivial_##suffix (pix_type *dst, \
1610 int dst_stride, \
1611 const pix_type *src, \
1612 int src_stride, \
1613 int w, \
1614 int h) \
1615{ \
1616 int x, y; \
1617 for (y = 0; y < h; y++) \
1618 { \
1619 const pix_type *s = src + (h - y - 1); \
1620 pix_type *d = dst + dst_stride * y; \
1621 for (x = 0; x < w; x++) \
1622 { \
1623 *d++ = *s; \
1624 s += src_stride; \
1625 } \
1626 } \
1627} \
1628 \
1629static void \
1630blt_rotated_270_trivial_##suffix (pix_type *dst, \
1631 int dst_stride, \
1632 const pix_type *src, \
1633 int src_stride, \
1634 int w, \
1635 int h) \
1636{ \
1637 int x, y; \
1638 for (y = 0; y < h; y++) \
1639 { \
1640 const pix_type *s = src + src_stride * (w - 1) + y; \
1641 pix_type *d = dst + dst_stride * y; \
1642 for (x = 0; x < w; x++) \
1643 { \
1644 *d++ = *s; \
1645 s -= src_stride; \
1646 } \
1647 } \
1648} \
1649 \
1650static void \
1651blt_rotated_90_##suffix (pix_type *dst, \
1652 int dst_stride, \
1653 const pix_type *src, \
1654 int src_stride, \
1655 int W, \
1656 int H) \
1657{ \
1658 int x; \
1659 int leading_pixels = 0, trailing_pixels = 0; \
1660 const int TILE_SIZE = CACHE_LINE_SIZE64 / sizeof(pix_type); \
1661 \
1662 /* \
1663 * split processing into handling destination as TILE_SIZExH cache line \
1664 * aligned vertical stripes (optimistically assuming that destination \
1665 * stride is a multiple of cache line, if not - it will be just a bit \
1666 * slower) \
1667 */ \
1668 \
1669 if ((uintptr_t)dst & (CACHE_LINE_SIZE64 - 1)) \
1670 { \
1671 leading_pixels = TILE_SIZE - (((uintptr_t)dst & \
1672 (CACHE_LINE_SIZE64 - 1)) / sizeof(pix_type)); \
1673 if (leading_pixels > W) \
1674 leading_pixels = W; \
1675 \
1676 /* unaligned leading part NxH (where N < TILE_SIZE) */ \
1677 blt_rotated_90_trivial_##suffix ( \
1678 dst, \
1679 dst_stride, \
1680 src, \
1681 src_stride, \
1682 leading_pixels, \
1683 H); \
1684 \
1685 dst += leading_pixels; \
1686 src += leading_pixels * src_stride; \
1687 W -= leading_pixels; \
1688 } \
1689 \
1690 if ((uintptr_t)(dst + W) & (CACHE_LINE_SIZE64 - 1)) \
1691 { \
1692 trailing_pixels = (((uintptr_t)(dst + W) & \
1693 (CACHE_LINE_SIZE64 - 1)) / sizeof(pix_type)); \
1694 if (trailing_pixels > W) \
1695 trailing_pixels = W; \
1696 W -= trailing_pixels; \
1697 } \
1698 \
1699 for (x = 0; x < W; x += TILE_SIZE) \
1700 { \
1701 /* aligned middle part TILE_SIZExH */ \
1702 blt_rotated_90_trivial_##suffix ( \
1703 dst + x, \
1704 dst_stride, \
1705 src + src_stride * x, \
1706 src_stride, \
1707 TILE_SIZE, \
1708 H); \
1709 } \
1710 \
1711 if (trailing_pixels) \
1712 { \
1713 /* unaligned trailing part NxH (where N < TILE_SIZE) */ \
1714 blt_rotated_90_trivial_##suffix ( \
1715 dst + W, \
1716 dst_stride, \
1717 src + W * src_stride, \
1718 src_stride, \
1719 trailing_pixels, \
1720 H); \
1721 } \
1722} \
1723 \
1724static void \
1725blt_rotated_270_##suffix (pix_type *dst, \
1726 int dst_stride, \
1727 const pix_type *src, \
1728 int src_stride, \
1729 int W, \
1730 int H) \
1731{ \
1732 int x; \
1733 int leading_pixels = 0, trailing_pixels = 0; \
1734 const int TILE_SIZE = CACHE_LINE_SIZE64 / sizeof(pix_type); \
1735 \
1736 /* \
1737 * split processing into handling destination as TILE_SIZExH cache line \
1738 * aligned vertical stripes (optimistically assuming that destination \
1739 * stride is a multiple of cache line, if not - it will be just a bit \
1740 * slower) \
1741 */ \
1742 \
1743 if ((uintptr_t)dst & (CACHE_LINE_SIZE64 - 1)) \
1744 { \
1745 leading_pixels = TILE_SIZE - (((uintptr_t)dst & \
1746 (CACHE_LINE_SIZE64 - 1)) / sizeof(pix_type)); \
1747 if (leading_pixels > W) \
1748 leading_pixels = W; \
1749 \
1750 /* unaligned leading part NxH (where N < TILE_SIZE) */ \
1751 blt_rotated_270_trivial_##suffix ( \
1752 dst, \
1753 dst_stride, \
1754 src + src_stride * (W - leading_pixels), \
1755 src_stride, \
1756 leading_pixels, \
1757 H); \
1758 \
1759 dst += leading_pixels; \
1760 W -= leading_pixels; \
1761 } \
1762 \
1763 if ((uintptr_t)(dst + W) & (CACHE_LINE_SIZE64 - 1)) \
1764 { \
1765 trailing_pixels = (((uintptr_t)(dst + W) & \
1766 (CACHE_LINE_SIZE64 - 1)) / sizeof(pix_type)); \
1767 if (trailing_pixels > W) \
1768 trailing_pixels = W; \
1769 W -= trailing_pixels; \
1770 src += trailing_pixels * src_stride; \
1771 } \
1772 \
1773 for (x = 0; x < W; x += TILE_SIZE) \
1774 { \
1775 /* aligned middle part TILE_SIZExH */ \
1776 blt_rotated_270_trivial_##suffix ( \
1777 dst + x, \
1778 dst_stride, \
1779 src + src_stride * (W - x - TILE_SIZE), \
1780 src_stride, \
1781 TILE_SIZE, \
1782 H); \
1783 } \
1784 \
1785 if (trailing_pixels) \
1786 { \
1787 /* unaligned trailing part NxH (where N < TILE_SIZE) */ \
1788 blt_rotated_270_trivial_##suffix ( \
1789 dst + W, \
1790 dst_stride, \
1791 src - trailing_pixels * src_stride, \
1792 src_stride, \
1793 trailing_pixels, \
1794 H); \
1795 } \
1796} \
1797 \
1798static void \
1799fast_composite_rotate_90_##suffix (pixman_implementation_t *imp, \
1800 pixman_composite_info_t *info) \
1801{ \
1802 PIXMAN_COMPOSITE_ARGS (info)__attribute__((unused)) pixman_op_t op = info->op; __attribute__
((unused)) pixman_image_t * src_image = info->src_image; __attribute__
((unused)) pixman_image_t * mask_image = info->mask_image;
__attribute__((unused)) pixman_image_t * dest_image = info->
dest_image; __attribute__((unused)) int32_t src_x = info->
src_x; __attribute__((unused)) int32_t src_y = info->src_y
; __attribute__((unused)) int32_t mask_x = info->mask_x; __attribute__
((unused)) int32_t mask_y = info->mask_y; __attribute__((unused
)) int32_t dest_x = info->dest_x; __attribute__((unused)) int32_t
dest_y = info->dest_y; __attribute__((unused)) int32_t width
= info->width; __attribute__((unused)) int32_t height = info
->height
; \
1803 pix_type *dst_line; \
1804 pix_type *src_line; \
1805 int dst_stride, src_stride; \
1806 int src_x_t, src_y_t; \
1807 \
1808 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, pix_type, \do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(pix_type); (dst_line) = ((pix_type *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0)
1809 dst_stride, dst_line, 1)do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(pix_type); (dst_line) = ((pix_type *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0)
; \
1810 src_x_t = -src_y + pixman_fixed_to_int ( \((int) ((src_image->common.transform->matrix[0][2] + ((
(pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t) 1
)) >> 16))
1811 src_image->common.transform->matrix[0][2] + \((int) ((src_image->common.transform->matrix[0][2] + ((
(pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t) 1
)) >> 16))
1812 pixman_fixed_1 / 2 - pixman_fixed_e)((int) ((src_image->common.transform->matrix[0][2] + ((
(pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t) 1
)) >> 16))
- height;\
1813 src_y_t = src_x + pixman_fixed_to_int ( \((int) ((src_image->common.transform->matrix[1][2] + ((
(pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t) 1
)) >> 16))
1814 src_image->common.transform->matrix[1][2] + \((int) ((src_image->common.transform->matrix[1][2] + ((
(pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t) 1
)) >> 16))
1815 pixman_fixed_1 / 2 - pixman_fixed_e)((int) ((src_image->common.transform->matrix[1][2] + ((
(pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t) 1
)) >> 16))
; \
1816 PIXMAN_IMAGE_GET_LINE (src_image, src_x_t, src_y_t, pix_type, \do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (pix_type
); (src_line) = ((pix_type *) __bits__) + (src_stride) * (src_y_t
) + (1) * (src_x_t); } while (0)
1817 src_stride, src_line, 1)do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (pix_type
); (src_line) = ((pix_type *) __bits__) + (src_stride) * (src_y_t
) + (1) * (src_x_t); } while (0)
; \
1818 blt_rotated_90_##suffix (dst_line, dst_stride, src_line, src_stride, \
1819 width, height); \
1820} \
1821 \
1822static void \
1823fast_composite_rotate_270_##suffix (pixman_implementation_t *imp, \
1824 pixman_composite_info_t *info) \
1825{ \
1826 PIXMAN_COMPOSITE_ARGS (info)__attribute__((unused)) pixman_op_t op = info->op; __attribute__
((unused)) pixman_image_t * src_image = info->src_image; __attribute__
((unused)) pixman_image_t * mask_image = info->mask_image;
__attribute__((unused)) pixman_image_t * dest_image = info->
dest_image; __attribute__((unused)) int32_t src_x = info->
src_x; __attribute__((unused)) int32_t src_y = info->src_y
; __attribute__((unused)) int32_t mask_x = info->mask_x; __attribute__
((unused)) int32_t mask_y = info->mask_y; __attribute__((unused
)) int32_t dest_x = info->dest_x; __attribute__((unused)) int32_t
dest_y = info->dest_y; __attribute__((unused)) int32_t width
= info->width; __attribute__((unused)) int32_t height = info
->height
; \
1827 pix_type *dst_line; \
1828 pix_type *src_line; \
1829 int dst_stride, src_stride; \
1830 int src_x_t, src_y_t; \
1831 \
1832 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, pix_type, \do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(pix_type); (dst_line) = ((pix_type *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0)
1833 dst_stride, dst_line, 1)do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(pix_type); (dst_line) = ((pix_type *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0)
; \
1834 src_x_t = src_y + pixman_fixed_to_int ( \((int) ((src_image->common.transform->matrix[0][2] + ((
(pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t) 1
)) >> 16))
1835 src_image->common.transform->matrix[0][2] + \((int) ((src_image->common.transform->matrix[0][2] + ((
(pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t) 1
)) >> 16))
1836 pixman_fixed_1 / 2 - pixman_fixed_e)((int) ((src_image->common.transform->matrix[0][2] + ((
(pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t) 1
)) >> 16))
; \
1837 src_y_t = -src_x + pixman_fixed_to_int ( \((int) ((src_image->common.transform->matrix[1][2] + ((
(pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t) 1
)) >> 16))
1838 src_image->common.transform->matrix[1][2] + \((int) ((src_image->common.transform->matrix[1][2] + ((
(pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t) 1
)) >> 16))
1839 pixman_fixed_1 / 2 - pixman_fixed_e)((int) ((src_image->common.transform->matrix[1][2] + ((
(pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t) 1
)) >> 16))
- width; \
1840 PIXMAN_IMAGE_GET_LINE (src_image, src_x_t, src_y_t, pix_type, \do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (pix_type
); (src_line) = ((pix_type *) __bits__) + (src_stride) * (src_y_t
) + (1) * (src_x_t); } while (0)
1841 src_stride, src_line, 1)do { uint32_t *__bits__; int __stride__; __bits__ = src_image
->bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (pix_type
); (src_line) = ((pix_type *) __bits__) + (src_stride) * (src_y_t
) + (1) * (src_x_t); } while (0)
; \
1842 blt_rotated_270_##suffix (dst_line, dst_stride, src_line, src_stride, \
1843 width, height); \
1844}
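The rotation helpers above split the destination into an unaligned leading strip, a cache-line aligned middle processed TILE_SIZE columns at a time, and an unaligned trailing strip. A hedged worked example of how that split comes out for pix_type = uint16_t, so TILE_SIZE = 64 / 2 = 32 (all numbers below are made up, and W is assumed large enough that the clamps against W never trigger):

/* Hypothetical worked example of the leading/middle/trailing split used by
 * blt_rotated_90_* and blt_rotated_270_*; the destination address and width
 * are invented for illustration. */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
    const int cache_line = 64, pix = 2, tile = cache_line / pix;            /* 32 pixels */
    uintptr_t dst_addr = 0x1014;                                            /* 20 bytes past a cache line */
    int W = 100;

    int leading  = tile - (int) ((dst_addr & (cache_line - 1)) / pix);      /* 32 - 10 = 22 */
    int trailing = (int) (((dst_addr + (uintptr_t) W * pix) &
                           (cache_line - 1)) / pix);                        /* 28 / 2 = 14  */
    int middle   = W - leading - trailing;                                  /* 64 = two 32-pixel tiles */

    printf ("leading %d, aligned middle %d, trailing %d\n", leading, middle, trailing);
    return 0;
}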
1845
1846FAST_SIMPLE_ROTATE (8, uint8_t)static void blt_rotated_90_trivial_8 (uint8_t *dst, int dst_stride
, const uint8_t *src, int src_stride, int w, int h) { int x, y
; for (y = 0; y < h; y++) { const uint8_t *s = src + (h - y
- 1); uint8_t *d = dst + dst_stride * y; for (x = 0; x < w
; x++) { *d++ = *s; s += src_stride; } } } static void blt_rotated_270_trivial_8
(uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride
, int w, int h) { int x, y; for (y = 0; y < h; y++) { const
uint8_t *s = src + src_stride * (w - 1) + y; uint8_t *d = dst
+ dst_stride * y; for (x = 0; x < w; x++) { *d++ = *s; s -=
src_stride; } } } static void blt_rotated_90_8 (uint8_t *dst
, int dst_stride, const uint8_t *src, int src_stride, int W, int
H) { int x; int leading_pixels = 0, trailing_pixels = 0; const
int TILE_SIZE = 64 / sizeof(uint8_t); if ((uintptr_t)dst &
(64 - 1)) { leading_pixels = TILE_SIZE - (((uintptr_t)dst &
(64 - 1)) / sizeof(uint8_t)); if (leading_pixels > W) leading_pixels
= W; blt_rotated_90_trivial_8 ( dst, dst_stride, src, src_stride
, leading_pixels, H); dst += leading_pixels; src += leading_pixels
* src_stride; W -= leading_pixels; } if ((uintptr_t)(dst + W
) & (64 - 1)) { trailing_pixels = (((uintptr_t)(dst + W) &
(64 - 1)) / sizeof(uint8_t)); if (trailing_pixels > W) trailing_pixels
= W; W -= trailing_pixels; } for (x = 0; x < W; x += TILE_SIZE
) { blt_rotated_90_trivial_8 ( dst + x, dst_stride, src + src_stride
* x, src_stride, TILE_SIZE, H); } if (trailing_pixels) { blt_rotated_90_trivial_8
( dst + W, dst_stride, src + W * src_stride, src_stride, trailing_pixels
, H); } } static void blt_rotated_270_8 (uint8_t *dst, int dst_stride
, const uint8_t *src, int src_stride, int W, int H) { int x; int
leading_pixels = 0, trailing_pixels = 0; const int TILE_SIZE
= 64 / sizeof(uint8_t); if ((uintptr_t)dst & (64 - 1)) {
leading_pixels = TILE_SIZE - (((uintptr_t)dst & (64 - 1)
) / sizeof(uint8_t)); if (leading_pixels > W) leading_pixels
= W; blt_rotated_270_trivial_8 ( dst, dst_stride, src + src_stride
* (W - leading_pixels), src_stride, leading_pixels, H); dst +=
leading_pixels; W -= leading_pixels; } if ((uintptr_t)(dst +
W) & (64 - 1)) { trailing_pixels = (((uintptr_t)(dst + W
) & (64 - 1)) / sizeof(uint8_t)); if (trailing_pixels >
W) trailing_pixels = W; W -= trailing_pixels; src += trailing_pixels
* src_stride; } for (x = 0; x < W; x += TILE_SIZE) { blt_rotated_270_trivial_8
( dst + x, dst_stride, src + src_stride * (W - x - TILE_SIZE
), src_stride, TILE_SIZE, H); } if (trailing_pixels) { blt_rotated_270_trivial_8
( dst + W, dst_stride, src - trailing_pixels * src_stride, src_stride
, trailing_pixels, H); } } static void fast_composite_rotate_90_8
(pixman_implementation_t *imp, pixman_composite_info_t *info
) { __attribute__((unused)) pixman_op_t op = info->op; __attribute__
((unused)) pixman_image_t * src_image = info->src_image; __attribute__
((unused)) pixman_image_t * mask_image = info->mask_image;
__attribute__((unused)) pixman_image_t * dest_image = info->
dest_image; __attribute__((unused)) int32_t src_x = info->
src_x; __attribute__((unused)) int32_t src_y = info->src_y
; __attribute__((unused)) int32_t mask_x = info->mask_x; __attribute__
((unused)) int32_t mask_y = info->mask_y; __attribute__((unused
)) int32_t dest_x = info->dest_x; __attribute__((unused)) int32_t
dest_y = info->dest_y; __attribute__((unused)) int32_t width
= info->width; __attribute__((unused)) int32_t height = info
->height; uint8_t *dst_line; uint8_t *src_line; int dst_stride
, src_stride; int src_x_t, src_y_t; do { uint32_t *__bits__; int
__stride__; __bits__ = dest_image->bits.bits; __stride__ =
dest_image->bits.rowstride; (dst_stride) = __stride__ * (
int) sizeof (uint32_t) / (int) sizeof (uint8_t); (dst_line) =
((uint8_t *) __bits__) + (dst_stride) * (dest_y) + (1) * (dest_x
); } while (0); src_x_t = -src_y + ((int) ((src_image->common
.transform->matrix[0][2] + (((pixman_fixed_t) ((1) <<
16))) / 2 - ((pixman_fixed_t) 1)) >> 16)) - height; src_y_t
= src_x + ((int) ((src_image->common.transform->matrix
[1][2] + (((pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t
) 1)) >> 16)); do { uint32_t *__bits__; int __stride__;
__bits__ = src_image->bits.bits; __stride__ = src_image->
bits.rowstride; (src_stride) = __stride__ * (int) sizeof (uint32_t
) / (int) sizeof (uint8_t); (src_line) = ((uint8_t *) __bits__
) + (src_stride) * (src_y_t) + (1) * (src_x_t); } while (0); blt_rotated_90_8
(dst_line, dst_stride, src_line, src_stride, width, height);
} static void fast_composite_rotate_270_8 (pixman_implementation_t
*imp, pixman_composite_info_t *info) { __attribute__((unused
)) pixman_op_t op = info->op; __attribute__((unused)) pixman_image_t
* src_image = info->src_image; __attribute__((unused)) pixman_image_t
* mask_image = info->mask_image; __attribute__((unused)) pixman_image_t
* dest_image = info->dest_image; __attribute__((unused)) int32_t
src_x = info->src_x; __attribute__((unused)) int32_t src_y
= info->src_y; __attribute__((unused)) int32_t mask_x = info
->mask_x; __attribute__((unused)) int32_t mask_y = info->
mask_y; __attribute__((unused)) int32_t dest_x = info->dest_x
; __attribute__((unused)) int32_t dest_y = info->dest_y; __attribute__
((unused)) int32_t width = info->width; __attribute__((unused
)) int32_t height = info->height; uint8_t *dst_line; uint8_t
*src_line; int dst_stride, src_stride; int src_x_t, src_y_t;
do { uint32_t *__bits__; int __stride__; __bits__ = dest_image
->bits.bits; __stride__ = dest_image->bits.rowstride; (
dst_stride) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof
(uint8_t); (dst_line) = ((uint8_t *) __bits__) + (dst_stride
) * (dest_y) + (1) * (dest_x); } while (0); src_x_t = src_y +
((int) ((src_image->common.transform->matrix[0][2] + (
((pixman_fixed_t) ((1) << 16))) / 2 - ((pixman_fixed_t)
1)) >> 16)); src_y_t = -src_x + ((int) ((src_image->
common.transform->matrix[1][2] + (((pixman_fixed_t) ((1) <<
16))) / 2 - ((pixman_fixed_t) 1)) >> 16)) - width; do {
uint32_t *__bits__; int __stride__; __bits__ = src_image->
bits.bits; __stride__ = src_image->bits.rowstride; (src_stride
) = __stride__ * (int) sizeof (uint32_t) / (int) sizeof (uint8_t
); (src_line) = ((uint8_t *) __bits__) + (src_stride) * (src_y_t
) + (1) * (src_x_t); } while (0); blt_rotated_270_8 (dst_line
, dst_stride, src_line, src_stride, width, height); }
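The FAST_SIMPLE_ROTATE (8, uint8_t) instantiation above generates blt_rotated_90_trivial_8 and the related helpers. The trivial blit copies dst[y][x] = src[x][h - 1 - y]; a hedged standalone call on a made-up 2x3 block (buffer contents are hypothetical and chosen only to show the index mapping):

/* Hypothetical call to the generated blt_rotated_90_trivial_8 (); the data is
 * invented purely to demonstrate dst[y][x] = src[x][h - 1 - y]. */
static void
rotate_90_trivial_example (void)
{
    const uint8_t src[2][3] = { { 1, 2, 3 },
                                { 4, 5, 6 } };    /* w = 2 rows, h = 3 columns */
    uint8_t dst[3][2];                            /* becomes { {3,6}, {2,5}, {1,4} } */

    blt_rotated_90_trivial_8 (&dst[0][0], 2,      /* dst, dst_stride (pixels) */
                              &src[0][0], 3,      /* src, src_stride (pixels) */
                              2, 3);              /* w, h                     */
}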
1847 FAST_SIMPLE_ROTATE (565, uint16_t)
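Each FAST_SIMPLE_ROTATE instantiation generates blt_rotated_90_trivial_* and blt_rotated_270_trivial_* helpers whose inner loops implement a fixed pixel mapping, plus cache-line-tiled wrappers (blt_rotated_90_* / blt_rotated_270_*) that split the destination into 64-byte-aligned columns and call the trivial versions. A minimal reference version of the mapping for the uint16_t case is sketched below; it is written for this note, not code taken from pixman.

```c
#include <stdint.h>

/* Reference form of the 90-degree rotation blit: dst (x, y) reads
 * src (row x, column h - 1 - y), matching the trivial loop above. */
static void
rotate_90_reference (uint16_t *dst, int dst_stride,
                     const uint16_t *src, int src_stride,
                     int w, int h)
{
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            dst[y * dst_stride + x] = src[x * src_stride + (h - 1 - y)];
}

/* Reference form of the 270-degree rotation blit: dst (x, y) reads
 * src (row w - 1 - x, column y). */
static void
rotate_270_reference (uint16_t *dst, int dst_stride,
                      const uint16_t *src, int src_stride,
                      int w, int h)
{
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            dst[y * dst_stride + x] = src[(w - 1 - x) * src_stride + y];
}
```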
1848 FAST_SIMPLE_ROTATE (8888, uint32_t)
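Each PIXMAN_STD_FAST_PATH / PIXMAN_STD_FAST_PATH_CA entry in the table below expands, as the report shows, into a positional pixman_fast_path_t initializer: operator, source format and flags, mask format and flags, destination format and flags, and the composite function. A simplified sketch of the first entry (line 1852) follows; the field names in the comments and the FAST_PATH_* abbreviation are assumptions about pixman-private.h rather than text quoted from the report, and the sketch presumes it sits inside pixman-fast-path.c where the static composite functions are visible.

```c
/* Hypothetical, simplified rendering of the expansion of line 1852.
 * The real expansion fills the *_flags slots with FAST_PATH_* bit masks,
 * abbreviated to 0 here. */
static const pixman_fast_path_t example_fast_path =
{
    PIXMAN_OP_OVER,                      /* op */
    PIXMAN_FORMAT (0, 1, 0, 0, 0, 0),    /* src_format: the "solid" pseudo-format */
    0,                                   /* src_flags  (FAST_PATH_* bits elided) */
    PIXMAN_a8,                           /* mask_format */
    0,                                   /* mask_flags (FAST_PATH_* bits elided) */
    PIXMAN_r5g6b5,                       /* dest_format */
    0,                                   /* dest_flags (FAST_PATH_* bits elided) */
    fast_composite_over_n_8_0565         /* func */
};
```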
1849
1850 static const pixman_fast_path_t c_fast_paths[] =
1851 {
1852     PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, fast_composite_over_n_8_0565),
1853     PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, fast_composite_over_n_8_0565),
1854     PIXMAN_STD_FAST_PATH (OVER, solid, a8, r8g8b8, fast_composite_over_n_8_0888),
1855     PIXMAN_STD_FAST_PATH (OVER, solid, a8, b8g8r8, fast_composite_over_n_8_0888),
1856     PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, fast_composite_over_n_8_8888),
1857     PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, fast_composite_over_n_8_8888),
1858     PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, fast_composite_over_n_8_8888),
1859     PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, fast_composite_over_n_8_8888),
1860     PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8r8g8b8, fast_composite_over_n_1_8888),
1861     PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8r8g8b8, fast_composite_over_n_1_8888),
1862     PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8b8g8r8, fast_composite_over_n_1_8888),
1863     PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8b8g8r8, fast_composite_over_n_1_8888),
1864     PIXMAN_STD_FAST_PATH (OVER, solid, a1, r5g6b5, fast_composite_over_n_1_0565),
1865     PIXMAN_STD_FAST_PATH (OVER, solid, a1, b5g6r5, fast_composite_over_n_1_0565),
1866     PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, fast_composite_over_n_8888_8888_ca),
1867     PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, fast_composite_over_n_8888_8888_ca),
1868     PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, fast_composite_over_n_8888_0565_ca),
1869     PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, fast_composite_over_n_8888_8888_ca),
1870     PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, fast_composite_over_n_8888_8888_ca),
1871     PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, fast_composite_over_n_8888_0565_ca),
1872     PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, fast_composite_over_x888_8_8888),
1873     PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, fast_composite_over_x888_8_8888),
1874     PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, fast_composite_over_x888_8_8888),
1875     PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, fast_composite_over_x888_8_8888),
1876     PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, fast_composite_over_8888_8888),
1877     PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, fast_composite_over_8888_8888),
1878     PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, fast_composite_over_8888_0565),
1879     PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, fast_composite_over_8888_8888),
1880     PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, fast_composite_over_8888_8888),
1881     PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, fast_composite_over_8888_0565),
1882     PIXMAN_STD_FAST_PATH (ADD, r5g6b5, null, r5g6b5, fast_composite_add_0565_0565),
1883     PIXMAN_STD_FAST_PATH (ADD, b5g6r5, null, b5g6r5, fast_composite_add_0565_0565),
1884     PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, fast_composite_add_8888_8888),
1885     PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, fast_composite_add_8888_8888),
1886     PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, fast_composite_add_8_8),
1887     PIXMAN_STD_FAST_PATH (ADD, a1, null, a1, fast_composite_add_1_1),
1888     PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, fast_composite_add_n_8888_8888_ca),
1889     PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, fast_composite_add_n_8_8),
1890     PIXMAN_STD_FAST_PATH (SRC, solid, null, a8r8g8b8, fast_composite_solid_fill),
1891     PIXMAN_STD_FAST_PATH (SRC, solid, null, x8r8g8b8, fast_composite_solid_fill),
1892     PIXMAN_STD_FAST_PATH (SRC, solid, null, a8b8g8r8, fast_composite_solid_fill),
1893     PIXMAN_STD_FAST_PATH (SRC, solid, null, x8b8g8r8, fast_composite_solid_fill),
1894     PIXMAN_STD_FAST_PATH (SRC, solid, null, a1, fast_composite_solid_fill),
1895     PIXMAN_STD_FAST_PATH (SRC, solid, null, a8, fast_composite_solid_fill),
1896     PIXMAN_STD_FAST_PATH (SRC, solid, null, r5g6b5, fast_composite_solid_fill),
1897     PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, fast_composite_src_x888_8888),
1898     PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, fast_composite_src_x888_8888),
1899     PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
1900     PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, fast_composite_src_memcpy),
1901     PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
1902     PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
1903     PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, fast_composite_src_memcpy),
1904     PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
1905     PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8x8, fast_composite_src_memcpy),
1906     PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8a8, fast_composite_src_memcpy),
1907     PIXMAN_STD_FAST_PATH (SRC, b8g8r8x8, null, b8g8r8x8, fast_composite_src_memcpy),
1908 PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, fast_composite_src_memcpy){ PIXMAN_OP_SRC, PIXMAN_r5g6b5, (((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | ((PIXMAN_r5g6b5 == (
((0) << 24) | ((1) << 16) | ((0) << 12) | (
(0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((1 <<
23) | (1 << 11) | (1 << 0)))), (((0) << 24
) | ((0) << 16) | ((0) << 12) | ((0) << 8) |
((0) << 4) | ((0))), (((((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0))) == (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((
((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))) | (1 << 9))), PIXMAN_r5g6b5, (
(1 << 5) | (1 << 1) | (1 << 6)), fast_composite_src_memcpy
}
,
1909 PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, fast_composite_src_memcpy){ PIXMAN_OP_SRC, PIXMAN_b5g6r5, (((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | ((PIXMAN_b5g6r5 == (
((0) << 24) | ((1) << 16) | ((0) << 12) | (
(0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((1 <<
23) | (1 << 11) | (1 << 0)))), (((0) << 24
) | ((0) << 16) | ((0) << 12) | ((0) << 8) |
((0) << 4) | ((0))), (((((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0))) == (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((
((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))) | (1 << 9))), PIXMAN_b5g6r5, (
(1 << 5) | (1 << 1) | (1 << 6)), fast_composite_src_memcpy
}
,
1910 PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, fast_composite_src_memcpy){ PIXMAN_OP_SRC, PIXMAN_r8g8b8, (((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | ((PIXMAN_r8g8b8 == (
((0) << 24) | ((1) << 16) | ((0) << 12) | (
(0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((1 <<
23) | (1 << 11) | (1 << 0)))), (((0) << 24
) | ((0) << 16) | ((0) << 12) | ((0) << 8) |
((0) << 4) | ((0))), (((((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0))) == (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((
((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))) | (1 << 9))), PIXMAN_r8g8b8, (
(1 << 5) | (1 << 1) | (1 << 6)), fast_composite_src_memcpy
}
,
1911 PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, b8g8r8, fast_composite_src_memcpy){ PIXMAN_OP_SRC, PIXMAN_b8g8r8, (((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | ((PIXMAN_b8g8r8 == (
((0) << 24) | ((1) << 16) | ((0) << 12) | (
(0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((1 <<
23) | (1 << 11) | (1 << 0)))), (((0) << 24
) | ((0) << 16) | ((0) << 12) | ((0) << 8) |
((0) << 4) | ((0))), (((((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0))) == (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((
((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))) | (1 << 9))), PIXMAN_b8g8r8, (
(1 << 5) | (1 << 1) | (1 << 6)), fast_composite_src_memcpy
}
,
1912 PIXMAN_STD_FAST_PATH (SRC, x1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy){ PIXMAN_OP_SRC, PIXMAN_x1r5g5b5, (((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | ((PIXMAN_x1r5g5b5 ==
(((0) << 24) | ((1) << 16) | ((0) << 12) |
((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((1 <<
23) | (1 << 11) | (1 << 0)))), (((0) << 24
) | ((0) << 16) | ((0) << 12) | ((0) << 8) |
((0) << 4) | ((0))), (((((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0))) == (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((
((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))) | (1 << 9))), PIXMAN_x1r5g5b5,
((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_src_memcpy
}
,
1913 PIXMAN_STD_FAST_PATH (SRC, a1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy){ PIXMAN_OP_SRC, PIXMAN_a1r5g5b5, (((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | ((PIXMAN_a1r5g5b5 ==
(((0) << 24) | ((1) << 16) | ((0) << 12) |
((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((1 <<
23) | (1 << 11) | (1 << 0)))), (((0) << 24
) | ((0) << 16) | ((0) << 12) | ((0) << 8) |
((0) << 4) | ((0))), (((((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0))) == (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((
((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))) | (1 << 9))), PIXMAN_x1r5g5b5,
((1 << 5) | (1 << 1) | (1 << 6)), fast_composite_src_memcpy
}
,
1914 PIXMAN_STD_FAST_PATH (SRC, a8, null, a8, fast_composite_src_memcpy){ PIXMAN_OP_SRC, PIXMAN_a8, (((1 << 2) | (1 << 5)
| (1 << 1) | (1 << 6)) | ((PIXMAN_a8 == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))), (((0) << 24) | ((0) << 16
) | ((0) << 12) | ((0) << 8) | ((0) << 4) |
((0))), (((((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((0) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | (((((0) << 24)
| ((0) << 16) | ((0) << 12) | ((0) << 8) |
((0) << 4) | ((0))) == (((0) << 24) | ((1) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0)))) ? 0 : ((1 << 23) | (1 << 11) | (1 <<
0)))) | (1 << 9))), PIXMAN_a8, ((1 << 5) | (1 <<
1) | (1 << 6)), fast_composite_src_memcpy }
,
1915 PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, fast_composite_src_x888_0565){ PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, (((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | ((PIXMAN_a8r8g8b8 ==
(((0) << 24) | ((1) << 16) | ((0) << 12) |
((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((1 <<
23) | (1 << 11) | (1 << 0)))), (((0) << 24
) | ((0) << 16) | ((0) << 12) | ((0) << 8) |
((0) << 4) | ((0))), (((((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0))) == (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((
((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))) | (1 << 9))), PIXMAN_r5g6b5, (
(1 << 5) | (1 << 1) | (1 << 6)), fast_composite_src_x888_0565
}
,
1916 PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, fast_composite_src_x888_0565){ PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, (((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | ((PIXMAN_x8r8g8b8 ==
(((0) << 24) | ((1) << 16) | ((0) << 12) |
((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((1 <<
23) | (1 << 11) | (1 << 0)))), (((0) << 24
) | ((0) << 16) | ((0) << 12) | ((0) << 8) |
((0) << 4) | ((0))), (((((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0))) == (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((
((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))) | (1 << 9))), PIXMAN_r5g6b5, (
(1 << 5) | (1 << 1) | (1 << 6)), fast_composite_src_x888_0565
}
,
1917 PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, fast_composite_src_x888_0565){ PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, (((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | ((PIXMAN_a8b8g8r8 ==
(((0) << 24) | ((1) << 16) | ((0) << 12) |
((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((1 <<
23) | (1 << 11) | (1 << 0)))), (((0) << 24
) | ((0) << 16) | ((0) << 12) | ((0) << 8) |
((0) << 4) | ((0))), (((((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0))) == (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((
((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))) | (1 << 9))), PIXMAN_b5g6r5, (
(1 << 5) | (1 << 1) | (1 << 6)), fast_composite_src_x888_0565
}
,
1918 PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, fast_composite_src_x888_0565){ PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, (((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | ((PIXMAN_x8b8g8r8 ==
(((0) << 24) | ((1) << 16) | ((0) << 12) |
((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((1 <<
23) | (1 << 11) | (1 << 0)))), (((0) << 24
) | ((0) << 16) | ((0) << 12) | ((0) << 8) |
((0) << 4) | ((0))), (((((0) << 24) | ((0) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0))) == (((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0)))) ? 0 : ((
((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))) | (1 << 9))), PIXMAN_b5g6r5, (
(1 << 5) | (1 << 1) | (1 << 6)), fast_composite_src_x888_0565
}
,
1919 PIXMAN_STD_FAST_PATH (IN, a8, null, a8, fast_composite_in_8_8){ PIXMAN_OP_IN, PIXMAN_a8, (((1 << 2) | (1 << 5) |
(1 << 1) | (1 << 6)) | ((PIXMAN_a8 == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))), (((0) << 24) | ((0) << 16
) | ((0) << 12) | ((0) << 8) | ((0) << 4) |
((0))), (((((0) << 24) | ((0) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((0) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | (((((0) << 24)
| ((0) << 16) | ((0) << 12) | ((0) << 8) |
((0) << 4) | ((0))) == (((0) << 24) | ((1) <<
16) | ((0) << 12) | ((0) << 8) | ((0) << 4
) | ((0)))) ? 0 : ((1 << 23) | (1 << 11) | (1 <<
0)))) | (1 << 9))), PIXMAN_a8, ((1 << 5) | (1 <<
1) | (1 << 6)), fast_composite_in_8_8 }
,
1920 PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, fast_composite_in_n_8_8){ PIXMAN_OP_IN, (((0) << 24) | ((1) << 16) | ((0)
<< 12) | ((0) << 8) | ((0) << 4) | ((0))),
(((1 << 2) | (1 << 5) | (1 << 1) | (1 <<
6)) | (((((0) << 24) | ((1) << 16) | ((0) <<
12) | ((0) << 8) | ((0) << 4) | ((0))) == (((0) <<
24) | ((1) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))), PIXMAN_a8, ((PIXMAN_a8 == (((0) <<
24) | ((0) << 16) | ((0) << 12) | ((0) << 8
) | ((0) << 4) | ((0)))) ? 0 : ((((1 << 2) | (1 <<
5) | (1 << 1) | (1 << 6)) | ((PIXMAN_a8 == (((0)
<< 24) | ((1) << 16) | ((0) << 12) | ((0) <<
8) | ((0) << 4) | ((0)))) ? 0 : ((1 << 23) | (1 <<
11) | (1 << 0)))) | (1 << 9))), PIXMAN_a8, ((1 <<
5) | (1 << 1) | (1 << 6)), fast_composite_in_n_8_8
}
,
1921
1922    SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888_8888),
1923    SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888_8888),
1924    SIMPLE_NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8, 8888_8888),
1925    SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8, 8888_8888),
1926
1927    SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888_8888),
1928    SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8, 8888_8888),
1929
1930    SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, r5g6b5, 8888_565),
1931    SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, r5g6b5, 8888_565),
1932
1933    SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, r5g6b5, 565_565),
1934
1935    SIMPLE_NEAREST_FAST_PATH_COVER (SRC, x8r8g8b8, a8r8g8b8, x888_8888),
1936    SIMPLE_NEAREST_FAST_PATH_COVER (SRC, x8b8g8r8, a8b8g8r8, x888_8888),
1937    SIMPLE_NEAREST_FAST_PATH_PAD (SRC, x8r8g8b8, a8r8g8b8, x888_8888),
1938    SIMPLE_NEAREST_FAST_PATH_PAD (SRC, x8b8g8r8, a8b8g8r8, x888_8888),
1939    SIMPLE_NEAREST_FAST_PATH_NORMAL (SRC, x8r8g8b8, a8r8g8b8, x888_8888),
1940    SIMPLE_NEAREST_FAST_PATH_NORMAL (SRC, x8b8g8r8, a8b8g8r8, x888_8888),
1941
1942    SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, 8888_8888),
1943    SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, 8888_8888),
1944    SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, 8888_8888),
1945    SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, 8888_8888),
1946
1947    SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, r5g6b5, 8888_565),
1948
1949#define NEAREST_FAST_PATH(op,s,d)                \
1950    { PIXMAN_OP_ ## op,                          \
1951      PIXMAN_ ## s, SCALED_NEAREST_FLAGS,        \
1952      PIXMAN_null, 0,                            \
1953      PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,    \
1954      fast_composite_scaled_nearest,             \
1955    }
1956
1957    NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8),
1958    NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8),
1959    NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8),
1960    NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8),
1961
1962    NEAREST_FAST_PATH (SRC, x8r8g8b8, a8r8g8b8),
1963    NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8),
1964    NEAREST_FAST_PATH (SRC, x8b8g8r8, a8b8g8r8),
1965    NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8),
1966
1967    NEAREST_FAST_PATH (OVER, x8r8g8b8, x8r8g8b8),
1968    NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8),
1969    NEAREST_FAST_PATH (OVER, x8b8g8r8, x8b8g8r8),
1970    NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8),
1971
1972    NEAREST_FAST_PATH (OVER, x8r8g8b8, a8r8g8b8),
1973    NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8),
1974    NEAREST_FAST_PATH (OVER, x8b8g8r8, a8b8g8r8),
1975    NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8),
1976
1977#define SIMPLE_ROTATE_FLAGS(angle)                   \
1978    (FAST_PATH_ROTATE_ ## angle ## _TRANSFORM   |    \
1979     FAST_PATH_NEAREST_FILTER                  |    \
1980     FAST_PATH_SAMPLES_COVER_CLIP_NEAREST      |    \
1981     FAST_PATH_STANDARD_FLAGS)
1982
1983#define SIMPLE_ROTATE_FAST_PATH(op,s,d,suffix)       \
1984    { PIXMAN_OP_ ## op,                              \
1985      PIXMAN_ ## s, SIMPLE_ROTATE_FLAGS (90),        \
1986      PIXMAN_null, 0,                                \
1987      PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,        \
1988      fast_composite_rotate_90_##suffix,             \
1989    },                                               \
1990    { PIXMAN_OP_ ## op,                              \
1991      PIXMAN_ ## s, SIMPLE_ROTATE_FLAGS (270),       \
1992      PIXMAN_null, 0,                                \
1993      PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,        \
1994      fast_composite_rotate_270_##suffix,            \
1995    }
1996
1997    SIMPLE_ROTATE_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888),
1998    SIMPLE_ROTATE_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888),
1999    SIMPLE_ROTATE_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888),
2000    SIMPLE_ROTATE_FAST_PATH (SRC, r5g6b5, r5g6b5, 565),
2001    SIMPLE_ROTATE_FAST_PATH (SRC, a8, a8, 8),
2002
2003 /* Simple repeat fast path entry. */
2004    { PIXMAN_OP_any,
2005      PIXMAN_any,
2006      (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | FAST_PATH_BITS_IMAGE |
2007       FAST_PATH_NORMAL_REPEAT),
2008      PIXMAN_any, 0,
2009      PIXMAN_any, FAST_PATH_STD_DEST_FLAGS,
2010 fast_composite_tiled_repeat
2011 },
2012
2013 { PIXMAN_OP_NONE },
2014};
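/*
 * Note (added for clarity, not part of pixman-fast-path.c): each entry in the
 * c_fast_paths table above pairs a composite operator, source/mask/destination
 * formats and the flag bits a request must satisfy with the specialized
 * routine to run.  The lookup machinery scans such tables in order and takes
 * the first matching entry, which is why the specific memcpy SRC paths come
 * before the generic tiled-repeat catch-all, and why { PIXMAN_OP_NONE }
 * terminates the table.
 */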
2015
2016#ifdef WORDS_BIGENDIAN
2017#define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (32 - (offs) - (n)))
2018#else
2019#define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (offs))
2020#endif
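/*
 * Worked example (added for clarity, not in the original source):
 * A1_FILL_MASK (n, offs) builds a mask covering n a1 pixels starting at bit
 * offset offs within one 32-bit word.  On a little-endian build,
 * A1_FILL_MASK (4, 3) == ((1U << 4) - 1) << 3 == 0x00000078; on a big-endian
 * build the same four pixels sit at the other end of the word and the mask is
 * ((1U << 4) - 1) << (32 - 3 - 4) == 0x1e000000.
 */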
2021
2022static force_inline void
2023pixman_fill1_line (uint32_t *dst, int offs, int width, int v)
2024{
2025 if (offs)
2026 {
2027 int leading_pixels = 32 - offs;
2028 if (leading_pixels >= width)
2029 {
2030 if (v)
2031	    *dst |= A1_FILL_MASK (width, offs);
2032	else
2033	    *dst &= ~A1_FILL_MASK (width, offs);
2034 return;
2035 }
2036 else
2037 {
2038 if (v)
2039	    *dst++ |= A1_FILL_MASK (leading_pixels, offs);
2040	else
2041	    *dst++ &= ~A1_FILL_MASK (leading_pixels, offs);
2042 width -= leading_pixels;
2043 }
2044 }
2045 while (width >= 32)
2046 {
2047 if (v)
2048 *dst++ = 0xFFFFFFFF;
2049 else
2050 *dst++ = 0;
2051 width -= 32;
2052 }
2053 if (width > 0)
2054 {
2055 if (v)
2056	    *dst |= A1_FILL_MASK (width, 0);
2057	else
2058	    *dst &= ~A1_FILL_MASK (width, 0);
2059 }
2060}
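/*
 * Sketch of the flow above (added for clarity, not in the original source):
 * a 1 bpp fill is split into a partial leading word masked with A1_FILL_MASK,
 * a run of whole 32-pixel words written as 0xFFFFFFFF or 0, and a partial
 * trailing word.  For example, offs = 30 and width = 3 touches the two pixels
 * at offsets 30-31 of the first word and pixel 0 of the following word.
 */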
2061
2062static void
2063pixman_fill1 (uint32_t *bits,
2064 int stride,
2065 int x,
2066 int y,
2067 int width,
2068 int height,
2069 uint32_t xor)
2070{
2071 uint32_t *dst = bits + y * stride + (x >> 5);
2072 int offs = x & 31;
2073
2074 if (xor & 1)
2075 {
2076 while (height--)
2077 {
2078 pixman_fill1_line (dst, offs, width, 1);
2079 dst += stride;
2080 }
2081 }
2082 else
2083 {
2084 while (height--)
2085 {
2086 pixman_fill1_line (dst, offs, width, 0);
2087 dst += stride;
2088 }
2089 }
2090}
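/*
 * Note (added for clarity, not in the original source): each uint32_t holds
 * 32 a1 pixels, so the starting word above is bits + y * stride + (x >> 5)
 * and offs = x & 31 is the pixel offset inside that word.  For x = 70 the
 * fill starts in word 2 at pixel offset 6.
 */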
2091
2092static void
2093pixman_fill8 (uint32_t *bits,
2094 int stride,
2095 int x,
2096 int y,
2097 int width,
2098 int height,
2099 uint32_t xor)
2100{
2101 int byte_stride = stride * (int) sizeof (uint32_t);
2102 uint8_t *dst = (uint8_t *) bits;
2103 uint8_t v = xor & 0xff;
2104 int i;
2105
2106 dst = dst + y * byte_stride + x;
2107
2108 while (height--)
2109 {
2110 for (i = 0; i < width; ++i)
2111 dst[i] = v;
2112
2113 dst += byte_stride;
2114 }
2115}
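/*
 * Note (added for clarity, not in the original source): the stride argument
 * is given in uint32_t units, so the 8 bpp fill converts it to a byte stride
 * with stride * sizeof (uint32_t).  For instance, an a8 image 100 pixels wide
 * with stride 25 advances 100 bytes per row; pixman_fill16 below performs the
 * analogous conversion to uint16_t units.
 */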
2116
2117static void
2118pixman_fill16 (uint32_t *bits,
2119 int stride,
2120 int x,
2121 int y,
2122 int width,
2123 int height,
2124 uint32_t xor)
2125{
2126 int short_stride =
2127 (stride * (int)sizeof (uint32_t)) / (int)sizeof (uint16_t);
2128 uint16_t *dst = (uint16_t *)bits;
2129 uint16_t v = xor & 0xffff;
2130 int i;
2131
2132 dst = dst + y * short_stride + x;
2133
2134 while (height--)
2135 {
2136 for (i = 0; i < width; ++i)
2137 dst[i] = v;
2138
2139 dst += short_stride;
2140 }
2141}
2142
2143static void
2144pixman_fill32 (uint32_t *bits,
2145 int stride,
2146 int x,
2147 int y,
2148 int width,
2149 int height,
2150 uint32_t xor)
2151{
2152 int i;
2153
2154 bits = bits + y * stride + x;
2155
2156 while (height--)
2157 {
2158 for (i = 0; i < width; ++i)
2159 bits[i] = xor;
2160
2161 bits += stride;
2162 }
2163}
2164
2165static pixman_bool_t
2166fast_path_fill (pixman_implementation_t *imp,
2167 uint32_t * bits,
2168 int stride,
2169 int bpp,
2170 int x,
2171 int y,
2172 int width,
2173 int height,
2174 uint32_t xor)
2175{
2176 switch (bpp)
2177 {
2178 case 1:
2179 pixman_fill1 (bits, stride, x, y, width, height, xor);
2180 break;
2181
2182 case 8:
2183 pixman_fill8 (bits, stride, x, y, width, height, xor);
2184 break;
2185
2186 case 16:
2187 pixman_fill16 (bits, stride, x, y, width, height, xor);
2188 break;
2189
2190 case 32:
2191 pixman_fill32 (bits, stride, x, y, width, height, xor);
2192 break;
2193
2194 default:
2195 return _pixman_implementation_fill (
2196 imp->delegate, bits, stride, bpp, x, y, width, height, xor);
2197 break;
2198 }
2199
2200 return TRUE;
2201}
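/*
 * Note (added for clarity, not in the original source): depths without a
 * dedicated loop here (for instance 24 bpp) fall through to
 * _pixman_implementation_fill on imp->delegate, i.e. the next implementation
 * in the chain, which is the fallback passed to _pixman_implementation_create
 * in _pixman_implementation_create_fast_path below.
 */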
2202
2203pixman_implementation_t *
2204_pixman_implementation_create_fast_path (pixman_implementation_t *fallback)
2205{
2206 pixman_implementation_t *imp = _pixman_implementation_create (fallback, c_fast_paths);
2207
2208 imp->fill = fast_path_fill;
2209
2210 return imp;
2211}