Lines matching refs:yo (references to the symbol 'yo'); each line is prefixed with its line number in the source file.
259 * an X offset 'x0' or 'xo' and a Y offset 'yo.'
261 uint32_t xo, yo;
265 for (yo = y0 * xtile_width; yo < y1 * xtile_width; yo += xtile_width) {
267 * Only 'yo' contributes to those bits in the total offset,
272 uint32_t swizzle = ((yo >> 3) ^ (yo >> 4)) & swizzle_bit;
274 mem_copy(dst + ((x0 + yo) ^ swizzle), src + x0, x1 - x0);
277 mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + xo, xtile_span);
280 mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2);
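The X-tiling lines above (259-280) build each destination address from an X offset 'xo' plus a row offset 'yo', then XOR in a swizzle that is recomputed once per row. A minimal standalone sketch of that address math, assuming the usual 512-byte X-tile row pitch for 'xtile_width' and a 'swizzle_bit' of either 0 or 1 << 6; the helper name 'xtile_offset' is hypothetical, not part of the driver:

   #include <stdint.h>

   /* Hypothetical helper mirroring 'dst + ((xo + yo) ^ swizzle)' above.
    * 'xo' is the byte position within the tile row (so xo < 512) and 'y'
    * is the row within the tile; xtile_width is assumed to be 512.
    */
   static inline uint32_t
   xtile_offset(uint32_t xo, uint32_t y, uint32_t swizzle_bit)
   {
      uint32_t yo = y * 512;   /* assumed xtile_width */

      /* Only 'yo' can reach bits 9 and 10 of the total offset, so the
       * swizzle (bit 9 ^ bit 10, moved down to bit 6) depends on 'yo'
       * alone and can be hoisted out of the inner loop, as in line 272.
       */
      uint32_t swizzle = ((yo >> 3) ^ (yo >> 4)) & swizzle_bit;

      return (xo + yo) ^ swizzle;
   }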
307 * an X offset 'xo0' or 'xo' and a Y offset 'yo.'
326 uint32_t x, yo;
331 for (yo = y0 * column_width; yo < y1 * column_width; yo += column_width) {
335 mem_copy(dst + ((xo0 + yo) ^ swizzle0), src + x0, x1 - x0);
341 mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x, ytile_span);
346 mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2);
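Lines 307-346 do the same for Y tiling, where the X offset 'xo0' or 'xo' carries both the byte-within-column position and the column's starting offset, and 'yo' advances by 'column_width' per row. A sketch of that offset math, assuming the common Y-tile geometry (16-byte columns, 32 rows tall, hence 512 bytes per column) and assuming the per-column swizzle folds bit 9 of the X offset down to bit 6 in the same spirit as the X-tiled case; 'ytile_offset' is a hypothetical name:

   #include <stdint.h>

   /* Hypothetical helper mirroring '(xo0 + yo) ^ swizzle0' and
    * '(xo + yo) ^ swizzle' above, for linear byte position 'x' in row 'y'.
    * column_width (= ytile_span) and bytes_per_column are assumed values.
    */
   static inline uint32_t
   ytile_offset(uint32_t x, uint32_t y, uint32_t swizzle_bit)
   {
      const uint32_t column_width = 16;          /* assumed ytile_span */
      const uint32_t bytes_per_column = 16 * 32; /* assumed span * height */

      uint32_t xo = (x % column_width)                     /* byte in column */
                  + (x / column_width) * bytes_per_column; /* column start   */
      uint32_t yo = y * column_width;                      /* row in column  */

      /* 'yo' tops out at 31 * 16 = 496 and never reaches bit 9, so the
       * swizzle depends on the X offset alone; that is why 'swizzle0' and
       * 'swizzle' in the listing are computed outside the row loops.
       */
      uint32_t swizzle = (xo >> 3) & swizzle_bit;

      return (xo + yo) ^ swizzle;
   }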
352 for (yo = y1 * column_width; yo < y2 * column_width; yo += 4 * column_width) {
357 mem_copy(dst + ((xo0 + yo + 0 * column_width) ^ swizzle0), src + x0 + 0 * src_pitch, x1 - x0);
358 mem_copy(dst + ((xo0 + yo + 1 * column_width) ^ swizzle0), src + x0 + 1 * src_pitch, x1 - x0);
359 mem_copy(dst + ((xo0 + yo + 2 * column_width) ^ swizzle0), src + x0 + 2 * src_pitch, x1 - x0);
360 mem_copy(dst + ((xo0 + yo + 3 * column_width) ^ swizzle0), src + x0 + 3 * src_pitch, x1 - x0);
367 mem_copy_align16(dst + ((xo + yo + 0 * column_width) ^ swizzle), src + x + 0 * src_pitch, ytile_span);
368 mem_copy_align16(dst + ((xo + yo + 1 * column_width) ^ swizzle), src + x + 1 * src_pitch, ytile_span);
369 mem_copy_align16(dst + ((xo + yo + 2 * column_width) ^ swizzle), src + x + 2 * src_pitch, ytile_span);
370 mem_copy_align16(dst + ((xo + yo + 3 * column_width) ^ swizzle), src + x + 3 * src_pitch, ytile_span);
376 mem_copy_align16(dst + ((xo + yo + 0 * column_width) ^ swizzle), src + x2 + 0 * src_pitch, x3 - x2);
377 mem_copy_align16(dst + ((xo + yo + 1 * column_width) ^ swizzle), src + x2 + 1 * src_pitch, x3 - x2);
378 mem_copy_align16(dst + ((xo + yo + 2 * column_width) ^ swizzle), src + x2 + 2 * src_pitch, x3 - x2);
379 mem_copy_align16(dst + ((xo + yo + 3 * column_width) ^ swizzle), src + x2 + 3 * src_pitch, x3 - x2);
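Lines 352-379 handle the middle rows four at a time: 'yo' steps by 4 * column_width and each copy is repeated for four consecutive rows, reusing one precomputed swizzle and one set of offsets per group. That only works because the boundaries y1 and y2 fall on 4-row multiples, splitting the range into a leading chunk [y0, y1), an unrolled middle [y1, y2), and a trailing chunk [y2, y3). A small sketch of how those bounds could be chosen, with assumed ALIGN_UP/ALIGN_DOWN helpers rather than the driver's own code:

   #include <stdint.h>

   #define ALIGN_UP(v, a)   (((v) + (a) - 1) & ~((a) - 1))
   #define ALIGN_DOWN(v, a) ((v) & ~((a) - 1))

   /* Hypothetical helper: pick y1 and y2 so the unrolled middle loop above
    * always covers whole groups of four rows, clamped to [y0, y3].
    */
   static inline void
   split_rows(uint32_t y0, uint32_t y3, uint32_t *y1, uint32_t *y2)
   {
      *y1 = ALIGN_UP(y0, 4);      /* first 4-aligned row at or after y0 */
      if (*y1 > y3)
         *y1 = y3;
      *y2 = ALIGN_DOWN(y3, 4);    /* last 4-aligned row at or before y3 */
      if (*y2 < *y1)
         *y2 = *y1;
   }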
386 for (yo = y2 * column_width; yo < y3 * column_width; yo += column_width) {
390 mem_copy(dst + ((xo0 + yo) ^ swizzle0), src + x0, x1 - x0);
396 mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x, ytile_span);
401 mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2);
423 * an X offset 'x0' or 'xo' and a Y offset 'yo.'
425 uint32_t xo, yo;
429 for (yo = y0 * xtile_width; yo < y1 * xtile_width; yo += xtile_width) {
431 * Only 'yo' contributes to those bits in the total offset,
436 uint32_t swizzle = ((yo >> 3) ^ (yo >> 4)) & swizzle_bit;
438 mem_copy(dst + x0, src + ((x0 + yo) ^ swizzle), x1 - x0);
441 mem_copy_align16(dst + xo, src + ((xo + yo) ^ swizzle), xtile_span);
444 mem_copy_align16(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2);
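Lines 423-444 run the X-tiled math in the opposite direction: the swizzled '(xo + yo) ^ swizzle' expression now indexes the tiled source while plain 'x0', 'xo', and 'x2' index the linear destination. The same split of each row is visible: an unaligned head [x0, x1), whole spans [x1, x2) copied with the aligned fast path ('mem_copy_align16' in the listing), and a tail [x2, x3). A standalone sketch of that row copy, using plain memcpy instead of the driver's copy callbacks and assuming a 64-byte 'xtile_span' with x0 <= x1 <= x2 <= x3 and x1, x2 span-aligned; 'xtile_row_to_linear' is a hypothetical name:

   #include <stdint.h>
   #include <string.h>

   /* Hypothetical sketch of lines 438-444: detile one X-tile row back into
    * linear memory.  'yo' and 'swizzle' are the per-row values from the
    * surrounding loop (lines 429-436).
    */
   static void
   xtile_row_to_linear(char *dst, const char *src,
                       uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                       uint32_t yo, uint32_t swizzle)
   {
      uint32_t xo;

      memcpy(dst + x0, src + ((x0 + yo) ^ swizzle), x1 - x0);   /* head  */

      for (xo = x1; xo < x2; xo += 64)                          /* spans */
         memcpy(dst + xo, src + ((xo + yo) ^ swizzle), 64);

      /* The listing reuses 'xo' here, which has advanced to x2. */
      memcpy(dst + x2, src + ((x2 + yo) ^ swizzle), x3 - x2);   /* tail  */
   }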
471 * an X offset 'xo0' or 'xo' and a Y offset 'yo.'
490 uint32_t x, yo;
495 for (yo = y0 * column_width; yo < y1 * column_width; yo += column_width) {
499 mem_copy(dst + x0, src + ((xo0 + yo) ^ swizzle0), x1 - x0);
505 mem_copy_align16(dst + x, src + ((xo + yo) ^ swizzle), ytile_span);
510 mem_copy_align16(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2);
516 for (yo = y1 * column_width; yo < y2 * column_width; yo += 4 * column_width) {
521 mem_copy(dst + x0 + 0 * dst_pitch, src + ((xo0 + yo + 0 * column_width) ^ swizzle0), x1 - x0);
522 mem_copy(dst + x0 + 1 * dst_pitch, src + ((xo0 + yo + 1 * column_width) ^ swizzle0), x1 - x0);
523 mem_copy(dst + x0 + 2 * dst_pitch, src + ((xo0 + yo + 2 * column_width) ^ swizzle0), x1 - x0);
524 mem_copy(dst + x0 + 3 * dst_pitch, src + ((xo0 + yo + 3 * column_width) ^ swizzle0), x1 - x0);
531 mem_copy_align16(dst + x + 0 * dst_pitch, src + ((xo + yo + 0 * column_width) ^ swizzle), ytile_span);
532 mem_copy_align16(dst + x + 1 * dst_pitch, src + ((xo + yo + 1 * column_width) ^ swizzle), ytile_span);
533 mem_copy_align16(dst + x + 2 * dst_pitch, src + ((xo + yo + 2 * column_width) ^ swizzle), ytile_span);
534 mem_copy_align16(dst + x + 3 * dst_pitch, src + ((xo + yo + 3 * column_width) ^ swizzle), ytile_span);
540 mem_copy_align16(dst + x2 + 0 * dst_pitch, src + ((xo + yo + 0 * column_width) ^ swizzle), x3 - x2);
541 mem_copy_align16(dst + x2 + 1 * dst_pitch, src + ((xo + yo + 1 * column_width) ^ swizzle), x3 - x2);
542 mem_copy_align16(dst + x2 + 2 * dst_pitch, src + ((xo + yo + 2 * column_width) ^ swizzle), x3 - x2);
543 mem_copy_align16(dst + x2 + 3 * dst_pitch, src + ((xo + yo + 3 * column_width) ^ swizzle), x3 - x2);
550 for (yo = y2 * column_width; yo < y3 * column_width; yo += column_width) {
554 mem_copy(dst + x0, src + ((xo0 + yo) ^ swizzle0), x1 - x0);
560 mem_copy_align16(dst + x, src + ((xo + yo) ^ swizzle), ytile_span);
565 mem_copy_align16(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2);
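Lines 471-565 mirror the Y-tiled store loops one for one, with 'src' and 'dst' swapped and the identical '(xo + yo) ^ swizzle' offsets now applied to the tiled source. Because XOR-ing in the swizzle bit is its own inverse, recomputing the same expression on readback lands on exactly the byte the store wrote. A tiny self-contained round-trip check (assumed names, not driver code):

   #include <assert.h>
   #include <stdint.h>
   #include <string.h>

   /* Write one byte through the tiled-offset expression and read it back
    * through the same expression, as the store loops (lines 331-401) and
    * load loops (lines 495-565) do for whole spans.
    */
   static void
   roundtrip_one_byte(char *tiled, uint32_t xo, uint32_t yo,
                      uint32_t swizzle, char value)
   {
      char out;

      memcpy(tiled + ((xo + yo) ^ swizzle), &value, 1);   /* tile   */
      memcpy(&out, tiled + ((xo + yo) ^ swizzle), 1);     /* detile */
      assert(out == value);
   }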