2 Copyright 1993-2001 by Easy Software Products.
3 Copyright 1991, 1996, 1997, 1998 Aladdin Enterprises. All rights reserved.
5 This file is part of GNU Ghostscript.
7 GNU Ghostscript is distributed in the hope that it will be useful, but
8 WITHOUT ANY WARRANTY. No author or distributor accepts responsibility
9 to anyone for the consequences of using it or for whether it serves any
10 particular purpose or works at all, unless he says so in writing. Refer
11 to the GNU General Public License for full details.
13 Everyone is granted permission to copy, modify and redistribute GNU
14 Ghostscript, but only under the conditions described in the GNU General
15 Public License. A copy of this license is supposed to have been given
16 to you along with GNU Ghostscript so you can know your rights and
17 responsibilities. It should be in a file named COPYING. Among other
18 things, the copyright notice and this notice must be preserved on all
21 Aladdin Enterprises supports the work of the GNU Project, but is not
22 affiliated with the Free Software Foundation or the GNU Project. GNU
23 Ghostscript, as distributed by Aladdin Enterprises, does not require any
24 GNU software to build or run it.
28 /* Command list document- and page-level code. */
36 #include "gxdevmem.h" /* must precede gxcldev.h */
41 /* Forward declarations of driver procedures */
42 private dev_proc_open_device(clist_open
);
43 private dev_proc_output_page(clist_output_page
);
44 private dev_proc_close_device(clist_close
);
45 private dev_proc_get_band(clist_get_band
);
48 extern dev_proc_fill_rectangle(clist_fill_rectangle
);
49 extern dev_proc_copy_mono(clist_copy_mono
);
50 extern dev_proc_copy_color(clist_copy_color
);
51 extern dev_proc_copy_alpha(clist_copy_alpha
);
52 extern dev_proc_strip_tile_rectangle(clist_strip_tile_rectangle
);
53 extern dev_proc_strip_copy_rop(clist_strip_copy_rop
);
56 extern dev_proc_fill_path(clist_fill_path
);
57 extern dev_proc_stroke_path(clist_stroke_path
);
60 extern dev_proc_fill_mask(clist_fill_mask
);
61 extern dev_proc_begin_image(clist_begin_image
);
62 extern dev_proc_begin_typed_image(clist_begin_typed_image
);
63 extern dev_proc_create_compositor(clist_create_compositor
);
66 extern dev_proc_get_bits_rectangle(clist_get_bits_rectangle
);
68 /* Other forward declarations */
69 private int clist_put_current_params(P1(gx_device_clist_writer
*cldev
));
/* The device procedures */
/*
 * Procedure vector for the clist (command list / banding) device writer.
 * Drawing procedures record commands; most queries forward to the target
 * device (gx_forward_*) or fall back to defaults (gx_default_*).
 * NOTE(review): this chunk is garbled -- original source line numbers are
 * fused into the text and several table entries (e.g. open/close, fill/copy
 * slots) are missing from this view; entries below are preserved verbatim.
 */
71 const gx_device_procs gs_clist_device_procs
= {
74 gx_forward_get_initial_matrix
,
75 gx_default_sync_output
,
78 gx_forward_map_rgb_color
,
79 gx_forward_map_color_rgb
,
81 gx_default_tile_rectangle
,
86 gx_forward_get_params
,
87 gx_forward_put_params
,
88 gx_forward_map_cmyk_color
,
89 gx_forward_get_xfont_procs
,
90 gx_forward_get_xfont_device
,
91 gx_forward_map_rgb_alpha_color
,
92 gx_forward_get_page_device
,
93 gx_forward_get_alpha_bits
,
100 gx_default_fill_trapezoid
,
101 gx_default_fill_parallelogram
,
102 gx_default_fill_triangle
,
103 gx_default_draw_thin_line
,
105 gx_default_image_data
,
106 gx_default_end_image
,
107 clist_strip_tile_rectangle
,
108 clist_strip_copy_rop
,
109 gx_forward_get_clipping_box
,
110 clist_begin_typed_image
,
111 clist_get_bits_rectangle
,
112 gx_forward_map_color_rgb_alpha
,
113 clist_create_compositor
,
114 gx_forward_get_hardware_params
,
115 gx_default_text_begin
118 /* ------ Define the command set and syntax ------ */
120 /* Initialization for imager state. */
121 /* The initial scale is arbitrary. */
/* Template imager state copied into each writer on reset (see clist_reset). */
122 const gs_imager_state clist_imager_state_initial
=
123 {gs_imager_state_initial(300.0 / 72.0)};
126 * The buffer area (data, data_size) holds a bitmap cache when both writing
127 * and reading. The rest of the space is used for the command buffer and
128 * band state bookkeeping when writing, and for the rendering buffer (image
129 * device) when reading. For the moment, we divide the space up
130 * arbitrarily, except that we allocate less space for the bitmap cache if
131 * the device doesn't need halftoning.
133 * All the routines for allocating tables in the buffer are idempotent, so
134 * they can be used to check whether a given-size buffer is large enough.
/*
 * clist_tile_cache_size -- choose how much of the band buffer to devote to
 * the tile/character bitmap cache: roughly data_size/5, rounded down to the
 * cached-bits alignment; reduced by 25% for devices that never halftone,
 * and clamped to a minimum of 1024 bytes.
 * NOTE(review): garbled chunk -- the declaration of bits_size, the return
 * statement and the braces are on source lines missing from this view;
 * code below is preserved byte-for-byte.
 */
138 * Calculate the desired size for the tile cache.
141 clist_tile_cache_size(const gx_device
* target
, uint data_size
)
144 (data_size
/ 5) & -align_cached_bits_mod
; /* arbitrary */
146 if (!gx_device_must_halftone(target
)) { /* No halftones -- cache holds only Patterns & characters. */
147 bits_size
-= bits_size
>> 2;
149 #define min_bits_size 1024
150 if (bits_size
< min_bits_size
)
151 bits_size
= min_bits_size
;
/*
 * clist_init_tile_cache -- carve the tile-cache area into a hash table
 * (tile_table) followed by the bitmap chunk, sizing the hash table from an
 * estimated average character bitmap size.  Fails with rangecheck if even a
 * minimal hash table cannot fit.
 * NOTE(review): garbled chunk -- several interior source lines are missing
 * (e.g. the loop step that shrinks hc, the final return); code below is
 * preserved byte-for-byte, comments only added.
 */
157 * Initialize the allocation for the tile cache. Sets: tile_hash_mask,
158 * tile_max_count, tile_table, chunk (structure), bits (structure).
161 clist_init_tile_cache(gx_device
* dev
, byte
* init_data
, ulong data_size
)
163 gx_device_clist_writer
* const cdev
=
164 &((gx_device_clist
*)dev
)->writer
;
165 byte
*data
= init_data
;
166 uint bits_size
= data_size
;
168 * Partition the bits area between the hash table and the actual
169 * bitmaps. The per-bitmap overhead is about 24 bytes; if the
170 * average character size is 10 points, its bitmap takes about 24 +
171 * 0.5 * 10/72 * xdpi * 10/72 * ydpi / 8 bytes (the 0.5 being a
172 * fudge factor to account for characters being narrower than they
173 * are tall), which gives us a guideline for the size of the hash
177 (uint
) (dev
->x_pixels_per_inch
* dev
->y_pixels_per_inch
*
178 (0.5 * 10 / 72 * 10 / 72 / 8)) + 24;
179 uint hc
= bits_size
/ avg_char_size
;
182 while ((hc
+ 1) & hc
)
183 hc
|= hc
>> 1; /* make mask (power of 2 - 1) */
185 hc
= 0xff; /* make allowance for halftone tiles */
187 hc
= 0xfff; /* cmd_op_set_tile_index has 12-bit operand */
188 /* Make sure the tables will fit. */
189 while (hc
>= 3 && (hsize
= (hc
+ 1) * sizeof(tile_hash
)) >= bits_size
)
192 return_error(gs_error_rangecheck
);
193 cdev
->tile_hash_mask
= hc
;
194 cdev
->tile_max_count
= hc
- (hc
>> 2);
195 cdev
->tile_table
= (tile_hash
*) data
;
198 gx_bits_cache_chunk_init(&cdev
->chunk
, data
, bits_size
);
199 gx_bits_cache_init(&cdev
->bits
, &cdev
->chunk
);
/*
 * clist_init_bands -- validate that one band's raster fits in data_size,
 * then record the band height and compute nbands = ceil(height/band_height).
 * NOTE(review): garbled chunk -- the function's return type, nbands
 * declaration, closing return and braces are on missing source lines;
 * code below is preserved byte-for-byte.
 */
204 * Initialize the allocation for the bands. Requires: target. Sets:
205 * page_band_height (=page_info.band_params.BandHeight), nbands.
208 clist_init_bands(gx_device
* dev
, uint data_size
, int band_width
,
211 gx_device_clist_writer
* const cdev
=
212 &((gx_device_clist
*)dev
)->writer
;
213 gx_device
*target
= cdev
->target
;
216 if (gdev_mem_data_size((gx_device_memory
*) target
, band_width
,
217 band_height
) > data_size
219 return_error(gs_error_rangecheck
);
220 cdev
->page_band_height
= band_height
;
/* ceil(target->height / band_height) */
221 nbands
= (target
->height
+ band_height
- 1) / band_height
;
222 cdev
->nbands
= nbands
;
224 if (gs_debug_c('l') | gs_debug_c(':'))
225 dlprintf4("[:]width=%d, band_width=%d, band_height=%d, nbands=%d\n",
226 target
->width
, band_width
, band_height
, nbands
);
232 * Initialize the allocation for the band states, which are used only
233 * when writing. Requires: nbands. Sets: states, cbuf, cend.
236 clist_init_states(gx_device
* dev
, byte
* init_data
, uint data_size
)
238 gx_device_clist_writer
* const cdev
=
239 &((gx_device_clist
*)dev
)->writer
;
240 ulong state_size
= cdev
->nbands
* (ulong
) sizeof(gx_clist_state
);
242 fprintf(stderr
, "DEBUG: init_data = %p for cdev->states!\n", init_data
);
245 * The +100 in the next line is bogus, but we don't know what the
246 * real check should be. We're effectively assuring that at least 100
247 * bytes will be available to buffer command operands.
249 if (state_size
+ sizeof(cmd_prefix
) + cmd_largest_size
+ 100 > data_size
)
250 return_error(gs_error_rangecheck
);
251 cdev
->states
= (gx_clist_state
*) init_data
;
252 cdev
->cbuf
= init_data
+ state_size
;
253 cdev
->cend
= init_data
+ data_size
;
/*
 * clist_init_data -- master buffer-partitioning routine: splits the band
 * buffer between the tile cache (bits_size, 8-byte aligned per the MRS
 * notes below) and the rendering/command area, derives the band height if
 * not fixed, then calls clist_init_tile_cache, clist_init_bands and
 * clist_init_states in turn.
 * NOTE(review): garbled chunk -- missing source lines include the default
 * BandWidth expression, the else-branch introducer, and the error checks
 * after the init calls; code below is preserved byte-for-byte.
 */
258 * Initialize all the data allocations. Requires: target. Sets:
259 * page_tile_cache_size, page_info.band_params.BandWidth,
260 * page_info.band_params.BandBufferSpace, + see above.
263 clist_init_data(gx_device
* dev
, byte
* init_data
, uint data_size
)
265 gx_device_clist_writer
* const cdev
=
266 &((gx_device_clist
*)dev
)->writer
;
267 gx_device
*target
= cdev
->target
;
268 const int band_width
=
269 cdev
->page_info
.band_params
.BandWidth
=
270 (cdev
->band_params
.BandWidth
? cdev
->band_params
.BandWidth
:
272 int band_height
= cdev
->band_params
.BandHeight
;
273 const uint band_space
=
274 cdev
->page_info
.band_params
.BandBufferSpace
=
275 (cdev
->band_params
.BandBufferSpace
?
276 cdev
->band_params
.BandBufferSpace
: data_size
);
277 byte
*data
= init_data
;
278 uint size
= band_space
;
282 if (band_height
) { /*
283 * The band height is fixed, so the band buffer requirement
284 * is completely determined.
286 uint band_data_size
=
287 gdev_mem_data_size((gx_device_memory
*) target
,
288 band_width
, band_height
);
290 if (band_data_size
>= band_space
)
291 return_error(gs_error_rangecheck
);
292 bits_size
= min(band_space
- band_data_size
, data_size
>> 1);
293 /**** MRS - make sure bits_size is 64-bit aligned for clist data!!! ****/
294 bits_size
= (bits_size
+ 7) & ~7;
296 * Choose the largest band height that will fit in the
297 * rendering-time buffer.
299 bits_size
= clist_tile_cache_size(target
, band_space
);
300 bits_size
= min(bits_size
, data_size
>> 1);
301 /**** MRS - make sure bits_size is 64-bit aligned for clist data!!! ****/
302 bits_size
= (bits_size
+ 7) & ~7;
303 band_height
= gdev_mem_max_height((gx_device_memory
*) target
,
305 band_space
- bits_size
);
306 if (band_height
== 0)
307 return_error(gs_error_rangecheck
);
309 code
= clist_init_tile_cache(dev
, data
, bits_size
);
312 cdev
->page_tile_cache_size
= bits_size
;
315 code
= clist_init_bands(dev
, size
, band_width
, band_height
);
318 return clist_init_states(dev
, data
, data_size
- bits_size
);
/*
 * clist_reset -- re-partition the buffer (clist_init_data) and reset all
 * writer bookkeeping: clears the tile hash table, resets the command
 * pointer and band ranges, reinitializes every per-band state to
 * cls_initial_values, and invalidates the cached all-band parameters
 * (tile params, imager state, clip path, color space, transfer/halftone
 * ids) so they are re-emitted on first use.
 * NOTE(review): garbled chunk -- missing lines include the error check
 * after clist_init_data, loop-variable declarations (band, i) and the
 * final return; code below is preserved byte-for-byte.
 */
321 * Reset the device state (for writing). This routine requires only
322 * data, data_size, and target to be set, and is idempotent.
325 clist_reset(gx_device
* dev
)
327 gx_device_clist_writer
* const cdev
=
328 &((gx_device_clist
*)dev
)->writer
;
329 int code
= clist_init_data(dev
, cdev
->data
, cdev
->data_size
);
333 return (cdev
->permanent_error
= code
);
334 /* Now initialize the rest of the state. */
335 cdev
->permanent_error
= 0;
336 nbands
= cdev
->nbands
;
337 cdev
->ymin
= cdev
->ymax
= -1; /* render_init not done yet */
338 memset(cdev
->tile_table
, 0, (cdev
->tile_hash_mask
+ 1) *
339 sizeof(*cdev
->tile_table
));
340 cdev
->cnext
= cdev
->cbuf
;
342 cdev
->band_range_list
.head
= cdev
->band_range_list
.tail
= 0;
343 cdev
->band_range_min
= 0;
344 cdev
->band_range_max
= nbands
- 1;
347 gx_clist_state
*states
= cdev
->states
;
349 for (band
= 0; band
< nbands
; band
++, states
++) {
350 static const gx_clist_state cls_initial
=
351 {cls_initial_values
};
353 *states
= cls_initial
;
357 * Round up the size of the per-tile band mask so that the bits,
358 * which follow it, stay aligned.
360 cdev
->tile_band_mask_size
=
361 ((nbands
+ (align_bitmap_mod
* 8 - 1)) >> 3) &
362 ~(align_bitmap_mod
- 1);
364 * Initialize the all-band parameters to impossible values,
365 * to force them to be written the first time they are used.
367 memset(&cdev
->tile_params
, 0, sizeof(cdev
->tile_params
));
368 cdev
->tile_depth
= 0;
369 cdev
->tile_known_min
= nbands
;
370 cdev
->tile_known_max
= -1;
371 cdev
->imager_state
= clist_imager_state_initial
;
372 cdev
->clip_path
= NULL
;
373 cdev
->clip_path_id
= gs_no_id
;
374 cdev
->color_space
= 0;
378 for (i
= 0; i
< countof(cdev
->transfer_ids
); ++i
)
379 cdev
->transfer_ids
[i
] = gs_no_id
;
381 cdev
->black_generation_id
= gs_no_id
;
382 cdev
->undercolor_removal_id
= gs_no_id
;
383 cdev
->device_halftone_id
= gs_no_id
;
/*
 * clist_init -- full writer (re)initialization: clist_reset plus clearing
 * the per-page image/error-retry bookkeeping fields.
 * NOTE(review): garbled chunk -- the error check on clist_reset's code and
 * the final return are on missing source lines; preserved byte-for-byte.
 */
387 * Initialize the device state (for writing). This routine requires only
388 * data, data_size, and target to be set, and is idempotent.
391 clist_init(gx_device
* dev
)
393 gx_device_clist_writer
* const cdev
=
394 &((gx_device_clist
*)dev
)->writer
;
395 int code
= clist_reset(dev
);
398 cdev
->image_enum_id
= gs_no_id
;
399 cdev
->error_is_retryable
= 0;
400 cdev
->driver_call_nesting
= 0;
401 cdev
->ignore_lo_mem_warnings
= 0;
/*
 * clist_reinit_output_file -- set low-memory warning thresholds on the
 * open band files so partial-page rendering can reserve enough space for
 * the terminating block entries and one writer buffer flush.  Only done
 * when VM-error recovery is available.
 * NOTE(review): garbled chunk -- the declarations of code and c_block and
 * the final return are on missing source lines; preserved byte-for-byte.
 */
406 /* (Re)init open band files for output (set block size, etc). */
407 private int /* ret 0 ok, -ve error code */
408 clist_reinit_output_file(gx_device
*dev
)
409 { gx_device_clist_writer
* const cdev
=
410 &((gx_device_clist
*)dev
)->writer
;
413 /* bfile needs to guarantee cmd_blocks for: 1 band range, nbands */
414 /* & terminating entry */
415 int b_block
= sizeof(cmd_block
) * (cdev
->nbands
+ 2);
417 /* cfile needs to guarantee one writer buffer */
418 /* + one end_clip cmd (if during image's clip path setup) */
419 /* + an end_image cmd for each band (if during image) */
420 /* + end_cmds for each band and one band range */
422 = cdev
->cend
- cdev
->cbuf
+ 2 + cdev
->nbands
* 2 + (cdev
->nbands
+ 1);
424 /* All this is for partial page rendering's benefit, do only */
425 /* if partial page rendering is available */
426 if ( clist_test_VMerror_recoverable(cdev
) )
427 { if (cdev
->page_bfile
!= 0)
428 code
= clist_set_memory_warning(cdev
->page_bfile
, b_block
);
429 if (code
>= 0 && cdev
->page_cfile
!= 0)
430 code
= clist_set_memory_warning(cdev
->page_cfile
, c_block
);
/*
 * clist_emit_page_header -- write the target's current parameters at the
 * head of the page (needed when async rendering reads pages back out of
 * order), retrying through clist_VMerror_recover on VM errors, and record
 * any final failure as a permanent (non-retryable) error.
 * NOTE(review): garbled chunk -- the condition at original line 444 may
 * have lost a '!' (pass-thru params are normally emitted when NOT
 * disabled); missing lines prevent confirming -- verify against upstream.
 * Code below is preserved byte-for-byte.
 */
435 /* Write out the current parameters that must be at the head of each page */
436 /* if async rendering is in effect */
438 clist_emit_page_header(gx_device
*dev
)
440 gx_device_clist_writer
* const cdev
=
441 &((gx_device_clist
*)dev
)->writer
;
444 if ( (cdev
->disable_mask
& clist_disable_pass_thru_params
) )
446 if ( ( code
= clist_put_current_params(cdev
) ) >= 0 )
448 while ( ( code
= clist_VMerror_recover(cdev
, code
) ) < 0 );
449 cdev
->permanent_error
= (code
< 0) ? code
: 0;
450 if (cdev
->permanent_error
< 0)
451 cdev
->error_is_retryable
= 0;
/*
 * clist_open_output_file -- create/open the command (cfile) and block
 * (bfile) band files unless external management is requested, after
 * reinitializing the writer state.  On any failure both files are closed
 * and the error is recorded as permanent and non-retryable.
 * NOTE(review): garbled chunk -- the fmode declaration, the error checks
 * chaining the clist_fopen calls, and the returns are on missing source
 * lines; preserved byte-for-byte.
 */
456 /* Open the device's bandfiles */
458 clist_open_output_file(gx_device
*dev
)
460 gx_device_clist_writer
* const cdev
=
461 &((gx_device_clist
*)dev
)->writer
;
465 if (cdev
->do_not_open_or_close_bandfiles
)
466 return 0; /* external bandfile open/close managed externally */
467 cdev
->page_cfile
= 0; /* in case of failure */
468 cdev
->page_bfile
= 0; /* ditto */
469 code
= clist_init(dev
);
473 strcat(fmode
, gp_fmode_binary_suffix
);
474 cdev
->page_cfname
[0] = 0; /* create a new file */
475 cdev
->page_bfname
[0] = 0; /* ditto */
476 cdev
->page_bfile_end_pos
= 0;
477 if ((code
= clist_fopen(cdev
->page_cfname
, fmode
, &cdev
->page_cfile
,
478 cdev
->bandlist_memory
, cdev
->bandlist_memory
,
480 (code
= clist_fopen(cdev
->page_bfname
, fmode
, &cdev
->page_bfile
,
481 cdev
->bandlist_memory
, cdev
->bandlist_memory
,
483 (code
= clist_reinit_output_file(dev
)) < 0
485 clist_close_output_file(dev
);
486 cdev
->permanent_error
= code
;
487 cdev
->error_is_retryable
= 0;
/*
 * clist_close_output_file -- close and delete (true => unlink) both band
 * files if open, then NULL the handles so a later close is a no-op.
 * NOTE(review): garbled chunk -- closing braces and the final return are
 * on missing source lines; preserved byte-for-byte.
 */
492 /* Close the device by freeing the temporary files. */
493 /* Note that this does not deallocate the buffer. */
495 clist_close_output_file(gx_device
*dev
)
497 gx_device_clist_writer
* const cdev
=
498 &((gx_device_clist
*)dev
)->writer
;
500 if (cdev
->page_cfile
!= NULL
) {
501 clist_fclose(cdev
->page_cfile
, cdev
->page_cfname
, true);
502 cdev
->page_cfile
= NULL
;
504 if (cdev
->page_bfile
!= NULL
) {
505 clist_fclose(cdev
->page_bfile
, cdev
->page_bfname
, true);
506 cdev
->page_bfile
= NULL
;
/*
 * clist_open (dev_proc open_device) -- initialize writer state, open the
 * band files, and emit the page header.
 * NOTE(review): garbled chunk -- the return type, code declaration, the
 * error checks between steps and the final return are on missing source
 * lines; preserved byte-for-byte.
 */
511 /* Open the device by initializing the device state and opening the */
514 clist_open(gx_device
*dev
)
516 gx_device_clist_writer
* const cdev
=
517 &((gx_device_clist
*)dev
)->writer
;
520 cdev
->permanent_error
= 0;
521 code
= clist_init(dev
);
524 code
= clist_open_output_file(dev
);
526 code
= clist_emit_page_header(dev
);
/*
 * clist_close (dev_proc close_device) -- close the band files unless
 * bandfile lifetime is managed externally.
 * NOTE(review): garbled chunk -- the return type and the early-return body
 * of the do_not_open_or_close_bandfiles branch are on missing source
 * lines; preserved byte-for-byte.
 */
531 clist_close(gx_device
*dev
)
533 gx_device_clist_writer
* const cdev
=
534 &((gx_device_clist
*)dev
)->writer
;
536 if (cdev
->do_not_open_or_close_bandfiles
)
538 return clist_close_output_file(dev
);
541 /* The output_page procedure should never be called! */
543 clist_output_page(gx_device
* dev
, int num_copies
, int flush
)
545 return_error(gs_error_Fatal
);
/*
 * clist_finish_page -- after a page is printed either rewind-and-truncate
 * both band files (flush case) or seek to their ends to append (copypage
 * case), then reinitialize the writer and emit the next page header.
 * NOTE(review): garbled chunk -- the flush/else branch structure, the
 * error checks between the final three calls, and the return are on
 * missing source lines; preserved byte-for-byte.
 */
548 /* Reset (or prepare to append to) the command list after printing a page. */
550 clist_finish_page(gx_device
*dev
, bool flush
)
552 gx_device_clist_writer
* const cdev
=
553 &((gx_device_clist
*)dev
)->writer
;
557 if (cdev
->page_cfile
!= 0)
558 clist_rewind(cdev
->page_cfile
, true, cdev
->page_cfname
);
559 if (cdev
->page_bfile
!= 0)
560 clist_rewind(cdev
->page_bfile
, true, cdev
->page_bfname
);
561 cdev
->page_bfile_end_pos
= 0;
563 if (cdev
->page_cfile
!= 0)
564 clist_fseek(cdev
->page_cfile
, 0L, SEEK_END
, cdev
->page_cfname
);
565 if (cdev
->page_bfile
!= 0)
566 clist_fseek(cdev
->page_bfile
, 0L, SEEK_END
, cdev
->page_bfname
);
568 code
= clist_init(dev
); /* reinitialize */
570 code
= clist_reinit_output_file(dev
);
572 code
= clist_emit_page_header(dev
);
577 /* ------ Writing ------ */
/*
 * clist_end_page -- flush the command buffer with an end-page opcode,
 * append the cmd_band_end terminator record to the block file (recording
 * the cfile position), remember the bfile end position, and drop the
 * low-memory warning reserves now that the page is complete.
 * NOTE(review): garbled chunk -- the cmd_block cb declaration, the branch
 * selecting between the two page_bfile_end_pos assignments, and the final
 * return are on missing source lines; preserved byte-for-byte.
 */
579 /* End a page by flushing the buffer and terminating the command list. */
580 int /* ret 0 all-ok, -ve error code, or +1 ok w/low-mem warning */
581 clist_end_page(gx_device_clist_writer
* cldev
)
583 int code
= cmd_write_buffer(cldev
, cmd_opv_end_page
);
589 * Write the terminating entry in the block file.
590 * Note that because of copypage, there may be many such entries.
592 cb
.band_min
= cb
.band_max
= cmd_band_end
;
593 cb
.pos
= (cldev
->page_cfile
== 0 ? 0 : clist_ftell(cldev
->page_cfile
));
594 clist_fwrite_chars(&cb
, sizeof(cb
), cldev
->page_bfile
);
595 cldev
->page_bfile_end_pos
= clist_ftell(cldev
->page_bfile
);
599 cldev
->page_bfile_end_pos
= clist_ftell(cldev
->page_bfile
);
604 /* Reset warning margin to 0 to release reserve memory if mem files */
605 if (cldev
->page_bfile
!= 0)
606 clist_set_memory_warning(cldev
->page_bfile
, 0);
607 if (cldev
->page_cfile
!= 0)
608 clist_set_memory_warning(cldev
->page_cfile
, 0);
611 if (gs_debug_c('l') | gs_debug_c(':'))
612 dlprintf2("[:]clist_end_page at cfile=%ld, bfile=%ld\n",
613 cb
.pos
, cldev
->page_bfile_end_pos
);
/*
 * clist_VMerror_recover -- try to recover a retryable VMerror without
 * flushing the current page: repeatedly ask the device to free bandlist
 * memory by rendering queued pages, until either enough memory is free
 * (clist_reinit_output_file succeeds), the device reports an error, or
 * no pages remain.
 * NOTE(review): garbled chunk -- the second parameter, the do-loop header,
 * the pages_remain assignment target, break statements and the final
 * return are on missing source lines; preserved byte-for-byte.
 */
618 /* Recover recoverable VM error if possible without flushing */
619 int /* ret -ve err, >= 0 if recovered w/# = cnt pages left in page queue */
620 clist_VMerror_recover(gx_device_clist_writer
*cldev
,
623 int code
= old_error_code
;
626 if (!clist_test_VMerror_recoverable(cldev
) ||
627 !cldev
->error_is_retryable
||
628 old_error_code
!= gs_error_VMerror
630 return old_error_code
;
632 /* Do some rendering, return if enough memory is now free */
635 (*cldev
->free_up_bandlist_memory
)( (gx_device
*)cldev
, false );
636 if (pages_remain
< 0) {
637 code
= pages_remain
; /* abort, error or interrupt req */
640 if (clist_reinit_output_file( (gx_device
*)cldev
) == 0) {
641 code
= pages_remain
; /* got enough memory to continue */
644 } while (pages_remain
);
646 if_debug1('L', "[L]soft flush of command list, status: %d\n", code
);
/*
 * clist_VMerror_recover_flush -- hard recovery for a retryable VMerror:
 * force the device to render/flush the whole bandlist, reset the writer
 * state, reopen the output files, re-emit current params, and compute the
 * resulting status (reset failure dominates; otherwise a failed free
 * re-raises the original error).
 * NOTE(review): garbled chunk -- the second parameter, declarations of
 * free_code/reset_code/code, intervening error checks and the final
 * return are on missing source lines; the condition at original line 677
 * may have lost a '!' (cf. clist_emit_page_header) -- verify upstream.
 * Preserved byte-for-byte.
 */
650 /* If recoverable VM error, flush & try to recover it */
651 int /* ret 0 ok, else -ve error */
652 clist_VMerror_recover_flush(gx_device_clist_writer
*cldev
,
659 /* If the device has the ability to render partial pages, flush
660 * out the bandlist, and reset the writing state. Then, get the
661 * device to render this band. When done, see if there's now enough
662 * memory to satisfy the minimum low-memory guarantees. If not,
663 * get the device to render some more. If there's nothing left to
664 * render & still insufficient memory, declare an error condition.
666 if (!clist_test_VMerror_recoverable(cldev
) ||
667 old_error_code
!= gs_error_VMerror
669 return old_error_code
; /* sorry, don't have any means to recover this error */
670 free_code
= (*cldev
->free_up_bandlist_memory
)( (gx_device
*)cldev
, true );
672 /* Reset the state of bands to "don't know anything" */
673 reset_code
= clist_reset( (gx_device
*)cldev
);
675 reset_code
= clist_open_output_file( (gx_device
*)cldev
);
676 if ( reset_code
>= 0 &&
677 (cldev
->disable_mask
& clist_disable_pass_thru_params
)
679 reset_code
= clist_put_current_params(cldev
);
680 if (reset_code
< 0) {
681 cldev
->permanent_error
= reset_code
;
682 cldev
->error_is_retryable
= 0;
685 code
= (reset_code
< 0 ? reset_code
: free_code
< 0 ? old_error_code
: 0);
686 if_debug1('L', "[L]hard flush of command list, status: %d\n", code
);
/*
 * clist_put_current_params -- snapshot the target device's parameter list
 * (get_params into a gs_c_param_list) and record it into the command
 * stream via cmd_put_params, refusing to write once a permanent error is
 * set.
 * NOTE(review): garbled chunk -- '&param_list' has been entity-corrupted
 * to the pilcrow form '¶m_list' throughout this function (extraction
 * artifact, not valid C), and the code declaration, error checks and
 * final return are on missing source lines; preserved byte-for-byte.
 */
690 /* Write the target device's current parameter list */
691 private int /* ret 0 all ok, -ve error */
692 clist_put_current_params(gx_device_clist_writer
*cldev
)
694 gx_device
*target
= cldev
->target
;
695 gs_c_param_list param_list
;
699 * If a put_params call fails, the device will be left in a closed
700 * state, but higher-level code won't notice this fact. We flag this by
701 * setting permanent_error, which prevents writing to the command list.
704 if (cldev
->permanent_error
)
705 return cldev
->permanent_error
;
706 gs_c_param_list_write(¶m_list
, cldev
->memory
);
707 code
= (*dev_proc(target
, get_params
))
708 (target
, (gs_param_list
*)¶m_list
);
710 gs_c_param_list_read(¶m_list
);
711 code
= cmd_put_params( cldev
, (gs_param_list
*)¶m_list
);
713 gs_c_param_list_release(¶m_list
);
718 /* ---------------- Driver interface ---------------- */
/*
 * clist_get_band (dev_proc get_band) -- given a y coordinate, report the
 * starting y of its band in *band_start and return the band's height
 * (clipped at the bottom of the device).
 * NOTE(review): garbled chunk -- the return type, the start declaration,
 * and the guard branches preceding the visible 'else if' (presumably
 * y < 0 / invalid band_height handling) are on missing source lines;
 * preserved byte-for-byte.
 */
721 clist_get_band(gx_device
* dev
, int y
, int *band_start
)
723 gx_device_clist_writer
* const cdev
=
724 &((gx_device_clist
*)dev
)->writer
;
725 int band_height
= cdev
->page_band_height
;
730 else if (y
>= dev
->height
)
732 *band_start
= start
= y
- y
% band_height
;
733 return min(dev
->height
- start
, band_height
);