]> git.ipfire.org Git - thirdparty/cups.git/blob - pstoraster/gxclist.c
Import cups.org releases
[thirdparty/cups.git] / pstoraster / gxclist.c
1 /*
2 Copyright 1993-2001 by Easy Software Products.
3 Copyright 1991, 1996, 1997, 1998 Aladdin Enterprises. All rights reserved.
4
5 This file is part of GNU Ghostscript.
6
7 GNU Ghostscript is distributed in the hope that it will be useful, but
8 WITHOUT ANY WARRANTY. No author or distributor accepts responsibility
9 to anyone for the consequences of using it or for whether it serves any
10 particular purpose or works at all, unless he says so in writing. Refer
11 to the GNU General Public License for full details.
12
13 Everyone is granted permission to copy, modify and redistribute GNU
14 Ghostscript, but only under the conditions described in the GNU General
15 Public License. A copy of this license is supposed to have been given
16 to you along with GNU Ghostscript so you can know your rights and
17 responsibilities. It should be in a file named COPYING. Among other
18 things, the copyright notice and this notice must be preserved on all
19 copies.
20
21 Aladdin Enterprises supports the work of the GNU Project, but is not
22 affiliated with the Free Software Foundation or the GNU Project. GNU
23 Ghostscript, as distributed by Aladdin Enterprises, does not require any
24 GNU software to build or run it.
25 */
26
27 /*$Id$ */
28 /* Command list document- and page-level code. */
29 #include "memory_.h"
30 #include "string_.h"
31 #include "gx.h"
32 #include "gp.h"
33 #include "gpcheck.h"
34 #include "gserrors.h"
35 #include "gxdevice.h"
36 #include "gxdevmem.h" /* must precede gxcldev.h */
37 #include "gxcldev.h"
38 #include "gxclpath.h"
39 #include "gsparams.h"
40
41 /* Forward declarations of driver procedures */
42 private dev_proc_open_device(clist_open);
43 private dev_proc_output_page(clist_output_page);
44 private dev_proc_close_device(clist_close);
45 private dev_proc_get_band(clist_get_band);
46
47 /* In gxclrect.c */
48 extern dev_proc_fill_rectangle(clist_fill_rectangle);
49 extern dev_proc_copy_mono(clist_copy_mono);
50 extern dev_proc_copy_color(clist_copy_color);
51 extern dev_proc_copy_alpha(clist_copy_alpha);
52 extern dev_proc_strip_tile_rectangle(clist_strip_tile_rectangle);
53 extern dev_proc_strip_copy_rop(clist_strip_copy_rop);
54
55 /* In gxclpath.c */
56 extern dev_proc_fill_path(clist_fill_path);
57 extern dev_proc_stroke_path(clist_stroke_path);
58
59 /* In gxclimag.c */
60 extern dev_proc_fill_mask(clist_fill_mask);
61 extern dev_proc_begin_image(clist_begin_image);
62 extern dev_proc_begin_typed_image(clist_begin_typed_image);
63 extern dev_proc_create_compositor(clist_create_compositor);
64
65 /* In gxclread.c */
66 extern dev_proc_get_bits_rectangle(clist_get_bits_rectangle);
67
68 /* Other forward declarations */
69 private int clist_put_current_params(P1(gx_device_clist_writer *cldev));
70
/* The device procedures */
/*
 * Entries must appear in gx_device_procs declaration order.  Drawing
 * operations are captured by the clist_* writers declared above (defined
 * in gxclrect.c, gxclpath.c, gxclimag.c, gxclread.c); everything else is
 * forwarded to the target device or handled by a default implementation.
 */
const gx_device_procs gs_clist_device_procs = {
    clist_open,				/* open_device */
    gx_forward_get_initial_matrix,	/* get_initial_matrix */
    gx_default_sync_output,		/* sync_output */
    clist_output_page,			/* output_page (should never be called) */
    clist_close,			/* close_device */
    gx_forward_map_rgb_color,		/* map_rgb_color */
    gx_forward_map_color_rgb,		/* map_color_rgb */
    clist_fill_rectangle,		/* fill_rectangle */
    gx_default_tile_rectangle,		/* tile_rectangle */
    clist_copy_mono,			/* copy_mono */
    clist_copy_color,			/* copy_color */
    gx_default_draw_line,		/* draw_line */
    gx_default_get_bits,		/* get_bits */
    gx_forward_get_params,		/* get_params */
    gx_forward_put_params,		/* put_params */
    gx_forward_map_cmyk_color,		/* map_cmyk_color */
    gx_forward_get_xfont_procs,		/* get_xfont_procs */
    gx_forward_get_xfont_device,	/* get_xfont_device */
    gx_forward_map_rgb_alpha_color,	/* map_rgb_alpha_color */
    gx_forward_get_page_device,		/* get_page_device */
    gx_forward_get_alpha_bits,		/* get_alpha_bits */
    clist_copy_alpha,			/* copy_alpha */
    clist_get_band,			/* get_band */
    gx_default_copy_rop,		/* copy_rop */
    clist_fill_path,			/* fill_path */
    clist_stroke_path,			/* stroke_path */
    clist_fill_mask,			/* fill_mask */
    gx_default_fill_trapezoid,		/* fill_trapezoid */
    gx_default_fill_parallelogram,	/* fill_parallelogram */
    gx_default_fill_triangle,		/* fill_triangle */
    gx_default_draw_thin_line,		/* draw_thin_line */
    clist_begin_image,			/* begin_image */
    gx_default_image_data,		/* image_data */
    gx_default_end_image,		/* end_image */
    clist_strip_tile_rectangle,		/* strip_tile_rectangle */
    clist_strip_copy_rop,		/* strip_copy_rop */
    gx_forward_get_clipping_box,	/* get_clipping_box */
    clist_begin_typed_image,		/* begin_typed_image */
    clist_get_bits_rectangle,		/* get_bits_rectangle */
    gx_forward_map_color_rgb_alpha,	/* map_color_rgb_alpha */
    clist_create_compositor,		/* create_compositor */
    gx_forward_get_hardware_params,	/* get_hardware_params */
    gx_default_text_begin		/* text_begin */
};
117
118 /* ------ Define the command set and syntax ------ */
119
/* Initialization for imager state. */
/* The initial scale is arbitrary. */
/* (300.0 / 72.0 is device pixels per point at a nominal 300 dpi.) */
const gs_imager_state clist_imager_state_initial =
{gs_imager_state_initial(300.0 / 72.0)};
124
125 /*
126 * The buffer area (data, data_size) holds a bitmap cache when both writing
127 * and reading. The rest of the space is used for the command buffer and
128 * band state bookkeeping when writing, and for the rendering buffer (image
129 * device) when reading. For the moment, we divide the space up
130 * arbitrarily, except that we allocate less space for the bitmap cache if
131 * the device doesn't need halftoning.
132 *
133 * All the routines for allocating tables in the buffer are idempotent, so
134 * they can be used to check whether a given-size buffer is large enough.
135 */
136
137 /*
138 * Calculate the desired size for the tile cache.
139 */
140 private uint
141 clist_tile_cache_size(const gx_device * target, uint data_size)
142 {
143 uint bits_size =
144 (data_size / 5) & -align_cached_bits_mod; /* arbitrary */
145
146 if (!gx_device_must_halftone(target)) { /* No halftones -- cache holds only Patterns & characters. */
147 bits_size -= bits_size >> 2;
148 }
149 #define min_bits_size 1024
150 if (bits_size < min_bits_size)
151 bits_size = min_bits_size;
152 #undef min_bits_size
153 return bits_size;
154 }
155
/*
 * Initialize the allocation for the tile cache.  Sets: tile_hash_mask,
 * tile_max_count, tile_table, chunk (structure), bits (structure).
 *
 * init_data/data_size describe the region carved out of the band buffer
 * for the cache: the hash table is placed at the front, and the bitmap
 * storage takes the remainder.
 */
private int
clist_init_tile_cache(gx_device * dev, byte * init_data, ulong data_size)
{
    gx_device_clist_writer * const cdev =
	&((gx_device_clist *)dev)->writer;
    byte *data = init_data;
    uint bits_size = data_size;
    /*
     * Partition the bits area between the hash table and the actual
     * bitmaps.  The per-bitmap overhead is about 24 bytes; if the
     * average character size is 10 points, its bitmap takes about 24 +
     * 0.5 * 10/72 * xdpi * 10/72 * ydpi / 8 bytes (the 0.5 being a
     * fudge factor to account for characters being narrower than they
     * are tall), which gives us a guideline for the size of the hash
     * table.
     */
    uint avg_char_size =
	(uint) (dev->x_pixels_per_inch * dev->y_pixels_per_inch *
		(0.5 * 10 / 72 * 10 / 72 / 8)) + 24;
    /* Estimated number of bitmaps the cache area could hold. */
    uint hc = bits_size / avg_char_size;
    uint hsize;

    while ((hc + 1) & hc)
	hc |= hc >> 1;		/* make mask (power of 2 - 1) */
    if (hc < 0xff)
	hc = 0xff;		/* make allowance for halftone tiles */
    else if (hc > 0xfff)
	hc = 0xfff;		/* cmd_op_set_tile_index has 12-bit operand */
    /* Make sure the tables will fit. */
    while (hc >= 3 && (hsize = (hc + 1) * sizeof(tile_hash)) >= bits_size)
	hc >>= 1;
    if (hc < 3)
	return_error(gs_error_rangecheck);
    cdev->tile_hash_mask = hc;
    /* Keep the table at most 3/4 full to limit collision scanning. */
    cdev->tile_max_count = hc - (hc >> 2);
    cdev->tile_table = (tile_hash *) data;
    data += hsize;
    bits_size -= hsize;
    /* The remainder of the area holds the cached bitmaps themselves. */
    gx_bits_cache_chunk_init(&cdev->chunk, data, bits_size);
    gx_bits_cache_init(&cdev->bits, &cdev->chunk);
    return 0;
}
202
203 /*
204 * Initialize the allocation for the bands. Requires: target. Sets:
205 * page_band_height (=page_info.band_params.BandHeight), nbands.
206 */
207 private int
208 clist_init_bands(gx_device * dev, uint data_size, int band_width,
209 int band_height)
210 {
211 gx_device_clist_writer * const cdev =
212 &((gx_device_clist *)dev)->writer;
213 gx_device *target = cdev->target;
214 int nbands;
215
216 if (gdev_mem_data_size((gx_device_memory *) target, band_width,
217 band_height) > data_size
218 )
219 return_error(gs_error_rangecheck);
220 cdev->page_band_height = band_height;
221 nbands = (target->height + band_height - 1) / band_height;
222 cdev->nbands = nbands;
223 #ifdef DEBUG
224 if (gs_debug_c('l') | gs_debug_c(':'))
225 dlprintf4("[:]width=%d, band_width=%d, band_height=%d, nbands=%d\n",
226 target->width, band_width, band_height, nbands);
227 #endif
228 return 0;
229 }
230
231 /*
232 * Initialize the allocation for the band states, which are used only
233 * when writing. Requires: nbands. Sets: states, cbuf, cend.
234 */
235 private int
236 clist_init_states(gx_device * dev, byte * init_data, uint data_size)
237 {
238 gx_device_clist_writer * const cdev =
239 &((gx_device_clist *)dev)->writer;
240 ulong state_size = cdev->nbands * (ulong) sizeof(gx_clist_state);
241
242 fprintf(stderr, "DEBUG: init_data = %p for cdev->states!\n", init_data);
243
244 /*
245 * The +100 in the next line is bogus, but we don't know what the
246 * real check should be. We're effectively assuring that at least 100
247 * bytes will be available to buffer command operands.
248 */
249 if (state_size + sizeof(cmd_prefix) + cmd_largest_size + 100 > data_size)
250 return_error(gs_error_rangecheck);
251 cdev->states = (gx_clist_state *) init_data;
252 cdev->cbuf = init_data + state_size;
253 cdev->cend = init_data + data_size;
254 return 0;
255 }
256
/*
 * Initialize all the data allocations.  Requires: target.  Sets:
 * page_tile_cache_size, page_info.band_params.BandWidth,
 * page_info.band_params.BandBufferSpace, + see above.
 */
private int
clist_init_data(gx_device * dev, byte * init_data, uint data_size)
{
    gx_device_clist_writer * const cdev =
	&((gx_device_clist *)dev)->writer;
    gx_device *target = cdev->target;
    /* BandWidth == 0 means "use the full device width". */
    const int band_width =
	cdev->page_info.band_params.BandWidth =
	(cdev->band_params.BandWidth ? cdev->band_params.BandWidth :
	 target->width);
    int band_height = cdev->band_params.BandHeight;
    /* BandBufferSpace == 0 means "use the whole buffer". */
    const uint band_space =
	cdev->page_info.band_params.BandBufferSpace =
	(cdev->band_params.BandBufferSpace ?
	 cdev->band_params.BandBufferSpace : data_size);
    byte *data = init_data;
    uint size = band_space;
    uint bits_size;		/* bytes reserved for the tile cache */
    int code;

    if (band_height) {		/*
				 * The band height is fixed, so the band buffer requirement
				 * is completely determined.
				 */
	uint band_data_size =
	    gdev_mem_data_size((gx_device_memory *) target,
			       band_width, band_height);

	if (band_data_size >= band_space)
	    return_error(gs_error_rangecheck);
	/* Give the tile cache the leftover space, capped at half */
	/* the total buffer. */
	bits_size = min(band_space - band_data_size, data_size >> 1);
	/**** MRS - make sure bits_size is 64-bit aligned for clist data!!! ****/
	bits_size = (bits_size + 7) & ~7;
    } else {			/*
				 * Choose the largest band height that will fit in the
				 * rendering-time buffer.
				 */
	bits_size = clist_tile_cache_size(target, band_space);
	bits_size = min(bits_size, data_size >> 1);
	/**** MRS - make sure bits_size is 64-bit aligned for clist data!!! ****/
	bits_size = (bits_size + 7) & ~7;
	band_height = gdev_mem_max_height((gx_device_memory *) target,
					  band_width,
					  band_space - bits_size);
	if (band_height == 0)
	    return_error(gs_error_rangecheck);
    }
    /* Buffer layout: [tile cache][band states + command buffer]. */
    code = clist_init_tile_cache(dev, data, bits_size);
    if (code < 0)
	return code;
    cdev->page_tile_cache_size = bits_size;
    data += bits_size;
    size -= bits_size;
    code = clist_init_bands(dev, size, band_width, band_height);
    if (code < 0)
	return code;
    return clist_init_states(dev, data, data_size - bits_size);
}
/*
 * Reset the device state (for writing).  This routine requires only
 * data, data_size, and target to be set, and is idempotent.
 */
private int
clist_reset(gx_device * dev)
{
    gx_device_clist_writer * const cdev =
	&((gx_device_clist *)dev)->writer;
    int code = clist_init_data(dev, cdev->data, cdev->data_size);
    int nbands;

    if (code < 0)
	return (cdev->permanent_error = code);
    /* Now initialize the rest of the state. */
    cdev->permanent_error = 0;
    nbands = cdev->nbands;
    cdev->ymin = cdev->ymax = -1;	/* render_init not done yet */
    /* Empty the tile cache's hash table. */
    memset(cdev->tile_table, 0, (cdev->tile_hash_mask + 1) *
	   sizeof(*cdev->tile_table));
    /* Reset the command buffer write pointer and current command list. */
    cdev->cnext = cdev->cbuf;
    cdev->ccl = 0;
    cdev->band_range_list.head = cdev->band_range_list.tail = 0;
    cdev->band_range_min = 0;
    cdev->band_range_max = nbands - 1;
    {
	int band;
	gx_clist_state *states = cdev->states;

	/* Give every band a fresh copy of the initial per-band state. */
	for (band = 0; band < nbands; band++, states++) {
	    static const gx_clist_state cls_initial =
	    {cls_initial_values};

	    *states = cls_initial;
	}
    }
    /*
     * Round up the size of the per-tile band mask so that the bits,
     * which follow it, stay aligned.
     */
    cdev->tile_band_mask_size =
	((nbands + (align_bitmap_mod * 8 - 1)) >> 3) &
	~(align_bitmap_mod - 1);
    /*
     * Initialize the all-band parameters to impossible values,
     * to force them to be written the first time they are used.
     */
    memset(&cdev->tile_params, 0, sizeof(cdev->tile_params));
    cdev->tile_depth = 0;
    cdev->tile_known_min = nbands;
    cdev->tile_known_max = -1;
    cdev->imager_state = clist_imager_state_initial;
    cdev->clip_path = NULL;
    cdev->clip_path_id = gs_no_id;
    cdev->color_space = 0;
    {
	int i;

	/* gs_no_id forces each transfer map to be (re)written on first use. */
	for (i = 0; i < countof(cdev->transfer_ids); ++i)
	    cdev->transfer_ids[i] = gs_no_id;
    }
    cdev->black_generation_id = gs_no_id;
    cdev->undercolor_removal_id = gs_no_id;
    cdev->device_halftone_id = gs_no_id;
    return 0;
}
386 /*
387 * Initialize the device state (for writing). This routine requires only
388 * data, data_size, and target to be set, and is idempotent.
389 */
390 private int
391 clist_init(gx_device * dev)
392 {
393 gx_device_clist_writer * const cdev =
394 &((gx_device_clist *)dev)->writer;
395 int code = clist_reset(dev);
396
397 if (code >= 0) {
398 cdev->image_enum_id = gs_no_id;
399 cdev->error_is_retryable = 0;
400 cdev->driver_call_nesting = 0;
401 cdev->ignore_lo_mem_warnings = 0;
402 }
403 return code;
404 }
405
406 /* (Re)init open band files for output (set block size, etc). */
407 private int /* ret 0 ok, -ve error code */
408 clist_reinit_output_file(gx_device *dev)
409 { gx_device_clist_writer * const cdev =
410 &((gx_device_clist *)dev)->writer;
411 int code = 0;
412
413 /* bfile needs to guarantee cmd_blocks for: 1 band range, nbands */
414 /* & terminating entry */
415 int b_block = sizeof(cmd_block) * (cdev->nbands + 2);
416
417 /* cfile needs to guarantee one writer buffer */
418 /* + one end_clip cmd (if during image's clip path setup) */
419 /* + an end_image cmd for each band (if during image) */
420 /* + end_cmds for each band and one band range */
421 int c_block
422 = cdev->cend - cdev->cbuf + 2 + cdev->nbands * 2 + (cdev->nbands + 1);
423
424 /* All this is for partial page rendering's benefit, do only */
425 /* if partial page rendering is available */
426 if ( clist_test_VMerror_recoverable(cdev) )
427 { if (cdev->page_bfile != 0)
428 code = clist_set_memory_warning(cdev->page_bfile, b_block);
429 if (code >= 0 && cdev->page_cfile != 0)
430 code = clist_set_memory_warning(cdev->page_cfile, c_block);
431 }
432 return code;
433 }
434
/* Write out the current parameters that must be at the head of each page */
/* if async rendering is in effect */
private int
clist_emit_page_header(gx_device *dev)
{
    gx_device_clist_writer * const cdev =
	&((gx_device_clist *)dev)->writer;
    int code = 0;

    if ( (cdev->disable_mask & clist_disable_pass_thru_params) )
	{ /*
	   * Retry clist_put_current_params for as long as the VM-error
	   * recovery path reports it has freed enough band-list memory;
	   * stop on success or when recovery itself fails.
	   */
	  do
	    if ( ( code = clist_put_current_params(cdev) ) >= 0 )
	      break;
	  while ( ( code = clist_VMerror_recover(cdev, code) ) < 0 );
	  /* A persistent failure permanently disables command-list writing. */
	  cdev->permanent_error = (code < 0) ? code : 0;
	  if (cdev->permanent_error < 0)
	    cdev->error_is_retryable = 0;
	}
    return code;
}
455
/* Open the device's bandfiles */
private int
clist_open_output_file(gx_device *dev)
{
    gx_device_clist_writer * const cdev =
	&((gx_device_clist *)dev)->writer;
    char fmode[4];
    int code;

    if (cdev->do_not_open_or_close_bandfiles)
	return 0;		/* external bandfile open/close managed externally */
    cdev->page_cfile = 0;	/* in case of failure */
    cdev->page_bfile = 0;	/* ditto */
    code = clist_init(dev);
    if (code < 0)
	return code;
    /* Band files are binary read/write scratch files. */
    strcpy(fmode, "w+");
    strcat(fmode, gp_fmode_binary_suffix);
    cdev->page_cfname[0] = 0;	/* create a new file */
    cdev->page_bfname[0] = 0;	/* ditto */
    cdev->page_bfile_end_pos = 0;
    /* Open the command file, then the block file, then set the */
    /* low-memory warning margins; on any failure, close whatever */
    /* was opened and record a permanent (non-retryable) error. */
    if ((code = clist_fopen(cdev->page_cfname, fmode, &cdev->page_cfile,
			    cdev->bandlist_memory, cdev->bandlist_memory,
			    true)) < 0 ||
	(code = clist_fopen(cdev->page_bfname, fmode, &cdev->page_bfile,
			    cdev->bandlist_memory, cdev->bandlist_memory,
			    true)) < 0 ||
	(code = clist_reinit_output_file(dev)) < 0
	) {
	clist_close_output_file(dev);
	cdev->permanent_error = code;
	cdev->error_is_retryable = 0;
    }
    return code;
}
491
/* Close the device by freeing the temporary files. */
/* Note that this does not deallocate the buffer. */
int
clist_close_output_file(gx_device *dev)
{
    gx_device_clist_writer * const cdev =
	&((gx_device_clist *)dev)->writer;

    /*
     * Close each band file that is open and clear its pointer so a
     * second call is harmless.  NOTE(review): the final 'true' argument
     * presumably requests deletion of the scratch file -- confirm
     * against clist_fclose's declaration.
     */
    if (cdev->page_cfile != NULL) {
	clist_fclose(cdev->page_cfile, cdev->page_cfname, true);
	cdev->page_cfile = NULL;
    }
    if (cdev->page_bfile != NULL) {
	clist_fclose(cdev->page_bfile, cdev->page_bfname, true);
	cdev->page_bfile = NULL;
    }
    return 0;
}
510
511 /* Open the device by initializing the device state and opening the */
512 /* scratch files. */
513 private int
514 clist_open(gx_device *dev)
515 {
516 gx_device_clist_writer * const cdev =
517 &((gx_device_clist *)dev)->writer;
518 int code;
519
520 cdev->permanent_error = 0;
521 code = clist_init(dev);
522 if (code < 0)
523 return code;
524 code = clist_open_output_file(dev);
525 if ( code >= 0)
526 code = clist_emit_page_header(dev);
527 return code;
528 }
529
530 private int
531 clist_close(gx_device *dev)
532 {
533 gx_device_clist_writer * const cdev =
534 &((gx_device_clist *)dev)->writer;
535
536 if (cdev->do_not_open_or_close_bandfiles)
537 return 0;
538 return clist_close_output_file(dev);
539 }
540
/* The output_page procedure should never be called! */
/* Page output for a clist device is performed by the reader side; */
/* reaching this stub indicates a driver wiring error. */
private int
clist_output_page(gx_device * dev, int num_copies, int flush)
{
    return_error(gs_error_Fatal);
}
547
548 /* Reset (or prepare to append to) the command list after printing a page. */
549 int
550 clist_finish_page(gx_device *dev, bool flush)
551 {
552 gx_device_clist_writer * const cdev =
553 &((gx_device_clist *)dev)->writer;
554 int code;
555
556 if (flush) {
557 if (cdev->page_cfile != 0)
558 clist_rewind(cdev->page_cfile, true, cdev->page_cfname);
559 if (cdev->page_bfile != 0)
560 clist_rewind(cdev->page_bfile, true, cdev->page_bfname);
561 cdev->page_bfile_end_pos = 0;
562 } else {
563 if (cdev->page_cfile != 0)
564 clist_fseek(cdev->page_cfile, 0L, SEEK_END, cdev->page_cfname);
565 if (cdev->page_bfile != 0)
566 clist_fseek(cdev->page_bfile, 0L, SEEK_END, cdev->page_bfname);
567 }
568 code = clist_init(dev); /* reinitialize */
569 if (code >= 0)
570 code = clist_reinit_output_file(dev);
571 if (code >= 0)
572 code = clist_emit_page_header(dev);
573
574 return code;
575 }
576
577 /* ------ Writing ------ */
578
579 /* End a page by flushing the buffer and terminating the command list. */
580 int /* ret 0 all-ok, -ve error code, or +1 ok w/low-mem warning */
581 clist_end_page(gx_device_clist_writer * cldev)
582 {
583 int code = cmd_write_buffer(cldev, cmd_opv_end_page);
584 cmd_block cb;
585 int ecode = 0;
586
587 if (code >= 0) {
588 /*
589 * Write the terminating entry in the block file.
590 * Note that because of copypage, there may be many such entries.
591 */
592 cb.band_min = cb.band_max = cmd_band_end;
593 cb.pos = (cldev->page_cfile == 0 ? 0 : clist_ftell(cldev->page_cfile));
594 clist_fwrite_chars(&cb, sizeof(cb), cldev->page_bfile);
595 cldev->page_bfile_end_pos = clist_ftell(cldev->page_bfile);
596 }
597 if (code >= 0) {
598 ecode |= code;
599 cldev->page_bfile_end_pos = clist_ftell(cldev->page_bfile);
600 }
601 if (code < 0)
602 ecode = code;
603
604 /* Reset warning margin to 0 to release reserve memory if mem files */
605 if (cldev->page_bfile != 0)
606 clist_set_memory_warning(cldev->page_bfile, 0);
607 if (cldev->page_cfile != 0)
608 clist_set_memory_warning(cldev->page_cfile, 0);
609
610 #ifdef DEBUG
611 if (gs_debug_c('l') | gs_debug_c(':'))
612 dlprintf2("[:]clist_end_page at cfile=%ld, bfile=%ld\n",
613 cb.pos, cldev->page_bfile_end_pos);
614 #endif
615 return 0;
616 }
617
/* Recover recoverable VM error if possible without flushing */
int	/* ret -ve err, >= 0 if recovered w/# = cnt pages left in page queue */
clist_VMerror_recover(gx_device_clist_writer *cldev,
		      int old_error_code)
{
    int code = old_error_code;
    int pages_remain;

    /* Only a retryable VMerror on a device that can render partial */
    /* pages is recoverable here; anything else is passed back as-is. */
    if (!clist_test_VMerror_recoverable(cldev) ||
	!cldev->error_is_retryable ||
	old_error_code != gs_error_VMerror
	)
	return old_error_code;

    /* Do some rendering, return if enough memory is now free */
    do {
	pages_remain =
	    (*cldev->free_up_bandlist_memory)( (gx_device *)cldev, false );
	if (pages_remain < 0) {
	    code = pages_remain;	/* abort, error or interrupt req */
	    break;
	}
	if (clist_reinit_output_file( (gx_device *)cldev ) == 0) {
	    code = pages_remain;	/* got enough memory to continue */
	    break;
	}
    } while (pages_remain);	/* stop when the page queue is empty */

    if_debug1('L', "[L]soft flush of command list, status: %d\n", code);
    return code;
}
649
/* If recoverable VM error, flush & try to recover it */
int	/* ret 0 ok, else -ve error */
clist_VMerror_recover_flush(gx_device_clist_writer *cldev,
			    int old_error_code)
{
    int free_code = 0;
    int reset_code = 0;
    int code;

    /* If the device has the ability to render partial pages, flush
     * out the bandlist, and reset the writing state. Then, get the
     * device to render this band. When done, see if there's now enough
     * memory to satisfy the minimum low-memory guarantees. If not,
     * get the device to render some more. If there's nothing left to
     * render & still insufficient memory, declare an error condition.
     */
    if (!clist_test_VMerror_recoverable(cldev) ||
	old_error_code != gs_error_VMerror
	)
	return old_error_code;	/* sorry, don't have any means to recover this error */
    free_code = (*cldev->free_up_bandlist_memory)( (gx_device *)cldev, true );

    /* Reset the state of bands to "don't know anything" */
    reset_code = clist_reset( (gx_device *)cldev );
    if (reset_code >= 0)
	reset_code = clist_open_output_file( (gx_device *)cldev );
    /* Re-emit the head-of-page parameters discarded by the flush. */
    if ( reset_code >= 0 &&
	 (cldev->disable_mask & clist_disable_pass_thru_params)
	)
	reset_code = clist_put_current_params(cldev);
    if (reset_code < 0) {
	/* Failure to re-establish the writer state is unrecoverable. */
	cldev->permanent_error = reset_code;
	cldev->error_is_retryable = 0;
    }

    /* Reset failures dominate; otherwise report the original error */
    /* only if freeing memory also failed. */
    code = (reset_code < 0 ? reset_code : free_code < 0 ? old_error_code : 0);
    if_debug1('L', "[L]hard flush of command list, status: %d\n", code);
    return code;
}
689
/* Write the target device's current parameter list */
private int	/* ret 0 all ok, -ve error */
clist_put_current_params(gx_device_clist_writer *cldev)
{
    gx_device *target = cldev->target;
    gs_c_param_list param_list;
    int code;

    /*
     * If a put_params call fails, the device will be left in a closed
     * state, but higher-level code won't notice this fact. We flag this by
     * setting permanent_error, which prevents writing to the command list.
     */

    if (cldev->permanent_error)
	return cldev->permanent_error;
    /* Collect the target's current parameters... */
    gs_c_param_list_write(&param_list, cldev->memory);
    code = (*dev_proc(target, get_params))
	(target, (gs_param_list *)&param_list);
    /* ...and, if that succeeded, record them in the command list. */
    if (code >= 0) {
	gs_c_param_list_read(&param_list);
	code = cmd_put_params( cldev, (gs_param_list *)&param_list );
    }
    gs_c_param_list_release(&param_list);

    return code;
}
717
718 /* ---------------- Driver interface ---------------- */
719
720 private int
721 clist_get_band(gx_device * dev, int y, int *band_start)
722 {
723 gx_device_clist_writer * const cdev =
724 &((gx_device_clist *)dev)->writer;
725 int band_height = cdev->page_band_height;
726 int start;
727
728 if (y < 0)
729 y = 0;
730 else if (y >= dev->height)
731 y = dev->height;
732 *band_start = start = y - y % band_height;
733 return min(dev->height - start, band_height);
734 }