From: Jeff Mahoney <jeffm@suse.com>
Subject: reiserfs: strip trailing whitespace

This patch strips trailing whitespace from the reiserfs code.

Signed-off-by: Jeff Mahoney <jeffm@suse.com>

---
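As a minimal sketch, a whitespace cleanup like this can be regenerated
mechanically, assuming GNU sed and a git checkout (the handful of
comment-style conversions visible in the hunks below would still need
manual edits, and the file list here is abbreviated):

    $ sed -i 's/[ \t]*$//' fs/reiserfs/*.c include/linux/reiserfs_fs_sb.h
    $ git diff > reiserfs-strip-whitespace.diff
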
 fs/reiserfs/README             |    4 -
 fs/reiserfs/do_balan.c         |   14 ++--
 fs/reiserfs/file.c             |    8 +-
 fs/reiserfs/fix_node.c         |   38 ++++++------
 fs/reiserfs/hashes.c           |    2
 fs/reiserfs/ibalance.c         |   10 +--
 fs/reiserfs/inode.c            |   52 ++++++++---------
 fs/reiserfs/ioctl.c            |    2
 fs/reiserfs/journal.c          |  120 ++++++++++++++++++++---------------------
 fs/reiserfs/lbalance.c         |   18 +++---
 fs/reiserfs/namei.c            |   30 +++++-----
 fs/reiserfs/objectid.c         |    2
 fs/reiserfs/prints.c           |   26 ++++----
 fs/reiserfs/procfs.c           |    2
 fs/reiserfs/resize.c           |    6 +-
 fs/reiserfs/stree.c            |    8 +-
 fs/reiserfs/super.c            |   10 +--
 fs/reiserfs/tail_conversion.c  |    2
 include/linux/reiserfs_fs_sb.h |   14 ++--
 19 files changed, 184 insertions(+), 184 deletions(-)

--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -76,21 +76,21 @@ inline void do_balance_mark_leaf_dirty(s
 #define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
 #define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty

-/* summary:
+/* summary:
 if deleting something ( tb->insert_size[0] < 0 )
 return(balance_leaf_when_delete()); (flag d handled here)
 else
 if lnum is larger than 0 we put items into the left node
 if rnum is larger than 0 we put items into the right node
 if snum1 is larger than 0 we put items into the new node s1
- if snum2 is larger than 0 we put items into the new node s2
+ if snum2 is larger than 0 we put items into the new node s2
 Note that all *num* count new items being created.

 It would be easier to read balance_leaf() if each of these summary
 lines was a separate procedure rather than being inlined. I think
 that there are many passages here and in balance_leaf_when_delete() in
 which two calls to one procedure can replace two passages, and it
-might save cache space and improve software maintenance costs to do so.
+might save cache space and improve software maintenance costs to do so.

 Vladimir made the perceptive comment that we should offload most of
 the decision making in this function into fix_nodes/check_balance, and
@@ -288,15 +288,15 @@ static int balance_leaf(struct tree_bala
 )
 {
 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- int item_pos = PATH_LAST_POSITION(tb->tb_path); /* index into the array of item headers in S[0]
+ int item_pos = PATH_LAST_POSITION(tb->tb_path); /* index into the array of item headers in S[0]
 of the affected item */
 struct buffer_info bi;
 struct buffer_head *S_new[2]; /* new nodes allocated to hold what could not fit into S */
 int snum[2]; /* number of items that will be placed
 into S_new (includes partially shifted
 items) */
- int sbytes[2]; /* if an item is partially shifted into S_new then
- if it is a directory item
+ int sbytes[2]; /* if an item is partially shifted into S_new then
+ if it is a directory item
 it is the number of entries from the item that are shifted into S_new
 else
 it is the number of bytes from the item that are shifted into S_new
@@ -1983,7 +1983,7 @@ static inline void do_balance_starts(str
 /* store_print_tb (tb); */

 /* do not delete, just comment it out */
-/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
+/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
 "check");*/
 RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
 #ifdef CONFIG_REISERFS_CHECK
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -20,14 +20,14 @@
 ** insertion/balancing, for files that are written in one write.
 ** It avoids unnecessary tail packings (balances) for files that are written in
 ** multiple writes and are small enough to have tails.
-**
+**
 ** file_release is called by the VFS layer when the file is closed. If
 ** this is the last open file descriptor, and the file
 ** small enough to have a tail, and the tail is currently in an
 ** unformatted node, the tail is converted back into a direct item.
-**
+**
 ** We use reiserfs_truncate_file to pack the tail, since it already has
-** all the conditions coded.
+** all the conditions coded.
 */
 static int reiserfs_file_release(struct inode *inode, struct file *filp)
 {
@@ -223,7 +223,7 @@ int reiserfs_commit_page(struct inode *i
 }

 /* Write @count bytes at position @ppos in a file indicated by @file
- from the buffer @buf.
+ from the buffer @buf.

 generic_file_write() is only appropriate for filesystems that are not seeking to optimize performance and want
 something simple that works. It is not for serious use by general purpose filesystems, excepting the one that it was
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -30,8 +30,8 @@
 ** get_direct_parent
 ** get_neighbors
 ** fix_nodes
- **
- **
+ **
+ **
 **/

 #include <linux/time.h>
@@ -377,9 +377,9 @@ static int get_num_ver(int mode, struct
 int needed_nodes;
 int start_item, /* position of item we start filling node from */
 end_item, /* position of item we finish filling node by */
- start_bytes, /* number of first bytes (entries for directory) of start_item-th item
+ start_bytes, /* number of first bytes (entries for directory) of start_item-th item
 we do not include into node that is being filled */
- end_bytes; /* number of last bytes (entries for directory) of end_item-th item
+ end_bytes; /* number of last bytes (entries for directory) of end_item-th item
 we do node include into node that is being filled */
 int split_item_positions[2]; /* these are positions in virtual item of
 items, that are split between S[0] and
@@ -569,7 +569,7 @@ extern struct tree_balance *cur_tb;

 /* Set parameters for balancing.
 * Performs write of results of analysis of balancing into structure tb,
- * where it will later be used by the functions that actually do the balancing.
+ * where it will later be used by the functions that actually do the balancing.
 * Parameters:
 * tb tree_balance structure;
 * h current level of the node;
@@ -1204,7 +1204,7 @@ static inline int can_node_be_removed(in
 * h current level of the node;
 * inum item number in S[h];
 * mode i - insert, p - paste;
- * Returns: 1 - schedule occurred;
+ * Returns: 1 - schedule occurred;
 * 0 - balancing for higher levels needed;
 * -1 - no balancing for higher levels needed;
 * -2 - no disk space.
@@ -1239,7 +1239,7 @@ static int ip_check_balance(struct tree_
 /* we perform 8 calls to get_num_ver(). For each call we calculate five parameters.
 where 4th parameter is s1bytes and 5th - s2bytes
 */
- short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases
+ short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases
 0,1 - do not shift and do not shift but bottle
 2 - shift only whole item to left
 3 - shift to left and bottle as much as possible
@@ -1288,7 +1288,7 @@ static int ip_check_balance(struct tree_

 create_virtual_node(tb, h);

- /*
+ /*
 determine maximal number of items we can shift to the left neighbor (in tb structure)
 and the maximal number of bytes that can flow to the left neighbor
 from the left most liquid item that cannot be shifted from S[0] entirely (returned value)
@@ -1349,13 +1349,13 @@ static int ip_check_balance(struct tree_

 {
 int lpar, rpar, nset, lset, rset, lrset;
- /*
+ /*
 * regular overflowing of the node
 */

- /* get_num_ver works in 2 modes (FLOW & NO_FLOW)
+ /* get_num_ver works in 2 modes (FLOW & NO_FLOW)
 lpar, rpar - number of items we can shift to left/right neighbor (including splitting item)
- nset, lset, rset, lrset - shows, whether flowing items give better packing
+ nset, lset, rset, lrset - shows, whether flowing items give better packing
 */
 #define FLOW 1
 #define NO_FLOW 0 /* do not any splitting */
@@ -1545,7 +1545,7 @@ static int ip_check_balance(struct tree_
 * h current level of the node;
 * inum item number in S[h];
 * mode i - insert, p - paste;
- * Returns: 1 - schedule occurred;
+ * Returns: 1 - schedule occurred;
 * 0 - balancing for higher levels needed;
 * -1 - no balancing for higher levels needed;
 * -2 - no disk space.
@@ -1728,7 +1728,7 @@ static int dc_check_balance_internal(str
 * h current level of the node;
 * inum item number in S[h];
 * mode i - insert, p - paste;
- * Returns: 1 - schedule occurred;
+ * Returns: 1 - schedule occurred;
 * 0 - balancing for higher levels needed;
 * -1 - no balancing for higher levels needed;
 * -2 - no disk space.
@@ -1822,7 +1822,7 @@ static int dc_check_balance_leaf(struct
 * h current level of the node;
 * inum item number in S[h];
 * mode d - delete, c - cut.
- * Returns: 1 - schedule occurred;
+ * Returns: 1 - schedule occurred;
 * 0 - balancing for higher levels needed;
 * -1 - no balancing for higher levels needed;
 * -2 - no disk space.
@@ -1851,7 +1851,7 @@ static int dc_check_balance(struct tree_
 * h current level of the node;
 * inum item number in S[h];
 * mode i - insert, p - paste, d - delete, c - cut.
- * Returns: 1 - schedule occurred;
+ * Returns: 1 - schedule occurred;
 * 0 - balancing for higher levels needed;
 * -1 - no balancing for higher levels needed;
 * -2 - no disk space.
@@ -2296,15 +2296,15 @@ static int wait_tb_buffers_until_unlocke
 * analyze what and where should be moved;
 * get sufficient number of new nodes;
 * Balancing will start only after all resources will be collected at a time.
- *
+ *
 * When ported to SMP kernels, only at the last moment after all needed nodes
 * are collected in cache, will the resources be locked using the usual
 * textbook ordered lock acquisition algorithms. Note that ensuring that
 * this code neither write locks what it does not need to write lock nor locks out of order
 * will be a pain in the butt that could have been avoided. Grumble grumble. -Hans
- *
+ *
 * fix is meant in the sense of render unchanging
- *
+ *
 * Latency might be improved by first gathering a list of what buffers are needed
 * and then getting as many of them in parallel as possible? -Hans
 *
@@ -2316,7 +2316,7 @@ static int wait_tb_buffers_until_unlocke
 * ins_ih & ins_sd are used when inserting
 * Returns: 1 - schedule occurred while the function worked;
 * 0 - schedule didn't occur while the function worked;
- * -1 - if no_disk_space
+ * -1 - if no_disk_space
 */

 int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ins_ih, // item head of item being inserted
--- a/fs/reiserfs/hashes.c
+++ b/fs/reiserfs/hashes.c
@@ -7,7 +7,7 @@
 * (see Applied Cryptography, 2nd edition, p448).
 *
 * Jeremy Fitzhardinge <jeremy@zip.com.au> 1998
- *
+ *
 * Jeremy has agreed to the contents of reiserfs/README. -Hans
 * Yura's function is added (04/07/2000)
 */
--- a/fs/reiserfs/ibalance.c
+++ b/fs/reiserfs/ibalance.c
@@ -278,7 +278,7 @@ static void internal_delete_childs(struc

 /* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest
 * last_first == FIRST_TO_LAST means, that we copy first items from src to tail of dest
- * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest
+ * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest
 */
 static void internal_copy_pointers_items(struct buffer_info *dest_bi,
 struct buffer_head *src,
@@ -385,7 +385,7 @@ static void internal_move_pointers_items
 if (last_first == FIRST_TO_LAST) { /* shift_left occurs */
 first_pointer = 0;
 first_item = 0;
- /* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer,
+ /* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer,
 for key - with first_item */
 internal_delete_pointers_items(src_bi, first_pointer,
 first_item, cpy_num - del_par);
@@ -453,7 +453,7 @@ static void internal_insert_key(struct b
 }
 }

-/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
+/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
 * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest.
 * Replace d_key'th key in buffer cfl.
 * Delete pointer_amount items and node pointers from buffer src.
@@ -518,7 +518,7 @@ static void internal_shift1_left(struct
 /* internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1); */
 }

-/* Insert d_key'th (delimiting) key from buffer cfr to head of dest.
+/* Insert d_key'th (delimiting) key from buffer cfr to head of dest.
 * Copy n node pointers and n - 1 items from buffer src to buffer dest.
 * Replace d_key'th key in buffer cfr.
 * Delete n items and node pointers from buffer src.
@@ -749,7 +749,7 @@ int balance_internal(struct tree_balance
 this means that new pointers and items must be inserted AFTER *
 child_pos
 }
- else
+ else
 {
 it is the position of the leftmost pointer that must be deleted (together with
 its corresponding key to the left of the pointer)
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -52,7 +52,7 @@ void reiserfs_delete_inode(struct inode
 /* Do quota update inside a transaction for journaled quotas. We must do that
 * after delete_object so that quota updates go into the same transaction as
 * stat data deletion */
- if (!err)
+ if (!err)
 DQUOT_FREE_INODE(inode);

 if (journal_end(&th, inode->i_sb, jbegin_count))
@@ -363,7 +363,7 @@ static int _get_block_create_0(struct in
 }
 /* make sure we don't read more bytes than actually exist in
 ** the file. This can happen in odd cases where i_size isn't
- ** correct, and when direct item padding results in a few
+ ** correct, and when direct item padding results in a few
 ** extra bytes at the end of the direct item
 */
 if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
@@ -438,15 +438,15 @@ static int reiserfs_bmap(struct inode *i
 ** -ENOENT instead of a valid buffer. block_prepare_write expects to
 ** be able to do i/o on the buffers returned, unless an error value
 ** is also returned.
-**
+**
 ** So, this allows block_prepare_write to be used for reading a single block
 ** in a page. Where it does not produce a valid page for holes, or past the
 ** end of the file. This turns out to be exactly what we need for reading
 ** tails for conversion.
 **
 ** The point of the wrapper is forcing a certain value for create, even
-** though the VFS layer is calling this function with create==1. If you
-** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
+** though the VFS layer is calling this function with create==1. If you
+** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
 ** don't use this function.
 */
 static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
@@ -602,7 +602,7 @@ int reiserfs_get_block(struct inode *ino
 int done;
 int fs_gen;
 struct reiserfs_transaction_handle *th = NULL;
- /* space reserved in transaction batch:
+ /* space reserved in transaction batch:
 . 3 balancings in direct->indirect conversion
 . 1 block involved into reiserfs_update_sd()
 XXX in practically impossible worst case direct2indirect()
@@ -754,7 +754,7 @@ int reiserfs_get_block(struct inode *ino
 reiserfs_write_unlock(inode->i_sb);

 /* the item was found, so new blocks were not added to the file
- ** there is no need to make sure the inode is updated with this
+ ** there is no need to make sure the inode is updated with this
 ** transaction
 */
 return retval;
@@ -986,7 +986,7 @@ int reiserfs_get_block(struct inode *ino

 /* this loop could log more blocks than we had originally asked
 ** for. So, we have to allow the transaction to end if it is
- ** too big or too full. Update the inode so things are
+ ** too big or too full. Update the inode so things are
 ** consistent if we crash before the function returns
 **
 ** release the path so that anybody waiting on the path before
@@ -997,7 +997,7 @@ int reiserfs_get_block(struct inode *ino
 if (retval)
 goto failure;
 }
- /* inserting indirect pointers for a hole can take a
+ /* inserting indirect pointers for a hole can take a
 ** long time. reschedule if needed
 */
 cond_resched();
@@ -1444,7 +1444,7 @@ void reiserfs_read_locked_inode(struct i
 update sd on unlink all that is required is to check for nlink
 here. This bug was first found by Sizif when debugging
 SquidNG/Butterfly, forgotten, and found again after Philippe
- Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
+ Gramoulle <philippe.gramoulle@mmania.com> reproduced it.

 More logical fix would require changes in fs/inode.c:iput() to
 remove inode from hash-table _after_ fs cleaned disk stuff up and
@@ -1628,7 +1628,7 @@ int reiserfs_write_inode(struct inode *i
 if (inode->i_sb->s_flags & MS_RDONLY)
 return -EROFS;
 /* memory pressure can sometimes initiate write_inode calls with sync == 1,
- ** these cases are just when the system needs ram, not when the
+ ** these cases are just when the system needs ram, not when the
 ** inode needs to reach disk for safety, and they can safely be
 ** ignored because the altered inode has already been logged.
 */
@@ -1745,7 +1745,7 @@ static int reiserfs_new_symlink(struct r
 /* inserts the stat data into the tree, and then calls
 reiserfs_new_directory (to insert ".", ".." item if new object is
 directory) or reiserfs_new_symlink (to insert symlink body if new
- object is symlink) or nothing (if new object is regular file)
+ object is symlink) or nothing (if new object is regular file)

 NOTE! uid and gid must already be set in the inode. If we return
 non-zero due to an error, we have to drop the quota previously allocated
@@ -1753,7 +1753,7 @@ static int reiserfs_new_symlink(struct r
 if we return non-zero, we also end the transaction. */
 int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 struct inode *dir, int mode, const char *symname,
- /* 0 for regular, EMTRY_DIR_SIZE for dirs,
+ /* 0 for regular, EMTRY_DIR_SIZE for dirs,
 strlen (symname) for symlinks) */
 loff_t i_size, struct dentry *dentry,
 struct inode *inode,
@@ -1788,7 +1788,7 @@ int reiserfs_new_inode(struct reiserfs_t
 goto out_bad_inode;
 }
 if (old_format_only(sb))
- /* not a perfect generation count, as object ids can be reused, but
+ /* not a perfect generation count, as object ids can be reused, but
 ** this is as good as reiserfs can do right now.
 ** note that the private part of inode isn't filled in yet, we have
 ** to use the directory.
@@ -2086,7 +2086,7 @@ int reiserfs_truncate_file(struct inode

 if (p_s_inode->i_size > 0) {
 if ((error = grab_tail_page(p_s_inode, &page, &bh))) {
- // -ENOENT means we truncated past the end of the file,
+ // -ENOENT means we truncated past the end of the file,
 // and get_block_create_0 could not find a block to read in,
 // which is ok.
 if (error != -ENOENT)
@@ -2098,11 +2098,11 @@ int reiserfs_truncate_file(struct inode
 }
 }

- /* so, if page != NULL, we have a buffer head for the offset at
- ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
- ** then we have an unformatted node. Otherwise, we have a direct item,
- ** and no zeroing is required on disk. We zero after the truncate,
- ** because the truncate might pack the item anyway
+ /* so, if page != NULL, we have a buffer head for the offset at
+ ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
+ ** then we have an unformatted node. Otherwise, we have a direct item,
+ ** and no zeroing is required on disk. We zero after the truncate,
+ ** because the truncate might pack the item anyway
 ** (it will unmap bh if it packs).
 */
 /* it is enough to reserve space in transaction for 2 balancings:
@@ -2311,8 +2311,8 @@ static int map_block_for_writepage(struc
 return retval;
 }

-/*
- * mason@suse.com: updated in 2.5.54 to follow the same general io
+/*
+ * mason@suse.com: updated in 2.5.54 to follow the same general io
 * start/recovery path as __block_write_full_page, along with special
 * code to handle reiserfs tails.
 */
@@ -2452,7 +2452,7 @@ static int reiserfs_write_full_page(stru
 unlock_page(page);

 /*
- * since any buffer might be the only dirty buffer on the page,
+ * since any buffer might be the only dirty buffer on the page,
 * the first submit_bh can bring the page out of writeback.
 * be careful with the buffers.
 */
@@ -2471,8 +2471,8 @@ static int reiserfs_write_full_page(stru
 if (nr == 0) {
 /*
 * if this page only had a direct item, it is very possible for
- * no io to be required without there being an error. Or,
- * someone else could have locked them and sent them down the
+ * no io to be required without there being an error. Or,
+ * someone else could have locked them and sent them down the
 * pipe without locking the page
 */
 bh = head;
@@ -2491,7 +2491,7 @@ static int reiserfs_write_full_page(stru

 fail:
 /* catches various errors, we need to make sure any valid dirty blocks
- * get to the media. The page is currently locked and not marked for
+ * get to the media. The page is currently locked and not marked for
 * writeback
 */
 ClearPageUptodate(page);
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -189,7 +189,7 @@ int reiserfs_unpack(struct inode *inode,
 }

 /* we unpack by finding the page with the tail, and calling
- ** reiserfs_prepare_write on that page. This will force a
+ ** reiserfs_prepare_write on that page. This will force a
 ** reiserfs_get_block to unpack the tail for us.
 */
 index = inode->i_size >> PAGE_CACHE_SHIFT;
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1,36 +1,36 @@
 /*
 ** Write ahead logging implementation copyright Chris Mason 2000
 **
-** The background commits make this code very interelated, and
+** The background commits make this code very interelated, and
 ** overly complex. I need to rethink things a bit....The major players:
 **
-** journal_begin -- call with the number of blocks you expect to log.
+** journal_begin -- call with the number of blocks you expect to log.
 ** If the current transaction is too
-** old, it will block until the current transaction is
+** old, it will block until the current transaction is
 ** finished, and then start a new one.
-** Usually, your transaction will get joined in with
+** Usually, your transaction will get joined in with
 ** previous ones for speed.
 **
-** journal_join -- same as journal_begin, but won't block on the current
+** journal_join -- same as journal_begin, but won't block on the current
 ** transaction regardless of age. Don't ever call
-** this. Ever. There are only two places it should be
+** this. Ever. There are only two places it should be
 ** called from, and they are both inside this file.
 **
-** journal_mark_dirty -- adds blocks into this transaction. clears any flags
+** journal_mark_dirty -- adds blocks into this transaction. clears any flags
 ** that might make them get sent to disk
-** and then marks them BH_JDirty. Puts the buffer head
-** into the current transaction hash.
+** and then marks them BH_JDirty. Puts the buffer head
+** into the current transaction hash.
 **
 ** journal_end -- if the current transaction is batchable, it does nothing
 ** otherwise, it could do an async/synchronous commit, or
-** a full flush of all log and real blocks in the
+** a full flush of all log and real blocks in the
 ** transaction.
 **
-** flush_old_commits -- if the current transaction is too old, it is ended and
-** commit blocks are sent to disk. Forces commit blocks
-** to disk for all backgrounded commits that have been
+** flush_old_commits -- if the current transaction is too old, it is ended and
+** commit blocks are sent to disk. Forces commit blocks
+** to disk for all backgrounded commits that have been
 ** around too long.
-** -- Note, if you call this as an immediate flush from
+** -- Note, if you call this as an immediate flush from
 ** from within kupdate, it will ignore the immediate flag
 */

@@ -212,7 +212,7 @@ static void allocate_bitmap_nodes(struct
 list_add(&bn->list, &journal->j_bitmap_nodes);
 journal->j_free_bitmap_nodes++;
 } else {
- break; // this is ok, we'll try again when more are needed
+ break; /* this is ok, we'll try again when more are needed */
 }
 }
 }
@@ -283,7 +283,7 @@ static int free_bitmap_nodes(struct supe
 }

 /*
-** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
+** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
 ** jb_array is the array to be filled in.
 */
 int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
@@ -315,7 +315,7 @@ int reiserfs_allocate_list_bitmaps(struc
 }

 /*
-** find an available list bitmap. If you can't find one, flush a commit list
+** find an available list bitmap. If you can't find one, flush a commit list
 ** and try again
 */
 static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
@@ -348,7 +348,7 @@ static struct reiserfs_list_bitmap *get_
 return jb;
 }

-/*
+/*
 ** allocates a new chunk of X nodes, and links them all together as a list.
 ** Uses the cnode->next and cnode->prev pointers
 ** returns NULL on failure
@@ -376,7 +376,7 @@ static struct reiserfs_journal_cnode *al
 }

 /*
-** pulls a cnode off the free list, or returns NULL on failure
+** pulls a cnode off the free list, or returns NULL on failure
 */
 static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
 {
@@ -403,7 +403,7 @@ static struct reiserfs_journal_cnode *ge
 }

 /*
-** returns a cnode to the free list
+** returns a cnode to the free list
 */
 static void free_cnode(struct super_block *p_s_sb,
 struct reiserfs_journal_cnode *cn)
@@ -1192,8 +1192,8 @@ static int flush_commit_list(struct supe
 }

 /*
-** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
-** returns NULL if it can't find anything
+** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
+** returns NULL if it can't find anything
 */
 static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
 reiserfs_journal_cnode
@@ -1335,8 +1335,8 @@ static int update_journal_header_block(s
 return _update_journal_header_block(p_s_sb, offset, trans_id);
 }

-/*
-** flush any and all journal lists older than you are
+/*
+** flush any and all journal lists older than you are
 ** can only be called from flush_journal_list
 */
 static int flush_older_journal_lists(struct super_block *p_s_sb,
@@ -1382,8 +1382,8 @@ static void del_from_work_list(struct su
 ** always set flushall to 1, unless you are calling from inside
 ** flush_journal_list
 **
-** IMPORTANT. This can only be called while there are no journal writers,
-** and the journal is locked. That means it can only be called from
+** IMPORTANT. This can only be called while there are no journal writers,
+** and the journal is locked. That means it can only be called from
 ** do_journal_end, or by journal_release
 */
 static int flush_journal_list(struct super_block *s,
@@ -1429,7 +1429,7 @@ static int flush_journal_list(struct sup
 goto flush_older_and_return;
 }

- /* start by putting the commit list on disk. This will also flush
+ /* start by putting the commit list on disk. This will also flush
 ** the commit lists of any olders transactions
 */
 flush_commit_list(s, jl, 1);
@@ -1444,8 +1444,8 @@ static int flush_journal_list(struct sup
 goto flush_older_and_return;
 }

- /* loop through each cnode, see if we need to write it,
- ** or wait on a more recent transaction, or just ignore it
+ /* loop through each cnode, see if we need to write it,
+ ** or wait on a more recent transaction, or just ignore it
 */
 if (atomic_read(&(journal->j_wcount)) != 0) {
 reiserfs_panic(s, "journal-844", "journal list is flushing, "
@@ -1473,8 +1473,8 @@ static int flush_journal_list(struct sup
 if (!pjl && cn->bh) {
 saved_bh = cn->bh;

- /* we do this to make sure nobody releases the buffer while
- ** we are working with it
+ /* we do this to make sure nobody releases the buffer while
+ ** we are working with it
 */
 get_bh(saved_bh);

@@ -1497,8 +1497,8 @@ static int flush_journal_list(struct sup
 goto free_cnode;
 }

- /* bh == NULL when the block got to disk on its own, OR,
- ** the block got freed in a future transaction
+ /* bh == NULL when the block got to disk on its own, OR,
+ ** the block got freed in a future transaction
 */
 if (saved_bh == NULL) {
 goto free_cnode;
@@ -1586,7 +1586,7 @@ static int flush_journal_list(struct sup
 __func__);
 flush_older_and_return:

- /* before we can update the journal header block, we _must_ flush all
+ /* before we can update the journal header block, we _must_ flush all
 ** real blocks from all older transactions to disk. This is because
 ** once the header block is updated, this transaction will not be
 ** replayed after a crash
@@ -1596,7 +1596,7 @@ static int flush_journal_list(struct sup
 }

 err = journal->j_errno;
- /* before we can remove everything from the hash tables for this
+ /* before we can remove everything from the hash tables for this
 ** transaction, we must make sure it can never be replayed
 **
 ** since we are only called from do_journal_end, we know for sure there
@@ -2016,9 +2016,9 @@ static int journal_compare_desc_commit(s
 return 0;
 }

-/* returns 0 if it did not find a description block
+/* returns 0 if it did not find a description block
 ** returns -1 if it found a corrupt commit block
-** returns 1 if both desc and commit were valid
+** returns 1 if both desc and commit were valid
 */
 static int journal_transaction_is_valid(struct super_block *p_s_sb,
 struct buffer_head *d_bh,
@@ -2380,8 +2380,8 @@ static int journal_read(struct super_blo
 bdevname(journal->j_dev_bd, b));
 start = get_seconds();

- /* step 1, read in the journal header block. Check the transaction it says
- ** is the first unflushed, and if that transaction is not valid,
+ /* step 1, read in the journal header block. Check the transaction it says
+ ** is the first unflushed, and if that transaction is not valid,
 ** replay is done
 */
 journal->j_header_bh = journal_bread(p_s_sb,
@@ -2406,8 +2406,8 @@ static int journal_read(struct super_blo
 le32_to_cpu(jh->j_last_flush_trans_id));
 valid_journal_header = 1;

- /* now, we try to read the first unflushed offset. If it is not valid,
- ** there is nothing more we can do, and it makes no sense to read
+ /* now, we try to read the first unflushed offset. If it is not valid,
+ ** there is nothing more we can do, and it makes no sense to read
 ** through the whole log.
 */
 d_bh =
@@ -2916,7 +2916,7 @@ int journal_transaction_should_end(struc
 return 0;
 }

-/* this must be called inside a transaction, and requires the
+/* this must be called inside a transaction, and requires the
 ** kernel_lock to be held
 */
 void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
@@ -3037,7 +3037,7 @@ static int do_journal_begin_r(struct rei
 now = get_seconds();

 /* if there is no room in the journal OR
- ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
+ ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
 ** we don't sleep if there aren't other writers
 */

@@ -3237,7 +3237,7 @@ int journal_begin(struct reiserfs_transa
 **
 ** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the
 ** transaction is committed.
-**
+**
 ** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
 */
 int journal_mark_dirty(struct reiserfs_transaction_handle *th,
@@ -3287,7 +3287,7 @@ int journal_mark_dirty(struct reiserfs_t
 atomic_read(&(journal->j_wcount)));
 return 1;
 }
- /* this error means I've screwed up, and we've overflowed the transaction.
+ /* this error means I've screwed up, and we've overflowed the transaction.
 ** Nothing can be done here, except make the FS readonly or panic.
 */
 if (journal->j_len >= journal->j_trans_max) {
@@ -3377,7 +3377,7 @@ int journal_end(struct reiserfs_transact
 }
 }

-/* removes from the current transaction, relsing and descrementing any counters.
+/* removes from the current transaction, relsing and descrementing any counters.
 ** also files the removed buffer directly onto the clean list
 **
 ** called by journal_mark_freed when a block has been deleted
@@ -3475,7 +3475,7 @@ static int can_dirty(struct reiserfs_jou
 }

 /* syncs the commit blocks, but does not force the real buffers to disk
-** will wait until the current transaction is done/committed before returning
+** will wait until the current transaction is done/committed before returning
 */
 int journal_end_sync(struct reiserfs_transaction_handle *th,
 struct super_block *p_s_sb, unsigned long nblocks)
@@ -3557,13 +3557,13 @@ int reiserfs_flush_old_commits(struct su

 /*
 ** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
-**
-** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
+**
+** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
 ** the writers are done. By the time it wakes up, the transaction it was called has already ended, so it just
 ** flushes the commit list and returns 0.
 **
 ** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
-**
+**
 ** Note, we can't allow the journal_end to proceed while there are still writers in the log.
 */
 static int check_journal_end(struct reiserfs_transaction_handle *th,
@@ -3591,7 +3591,7 @@ static int check_journal_end(struct reis
 atomic_dec(&(journal->j_wcount));
 }

- /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
+ /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
 ** will be dealt with by next transaction that actually writes something, but should be taken
 ** care of in this trans
 */
@@ -3600,7 +3600,7 @@ static int check_journal_end(struct reis
 /* if wcount > 0, and we are called to with flush or commit_now,
 ** we wait on j_join_wait. We will wake up when the last writer has
 ** finished the transaction, and started it on its way to the disk.
- ** Then, we flush the commit or journal list, and just return 0
+ ** Then, we flush the commit or journal list, and just return 0
 ** because the rest of journal end was already done for this transaction.
 */
 if (atomic_read(&(journal->j_wcount)) > 0) {
@@ -3671,7 +3671,7 @@ static int check_journal_end(struct reis
 /*
 ** Does all the work that makes deleting blocks safe.
 ** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on.
-**
+**
 ** otherwise:
 ** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
 ** before this transaction has finished.
@@ -3875,7 +3875,7 @@ extern struct tree_balance *cur_tb;
 ** be written to disk while we are altering it. So, we must:
 ** clean it
 ** wait on it.
-**
+**
 */
 int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
 struct buffer_head *bh, int wait)
@@ -3917,7 +3917,7 @@ static void flush_old_journal_lists(stru
 }
 }

-/*
+/*
 ** long and ugly. If flush, will not return until all commit
 ** blocks and all real buffers in the trans are on disk.
 ** If no_async, won't return until all commit blocks are on disk.
@@ -3978,7 +3978,7 @@ static int do_journal_end(struct reiserf
 wait_on_commit = 1;
 }

- /* check_journal_end locks the journal, and unlocks if it does not return 1
+ /* check_journal_end locks the journal, and unlocks if it does not return 1
 ** it tells us if we should continue with the journal_end, or just return
 */
 if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
@@ -4075,7 +4075,7 @@ static int do_journal_end(struct reiserf
 last_cn->next = jl_cn;
 }
 last_cn = jl_cn;
- /* make sure the block we are trying to log is not a block
+ /* make sure the block we are trying to log is not a block
 of journal or reserved area */

 if (is_block_in_log_or_reserved_area
@@ -4222,9 +4222,9 @@ static int do_journal_end(struct reiserf
 } else if (!(jl->j_state & LIST_COMMIT_PENDING))
 queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);

- /* if the next transaction has any chance of wrapping, flush
- ** transactions that might get overwritten. If any journal lists are very
- ** old flush them as well.
+ /* if the next transaction has any chance of wrapping, flush
+ ** transactions that might get overwritten. If any journal lists are very
+ ** old flush them as well.
 */
 first_jl:
 list_for_each_safe(entry, safe, &journal->j_journal_list) {
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -119,8 +119,8 @@ static void leaf_copy_dir_entries(struct
 DEH_SIZE * copy_count + copy_records_len);
 }

-/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or
- part of it or nothing (see the return 0 below) from SOURCE to the end
+/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or
+ part of it or nothing (see the return 0 below) from SOURCE to the end
 (if last_first) or beginning (!last_first) of the DEST */
 /* returns 1 if anything was copied, else 0 */
 static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
@@ -396,7 +396,7 @@ static void leaf_item_bottle(struct buff
 else {
 struct item_head n_ih;

- /* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST
+ /* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST
 part defined by 'cpy_bytes'; create new item header; change old item_header (????);
 n_ih = new item_header;
 */
@@ -426,7 +426,7 @@ static void leaf_item_bottle(struct buff
 else {
 struct item_head n_ih;

- /* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST
+ /* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST
 part defined by 'cpy_bytes'; create new item header;
 n_ih = new item_header;
 */
@@ -724,7 +724,7 @@ int leaf_shift_right(struct tree_balance
 static void leaf_delete_items_entirely(struct buffer_info *bi,
 int first, int del_num);
 /* If del_bytes == -1, starting from position 'first' delete del_num items in whole in buffer CUR.
- If not.
+ If not.
 If last_first == 0. Starting from position 'first' delete del_num-1 items in whole. Delete part of body of
 the first item. Part defined by del_bytes. Don't delete first item header
 If last_first == 1. Starting from position 'first+1' delete del_num-1 items in whole. Delete part of body of
@@ -783,7 +783,7 @@ void leaf_delete_items(struct buffer_inf
 /* len = body len of item */
 len = ih_item_len(ih);

- /* delete the part of the last item of the bh
+ /* delete the part of the last item of the bh
 do not delete item header
 */
 leaf_cut_from_buffer(cur_bi, B_NR_ITEMS(bh) - 1,
@@ -865,7 +865,7 @@ void leaf_insert_into_buf(struct buffer_
 }
 }

-/* paste paste_size bytes to affected_item_num-th item.
+/* paste paste_size bytes to affected_item_num-th item.
 When item is a directory, this only prepare space for new entries */
 void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num,
 int pos_in_item, int paste_size,
@@ -1022,7 +1022,7 @@ static int leaf_cut_entries(struct buffe
 /* when cut item is part of regular file
 pos_in_item - first byte that must be cut
 cut_size - number of bytes to be cut beginning from pos_in_item
-
+
 when cut item is part of directory
 pos_in_item - number of first deleted entry
 cut_size - count of deleted entries
@@ -1275,7 +1275,7 @@ void leaf_paste_entries(struct buffer_in
 /* change item key if necessary (when we paste before 0-th entry */
 if (!before) {
 set_le_ih_k_offset(ih, deh_offset(new_dehs));
-/* memcpy (&ih->ih_key.k_offset,
+/* memcpy (&ih->ih_key.k_offset,
 &new_dehs->deh_offset, SHORT_KEY_SIZE);*/
 }
 #ifdef CONFIG_REISERFS_CHECK
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -106,7 +106,7 @@ key of the first directory entry in it.
 This function first calls search_by_key, then, if item whose first
 entry matches is not found it looks for the entry inside directory
 item found by search_by_key. Fills the path to the entry, and to the
-entry position in the item
+entry position in the item

 */

@@ -371,7 +371,7 @@ static struct dentry *reiserfs_lookup(st
 return d_splice_alias(inode, dentry);
 }

-/*
+/*
 ** looks up the dentry of the parent directory for child.
 ** taken from ext2_get_parent
 */
@@ -410,7 +410,7 @@ struct dentry *reiserfs_get_parent(struc
 return parent;
 }

-/* add entry to the directory (entry can be hidden).
+/* add entry to the directory (entry can be hidden).

 insert definition of when hidden directories are used here -Hans

@@ -568,7 +568,7 @@ static int drop_new_inode(struct inode *
 return 0;
 }

-/* utility function that does setup for reiserfs_new_inode.
+/* utility function that does setup for reiserfs_new_inode.
 ** DQUOT_INIT needs lots of credits so it's better to have it
 ** outside of a transaction, so we had to pull some bits of
 ** reiserfs_new_inode out into this func.
@@ -823,7 +823,7 @@ static inline int reiserfs_empty_dir(str
 {
 /* we can cheat because an old format dir cannot have
 ** EMPTY_DIR_SIZE, and a new format dir cannot have
- ** EMPTY_DIR_SIZE_V1. So, if the inode is either size,
+ ** EMPTY_DIR_SIZE_V1. So, if the inode is either size,
 ** regardless of disk format version, the directory is empty.
 */
 if (inode->i_size != EMPTY_DIR_SIZE &&
@@ -1163,7 +1163,7 @@ static int reiserfs_link(struct dentry *
 return retval;
 }

-// de contains information pointing to an entry which
+/* de contains information pointing to an entry which */
 static int de_still_valid(const char *name, int len,
 struct reiserfs_dir_entry *de)
 {
@@ -1207,10 +1207,10 @@ static void set_ino_in_dir_entry(struct
 de->de_deh[de->de_entry_num].deh_objectid = key->k_objectid;
 }

-/*
+/*
 * process, that is going to call fix_nodes/do_balance must hold only
 * one path. If it holds 2 or more, it can get into endless waiting in
- * get_empty_nodes or its clones
+ * get_empty_nodes or its clones
 */
 static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 struct inode *new_dir, struct dentry *new_dentry)
@@ -1264,7 +1264,7 @@ static int reiserfs_rename(struct inode

 old_inode_mode = old_inode->i_mode;
 if (S_ISDIR(old_inode_mode)) {
- // make sure, that directory being renamed has correct ".."
+ // make sure, that directory being renamed has correct ".."
 // and that its new parent directory has not too many links
 // already

@@ -1275,8 +1275,8 @@ static int reiserfs_rename(struct inode
 }
 }

- /* directory is renamed, its parent directory will be changed,
- ** so find ".." entry
+ /* directory is renamed, its parent directory will be changed,
+ ** so find ".." entry
 */
 dot_dot_de.de_gen_number_bit_string = NULL;
 retval =
@@ -1386,9 +1386,9 @@ static int reiserfs_rename(struct inode
 this stuff, yes? Then, having
 gathered everything into RAM we
 should lock the buffers, yes? -Hans */
- /* probably. our rename needs to hold more
- ** than one path at once. The seals would
- ** have to be written to deal with multi-path
+ /* probably. our rename needs to hold more
+ ** than one path at once. The seals would
+ ** have to be written to deal with multi-path
 ** issues -chris
 */
 /* sanity checking before doing the rename - avoid races many
@@ -1466,7 +1466,7 @@ static int reiserfs_rename(struct inode
 }

 if (S_ISDIR(old_inode_mode)) {
- // adjust ".." of renamed directory
+ /* adjust ".." of renamed directory */
 set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir));
 journal_mark_dirty(&th, new_dir->i_sb, dot_dot_de.de_bh);

--- a/fs/reiserfs/objectid.c
+++ b/fs/reiserfs/objectid.c
@@ -180,7 +180,7 @@ int reiserfs_convert_objectid_map_v1(str

 if (cur_size > new_size) {
 /* mark everyone used that was listed as free at the end of the objectid
- ** map
+ ** map
 */
 objectid_map[new_size - 1] = objectid_map[cur_size - 1];
 set_sb_oid_cursize(disk_sb, new_size);
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -181,11 +181,11 @@ static char *is_there_reiserfs_struct(ch
 appropriative printk. With this reiserfs_warning you can use format
 specification for complex structures like you used to do with
 printfs for integers, doubles and pointers. For instance, to print
- out key structure you have to write just:
- reiserfs_warning ("bad key %k", key);
- instead of
- printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
- key->k_offset, key->k_uniqueness);
+ out key structure you have to write just:
+ reiserfs_warning ("bad key %k", key);
+ instead of
+ printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
+ key->k_offset, key->k_uniqueness);
 */

 static void prepare_error_buf(const char *fmt, va_list args)
@@ -247,11 +247,11 @@ static void prepare_error_buf(const char
 }

 /* in addition to usual conversion specifiers this accepts reiserfs
- specific conversion specifiers:
- %k to print little endian key,
- %K to print cpu key,
+ specific conversion specifiers:
+ %k to print little endian key,
+ %K to print cpu key,
 %h to print item_head,
- %t to print directory entry
+ %t to print directory entry
 %z to print block head (arg must be struct buffer_head *
 %b to print buffer_head
 */
@@ -317,17 +317,17 @@ void reiserfs_debug(struct super_block *
 maintainer-errorid. Don't bother with reusing errorids, there are
 lots of numbers out there.

- Example:
-
+ Example:
+
 reiserfs_panic(
 p_sb, "reiser-29: reiserfs_new_blocknrs: "
 "one of search_start or rn(%d) is equal to MAX_B_NUM,"
- "which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
+ "which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
 rn, bh
 );

 Regular panic()s sometimes clear the screen before the message can
- be read, thus the need for the while loop.
+ be read, thus the need for the while loop.

 Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it
 pointless complexity):
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -636,7 +636,7 @@ int reiserfs_global_version_in_proc(char
 *
 */

-/*
+/*
 * Make Linus happy.
 * Local variables:
 * c-indentation-style: "K&R"
--- a/fs/reiserfs/README
+++ b/fs/reiserfs/README
@@ -1,4 +1,4 @@
-[LICENSING]
+[LICENSING]

 ReiserFS is hereby licensed under the GNU General
 Public License version 2.
@@ -31,7 +31,7 @@ the GPL as not allowing those additional
 it wrongly, and Richard Stallman agrees with me, when carefully read
 you can see that those restrictions on additional terms do not apply
 to the owner of the copyright, and my interpretation of this shall
-govern for this license.
+govern for this license.

 Finally, nothing in this license shall be interpreted to allow you to
 fail to fairly credit me, or to remove my credits, without my
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -1,8 +1,8 @@
-/*
+/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

-/*
+/*
 * Written by Alexander Zarochentcev.
 *
 * The kernel part of the (on-line) reiserfs resizer.
@@ -101,7 +101,7 @@ int reiserfs_resize(struct super_block *
 memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);

 /* just in case vfree schedules on us, copy the new
- ** pointer into the journal struct before freeing the
+ ** pointer into the journal struct before freeing the
 ** old one
 */
 node_tmp = jb->bitmaps;
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -77,7 +77,7 @@ inline void copy_item_head(struct item_h
 /* k1 is pointer to on-disk structure which is stored in little-endian
 form. k2 is pointer to cpu variable. For key of items of the same
 object this returns 0.
- Returns: -1 if key1 < key2
+ Returns: -1 if key1 < key2
 0 if key1 == key2
 1 if key1 > key2 */
 inline int comp_short_keys(const struct reiserfs_key *le_key,
@@ -890,7 +890,7 @@ static inline int prepare_for_direct_ite
 }
 // new file gets truncated
 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) {
- //
+ //
 round_len = ROUND_UP(new_file_length);
 /* this was n_new_file_length < le_ih ... */
 if (round_len < le_ih_k_offset(le_ih)) {
@@ -1443,7 +1443,7 @@ static int maybe_indirect_to_direct(stru
 if (atomic_read(&p_s_inode->i_count) > 1 ||
 !tail_has_to_be_packed(p_s_inode) ||
 !page || (REISERFS_I(p_s_inode)->i_flags & i_nopack_mask)) {
- // leave tail in an unformatted node
+ /* leave tail in an unformatted node */
 *p_c_mode = M_SKIP_BALANCING;
 cut_bytes =
 n_block_size - (n_new_file_size & (n_block_size - 1));
@@ -1826,7 +1826,7 @@ int reiserfs_do_truncate(struct reiserfs
 /* While there are bytes to truncate and previous file item is presented in the tree. */

 /*
- ** This loop could take a really long time, and could log
+ ** This loop could take a really long time, and could log
 ** many more blocks than a transaction can hold. So, we do a polite
 ** journal end here, and if the transaction needs ending, we make
 ** sure the file is consistent before ending the current trans
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -754,7 +754,7 @@ static int reiserfs_getopt(struct super_
 char **opt_arg, unsigned long *bit_flags)
 {
 char *p;
- /* foo=bar,
+ /* foo=bar,
 ^ ^ ^
 | | +-- option_end
 | +-- arg_start
@@ -1346,7 +1346,7 @@ static int read_super_block(struct super
 }
 //
 // ok, reiserfs signature (old or new) found in at the given offset
- //
+ //
 fs_blocksize = sb_blocksize(rs);
 brelse(bh);
 sb_set_blocksize(s, fs_blocksize);
@@ -1532,8 +1532,8 @@ static int what_hash(struct super_block
 code = find_hash_out(s);

 if (code != UNSET_HASH && reiserfs_hash_detect(s)) {
- /* detection has found the hash, and we must check against the
- ** mount options
+ /* detection has found the hash, and we must check against the
+ ** mount options
 */
 if (reiserfs_rupasov_hash(s) && code != YURA_HASH) {
 reiserfs_warning(s, "reiserfs-2507",
@@ -1565,7 +1565,7 @@ static int what_hash(struct super_block
 }
 }

- /* if we are mounted RW, and we have a new valid hash code, update
+ /* if we are mounted RW, and we have a new valid hash code, update
 ** the super
 */
 if (code != UNSET_HASH &&
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -46,7 +46,7 @@ int direct2indirect(struct reiserfs_tran
 /* Set the key to search for the place for new unfm pointer */
 make_cpu_key(&end_key, inode, tail_offset, TYPE_INDIRECT, 4);

- // FIXME: we could avoid this
+ /* FIXME: we could avoid this */
 if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) {
 reiserfs_error(sb, "PAP-14030",
 "pasted or inserted byte exists in "
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -14,7 +14,7 @@ typedef enum {
 } reiserfs_super_block_flags;

 /* struct reiserfs_super_block accessors/mutators
- * since this is a disk structure, it will always be in
+ * since this is a disk structure, it will always be in
 * little endian format. */
 #define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count))
 #define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
@@ -83,16 +83,16 @@ typedef enum {

 /* LOGGING -- */

-/* These all interelate for performance.
+/* These all interelate for performance.
 **
-** If the journal block count is smaller than n transactions, you lose speed.
+** If the journal block count is smaller than n transactions, you lose speed.
 ** I don't know what n is yet, I'm guessing 8-16.
 **
 ** typical transaction size depends on the application, how often fsync is
-** called, and how many metadata blocks you dirty in a 30 second period.
+** called, and how many metadata blocks you dirty in a 30 second period.
 ** The more small files (<16k) you use, the larger your transactions will
 ** be.
-**
+**
 ** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal
 ** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough
 ** to prevent wrapping before dirty meta blocks get to disk.
@@ -241,7 +241,7 @@ struct reiserfs_journal {

 struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */
 struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */
- struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all
+ struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all
 the transactions */
 struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */
 int j_persistent_trans;
@@ -425,7 +425,7 @@ enum reiserfs_mount_options {
 partition will be dealt with in a
 manner of 3.5.x */

-/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
+/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
 ** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option
 ** is not required. If the normal autodection code can't determine which
 ** hash to use (because both hashes had the same value for a file)