[linux-2.6] / fs / gfs2 / eattr.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License v.2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/xattr.h>
16 #include <linux/gfs2_ondisk.h>
17 #include <asm/uaccess.h>
18
19 #include "gfs2.h"
20 #include "lm_interface.h"
21 #include "incore.h"
22 #include "acl.h"
23 #include "eaops.h"
24 #include "eattr.h"
25 #include "glock.h"
26 #include "inode.h"
27 #include "meta_io.h"
28 #include "quota.h"
29 #include "rgrp.h"
30 #include "trans.h"
31 #include "util.h"
32
33 /**
34  * ea_calc_size - returns the actual number of bytes the request will take up
35  *                (not counting any unstuffed data blocks)
36  * @sdp: the filesystem superblock
37  * @er: the extended attribute request
38  * @size: returns the number of bytes the EA will consume in the EA block
39  *
40  * Returns: 1 if the EA should be stuffed
41  */
42
43 static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
44                         unsigned int *size)
45 {
46         *size = GFS2_EAREQ_SIZE_STUFFED(er);
47         if (*size <= sdp->sd_jbsize)
48                 return 1;
49
50         *size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);
51
52         return 0;
53 }
54
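/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * helper showing how ea_calc_size() drives the stuffed/unstuffed
 * decision and, in the unstuffed case, how many data blocks a request
 * needs -- the same arithmetic ea_init() uses further down.
 */
#if 0
static unsigned int ea_calc_blocks(struct gfs2_sbd *sdp,
                                   struct gfs2_ea_request *er)
{
        unsigned int size;
        unsigned int blks = 1; /* the EA block itself */

        if (!ea_calc_size(sdp, er, &size))
                /* unstuffed: one GFS2_METATYPE_ED block per jbsize chunk */
                blks += DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);

        return blks;
}
#endif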
55 static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
56 {
57         unsigned int size;
58
59         if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
60                 return -ERANGE;
61
62         ea_calc_size(sdp, er, &size);
63
64         /* This can only happen with 512 byte blocks */
65         if (size > sdp->sd_jbsize)
66                 return -ERANGE;
67
68         return 0;
69 }
70
71 typedef int (*ea_call_t) (struct gfs2_inode *ip,
72                           struct buffer_head *bh,
73                           struct gfs2_ea_header *ea,
74                           struct gfs2_ea_header *prev,
75                           void *private);
76
77 static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
78                         ea_call_t ea_call, void *data)
79 {
80         struct gfs2_ea_header *ea, *prev = NULL;
81         int error = 0;
82
83         if (gfs2_metatype_check(ip->i_sbd, bh, GFS2_METATYPE_EA))
84                 return -EIO;
85
86         for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
87                 if (!GFS2_EA_REC_LEN(ea))
88                         goto fail;
89                 if (!(bh->b_data <= (char *)ea &&
90                       (char *)GFS2_EA2NEXT(ea) <=
91                       bh->b_data + bh->b_size))
92                         goto fail;
93                 if (!GFS2_EATYPE_VALID(ea->ea_type))
94                         goto fail;
95
96                 error = ea_call(ip, bh, ea, prev, data);
97                 if (error)
98                         return error;
99
100                 if (GFS2_EA_IS_LAST(ea)) {
101                         if ((char *)GFS2_EA2NEXT(ea) !=
102                             bh->b_data + bh->b_size)
103                                 goto fail;
104                         break;
105                 }
106         }
107
108         return error;
109
110  fail:
111         gfs2_consist_inode(ip);
112         return -EIO;
113 }
114
115 static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
116 {
117         struct buffer_head *bh, *eabh;
118         uint64_t *eablk, *end;
119         int error;
120
121         error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
122                                DIO_START | DIO_WAIT, &bh);
123         if (error)
124                 return error;
125
126         if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
127                 error = ea_foreach_i(ip, bh, ea_call, data);
128                 goto out;
129         }
130
131         if (gfs2_metatype_check(ip->i_sbd, bh, GFS2_METATYPE_IN)) {
132                 error = -EIO;
133                 goto out;
134         }
135
136         eablk = (uint64_t *)(bh->b_data + sizeof(struct gfs2_meta_header));
137         end = eablk + ip->i_sbd->sd_inptrs;
138
139         for (; eablk < end; eablk++) {
140                 uint64_t bn;
141
142                 if (!*eablk)
143                         break;
144                 bn = be64_to_cpu(*eablk);
145
146                 error = gfs2_meta_read(ip->i_gl, bn, DIO_START | DIO_WAIT,
147                                        &eabh);
148                 if (error)
149                         break;
150                 error = ea_foreach_i(ip, eabh, ea_call, data);
151                 brelse(eabh);
152                 if (error)
153                         break;
154         }
155  out:
156         brelse(bh);
157
158         return error;
159 }
160
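/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * ea_call_t callback, following the convention used by ea_find_i()
 * and ea_list_i() below -- return 0 to keep walking, a positive value
 * to stop the walk early, or -errno on failure.  ea_foreach() hands
 * the callback every header, including GFS2_EATYPE_UNUSED ones.
 */
#if 0
static int ea_count_i(struct gfs2_inode *ip, struct buffer_head *bh,
                      struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                      void *private)
{
        unsigned int *count = private;

        if (ea->ea_type != GFS2_EATYPE_UNUSED)
                (*count)++;

        return 0;
}
#endif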
161 struct ea_find {
162         struct gfs2_ea_request *ef_er;
163         struct gfs2_ea_location *ef_el;
164 };
165
166 static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
167                      struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
168                      void *private)
169 {
170         struct ea_find *ef = private;
171         struct gfs2_ea_request *er = ef->ef_er;
172
173         if (ea->ea_type == GFS2_EATYPE_UNUSED)
174                 return 0;
175
176         if (ea->ea_type == er->er_type) {
177                 if (ea->ea_name_len == er->er_name_len &&
178                     !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
179                         struct gfs2_ea_location *el = ef->ef_el;
180                         get_bh(bh);
181                         el->el_bh = bh;
182                         el->el_ea = ea;
183                         el->el_prev = prev;
184                         return 1;
185                 }
186         }
187
188 #if 0
189         else if ((ip->i_di.di_flags & GFS2_DIF_EA_PACKED) &&
190                  er->er_type == GFS2_EATYPE_SYS)
191                 return 1;
192 #endif
193
194         return 0;
195 }
196
197 int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
198                  struct gfs2_ea_location *el)
199 {
200         struct ea_find ef;
201         int error;
202
203         ef.ef_er = er;
204         ef.ef_el = el;
205
206         memset(el, 0, sizeof(struct gfs2_ea_location));
207
208         error = ea_foreach(ip, ea_find_i, &ef);
209         if (error > 0)
210                 return 0;
211
212         return error;
213 }
214
215 /**
216  * ea_dealloc_unstuffed - free the data blocks of an unstuffed EA
217  * @ip: the inode the EA belongs to
218  * @bh: the buffer head of the EA block
219  * @ea: the EA header whose data blocks are freed
220  * @prev: the previous EA header in the block, or NULL
221  * @private: if non-NULL, leave the emptied EA header in place
222  *
223  * Take advantage of the fact that all unstuffed blocks are
224  * allocated from the same RG.  But watch, this may not always
225  * be true.
226  *
227  * Returns: errno
228  */
229
230 static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
231                                 struct gfs2_ea_header *ea,
232                                 struct gfs2_ea_header *prev, void *private)
233 {
234         int *leave = private;
235         struct gfs2_sbd *sdp = ip->i_sbd;
236         struct gfs2_rgrpd *rgd;
237         struct gfs2_holder rg_gh;
238         struct buffer_head *dibh;
239         uint64_t *dataptrs, bn = 0;
240         uint64_t bstart = 0;
241         unsigned int blen = 0;
242         unsigned int blks = 0;
243         unsigned int x;
244         int error;
245
246         if (GFS2_EA_IS_STUFFED(ea))
247                 return 0;
248
249         dataptrs = GFS2_EA2DATAPTRS(ea);
250         for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++)
251                 if (*dataptrs) {
252                         blks++;
253                         bn = be64_to_cpu(*dataptrs);
254                 }
255         if (!blks)
256                 return 0;
257
258         rgd = gfs2_blk2rgrpd(sdp, bn);
259         if (!rgd) {
260                 gfs2_consist_inode(ip);
261                 return -EIO;
262         }
263
264         error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
265         if (error)
266                 return error;
267
268         error = gfs2_trans_begin(sdp, rgd->rd_ri.ri_length +
269                                  RES_DINODE + RES_EATTR + RES_STATFS +
270                                  RES_QUOTA, blks);
271         if (error)
272                 goto out_gunlock;
273
274         gfs2_trans_add_bh(ip->i_gl, bh, 1);
275
276         dataptrs = GFS2_EA2DATAPTRS(ea);
277         for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
278                 if (!*dataptrs)
279                         break;
280                 bn = be64_to_cpu(*dataptrs);
281
282                 if (bstart + blen == bn)
283                         blen++;
284                 else {
285                         if (bstart)
286                                 gfs2_free_meta(ip, bstart, blen);
287                         bstart = bn;
288                         blen = 1;
289                 }
290
291                 *dataptrs = 0;
292                 if (!ip->i_di.di_blocks)
293                         gfs2_consist_inode(ip);
294                 ip->i_di.di_blocks--;
295         }
296         if (bstart)
297                 gfs2_free_meta(ip, bstart, blen);
298
299         if (prev && !leave) {
300                 uint32_t len;
301
302                 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
303                 prev->ea_rec_len = cpu_to_be32(len);
304
305                 if (GFS2_EA_IS_LAST(ea))
306                         prev->ea_flags |= GFS2_EAFLAG_LAST;
307         } else {
308                 ea->ea_type = GFS2_EATYPE_UNUSED;
309                 ea->ea_num_ptrs = 0;
310         }
311
312         error = gfs2_meta_inode_buffer(ip, &dibh);
313         if (!error) {
314                 ip->i_di.di_ctime = get_seconds();
315                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
316                 gfs2_dinode_out(&ip->i_di, dibh->b_data);
317                 brelse(dibh);
318         }
319
320         gfs2_trans_end(sdp);
321
322  out_gunlock:
323         gfs2_glock_dq_uninit(&rg_gh);
324
325         return error;
326 }
327
328 static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
329                                struct gfs2_ea_header *ea,
330                                struct gfs2_ea_header *prev, int leave)
331 {
332         struct gfs2_alloc *al;
333         int error;
334
335         al = gfs2_alloc_get(ip);
336
337         error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
338         if (error)
339                 goto out_alloc;
340
341         error = gfs2_rindex_hold(ip->i_sbd, &al->al_ri_gh);
342         if (error)
343                 goto out_quota;
344
345         error = ea_dealloc_unstuffed(ip,
346                                      bh, ea, prev,
347                                      (leave) ? &error : NULL);
348
349         gfs2_glock_dq_uninit(&al->al_ri_gh);
350
351  out_quota:
352         gfs2_quota_unhold(ip);
353
354  out_alloc:
355         gfs2_alloc_put(ip);
356
357         return error;
358 }
359
360 struct ea_list {
361         struct gfs2_ea_request *ei_er;
362         unsigned int ei_size;
363 };
364
365 static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
366                      struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
367                      void *private)
368 {
369         struct ea_list *ei = private;
370         struct gfs2_ea_request *er = ei->ei_er;
371         unsigned int ea_size = gfs2_ea_strlen(ea);
372
373         if (ea->ea_type == GFS2_EATYPE_UNUSED)
374                 return 0;
375
376         if (er->er_data_len) {
377                 char *prefix;
378                 unsigned int l;
379                 char c = 0;
380
381                 if (ei->ei_size + ea_size > er->er_data_len)
382                         return -ERANGE;
383
384                 switch (ea->ea_type) {
385                 case GFS2_EATYPE_USR:
386                         prefix = "user.";
387                         l = 5;
388                         break;
389                 case GFS2_EATYPE_SYS:
390                         prefix = "system.";
391                         l = 7;
392                         break;
393                 case GFS2_EATYPE_SECURITY:
394                         prefix = "security.";
395                         l = 9;
396                         break;
397                 default:
398                         break; /* all valid, in-use types handled above */
399                 }
400
401                 memcpy(er->er_data + ei->ei_size,
402                        prefix, l);
403                 memcpy(er->er_data + ei->ei_size + l,
404                        GFS2_EA2NAME(ea),
405                        ea->ea_name_len);
406                 memcpy(er->er_data + ei->ei_size +
407                        ea_size - 1,
408                        &c, 1);
409         }
410
411         ei->ei_size += ea_size;
412
413         return 0;
414 }
415
416 /**
417  * gfs2_ea_list - lists the extended attribute names set on an inode
418  * @ip: the inode
419  * @er: the request; a zero er_data_len means just return the size needed
420  *
421  * Returns: actual size of data on success, -errno on error
422  */
423
424 int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
425 {
426         struct gfs2_holder i_gh;
427         int error;
428
429         if (!er->er_data || !er->er_data_len) {
430                 er->er_data = NULL;
431                 er->er_data_len = 0;
432         }
433
434         error = gfs2_glock_nq_init(ip->i_gl,
435                                   LM_ST_SHARED, LM_FLAG_ANY,
436                                   &i_gh);
437         if (error)
438                 return error;
439
440         if (ip->i_di.di_eattr) {
441                 struct ea_list ei = { .ei_er = er, .ei_size = 0 };
442
443                 error = ea_foreach(ip, ea_list_i, &ei);
444                 if (!error)
445                         error = ei.ei_size;
446         }
447
448         gfs2_glock_dq_uninit(&i_gh);
449
450         return error;
451 }
452
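/*
 * Illustrative sketch (not part of the original file): the two-pass
 * pattern gfs2_ea_list() supports, matching listxattr() semantics --
 * a first call with no buffer returns the space required, a second
 * call with a buffer fills in the "prefix.name\0" entries.  The helper
 * name is hypothetical.
 */
#if 0
static int ea_list_two_pass(struct gfs2_inode *ip, char *buf,
                            unsigned int buf_size)
{
        struct gfs2_ea_request er;
        int size;

        memset(&er, 0, sizeof(struct gfs2_ea_request));
        size = gfs2_ea_list(ip, &er);   /* size query: er_data_len == 0 */
        if (size <= 0)
                return size;
        if ((unsigned int)size > buf_size)
                return -ERANGE;

        er.er_data = buf;
        er.er_data_len = buf_size;
        return gfs2_ea_list(ip, &er);   /* fill pass */
}
#endif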
453 /**
454  * ea_get_unstuffed - actually copies the unstuffed data into the
455  *                    request buffer
456  * @ip: the inode the EA belongs to
457  * @ea: the EA header whose data blocks are read
458  * @data: the buffer to copy the attribute data into
459  *
460  * Returns: errno
461  */
462
463 static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
464                             char *data)
465 {
466         struct gfs2_sbd *sdp = ip->i_sbd;
467         struct buffer_head **bh;
468         unsigned int amount = GFS2_EA_DATA_LEN(ea);
469         unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
470         uint64_t *dataptrs = GFS2_EA2DATAPTRS(ea);
471         unsigned int x;
472         int error = 0;
473
474         bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
475         if (!bh)
476                 return -ENOMEM;
477
478         for (x = 0; x < nptrs; x++) {
479                 error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs),
480                                        DIO_START, bh + x);
481                 if (error) {
482                         while (x--)
483                                 brelse(bh[x]);
484                         goto out;
485                 }
486                 dataptrs++;
487         }
488
489         for (x = 0; x < nptrs; x++) {
490                 error = gfs2_meta_reread(sdp, bh[x], DIO_WAIT);
491                 if (error) {
492                         for (; x < nptrs; x++)
493                                 brelse(bh[x]);
494                         goto out;
495                 }
496                 if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
497                         for (; x < nptrs; x++)
498                                 brelse(bh[x]);
499                         error = -EIO;
500                         goto out;
501                 }
502
503                 memcpy(data,
504                        bh[x]->b_data + sizeof(struct gfs2_meta_header),
505                        (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
506
507                 amount -= sdp->sd_jbsize;
508                 data += sdp->sd_jbsize;
509
510                 brelse(bh[x]);
511         }
512
513  out:
514         kfree(bh);
515
516         return error;
517 }
518
519 int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
520                      char *data)
521 {
522         if (GFS2_EA_IS_STUFFED(el->el_ea)) {
523                 memcpy(data,
524                        GFS2_EA2DATA(el->el_ea),
525                        GFS2_EA_DATA_LEN(el->el_ea));
526                 return 0;
527         } else
528                 return ea_get_unstuffed(ip, el->el_ea, data);
529 }
530
531 /**
532  * gfs2_ea_get_i - reads the data of an extended attribute
533  * @ip: the inode
534  * @er: the request; a zero er_data_len means just return the data length
535  *
536  * Returns: actual size of data on success, -errno on error
537  */
538
539 int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
540 {
541         struct gfs2_ea_location el;
542         int error;
543
544         if (!ip->i_di.di_eattr)
545                 return -ENODATA;
546
547         error = gfs2_ea_find(ip, er, &el);
548         if (error)
549                 return error;
550         if (!el.el_ea)
551                 return -ENODATA;
552
553         if (er->er_data_len) {
554                 if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
555                         error =  -ERANGE;
556                 else
557                         error = gfs2_ea_get_copy(ip, &el, er->er_data);
558         }
559         if (!error)
560                 error = GFS2_EA_DATA_LEN(el.el_ea);
561
562         brelse(el.el_bh);
563
564         return error;
565 }
566
567 /**
568  * gfs2_ea_get - reads an extended attribute (takes the inode glock itself)
569  * @ip: the inode
570  * @er: the request
571  *
572  * Returns: actual size of data on success, -errno on error
573  */
574
575 int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
576 {
577         struct gfs2_holder i_gh;
578         int error;
579
580         if (!er->er_name_len ||
581             er->er_name_len > GFS2_EA_MAX_NAME_LEN)
582                 return -EINVAL;
583         if (!er->er_data || !er->er_data_len) {
584                 er->er_data = NULL;
585                 er->er_data_len = 0;
586         }
587
588         error = gfs2_glock_nq_init(ip->i_gl,
589                                   LM_ST_SHARED, LM_FLAG_ANY,
590                                   &i_gh);
591         if (error)
592                 return error;
593
594         error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);
595
596         gfs2_glock_dq_uninit(&i_gh);
597
598         return error;
599 }
600
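/*
 * Illustrative sketch (not part of the original file): reading a user
 * attribute through gfs2_ea_get(), which dispatches via gfs2_ea_ops[].
 * The return conventions below mirror gfs2_ea_get_i() above: -ENODATA
 * if the attribute does not exist, -ERANGE if the buffer is too small,
 * otherwise the attribute's data length; a zero er_data_len asks for
 * the length only.  The helper name is hypothetical.
 */
#if 0
static int ea_read_user_attr(struct gfs2_inode *ip, char *name,
                             unsigned int name_len,
                             char *buf, unsigned int buf_size)
{
        struct gfs2_ea_request er;

        memset(&er, 0, sizeof(struct gfs2_ea_request));
        er.er_type = GFS2_EATYPE_USR;
        er.er_name = name;
        er.er_name_len = name_len;
        er.er_data = buf;
        er.er_data_len = buf_size;

        return gfs2_ea_get(ip, &er);
}
#endif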
601 /**
602  * ea_alloc_blk - allocates a new block for extended attributes.
603  * @ip: A pointer to the inode that's getting extended attributes
604  * @bhp:
605  * @bhp: used to return the buffer head of the new EA block
606  * Returns: errno
607  */
608
609 static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
610 {
611         struct gfs2_sbd *sdp = ip->i_sbd;
612         struct gfs2_ea_header *ea;
613         uint64_t block;
614
615         block = gfs2_alloc_meta(ip);
616
617         *bhp = gfs2_meta_new(ip->i_gl, block);
618         gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
619         gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
620         gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));
621
622         ea = GFS2_EA_BH2FIRST(*bhp);
623         ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
624         ea->ea_type = GFS2_EATYPE_UNUSED;
625         ea->ea_flags = GFS2_EAFLAG_LAST;
626         ea->ea_num_ptrs = 0;
627
628         ip->i_di.di_blocks++;
629
630         return 0;
631 }
632
633 /**
634  * ea_write - writes the request info to an ea, creating new blocks if
635  *            necessary
636  * @ip:  inode that is being modified
637  * @ea:  the location of the new ea in a block
638  * @er: the write request
639  *
640  * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
641  *
642  * Returns: errno
643  */
644
645 static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
646                     struct gfs2_ea_request *er)
647 {
648         struct gfs2_sbd *sdp = ip->i_sbd;
649
650         ea->ea_data_len = cpu_to_be32(er->er_data_len);
651         ea->ea_name_len = er->er_name_len;
652         ea->ea_type = er->er_type;
653         ea->__pad = 0;
654
655         memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);
656
657         if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
658                 ea->ea_num_ptrs = 0;
659                 memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
660         } else {
661                 uint64_t *dataptr = GFS2_EA2DATAPTRS(ea);
662                 const char *data = er->er_data;
663                 unsigned int data_len = er->er_data_len;
664                 unsigned int copy;
665                 unsigned int x;
666
667                 ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
668                 for (x = 0; x < ea->ea_num_ptrs; x++) {
669                         struct buffer_head *bh;
670                         uint64_t block;
671                         int mh_size = sizeof(struct gfs2_meta_header);
672
673                         block = gfs2_alloc_meta(ip);
674
675                         bh = gfs2_meta_new(ip->i_gl, block);
676                         gfs2_trans_add_bh(ip->i_gl, bh, 1);
677                         gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
678
679                         ip->i_di.di_blocks++;
680
681                         copy = (data_len > sdp->sd_jbsize) ? sdp->sd_jbsize :
682                                                              data_len;
683                         memcpy(bh->b_data + mh_size, data, copy);
684                         if (copy < sdp->sd_jbsize)
685                                 memset(bh->b_data + mh_size + copy, 0,
686                                        sdp->sd_jbsize - copy);
687
688                         *dataptr++ = cpu_to_be64((uint64_t)bh->b_blocknr);
689                         data += copy;
690                         data_len -= copy;
691
692                         brelse(bh);
693                 }
694
695                 gfs2_assert_withdraw(sdp, !data_len);
696         }
697
698         return 0;
699 }
700
701 typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
702                                    struct gfs2_ea_request *er,
703                                    void *private);
704
705 static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
706                              unsigned int blks,
707                              ea_skeleton_call_t skeleton_call,
708                              void *private)
709 {
710         struct gfs2_alloc *al;
711         struct buffer_head *dibh;
712         int error;
713
714         al = gfs2_alloc_get(ip);
715
716         error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
717         if (error)
718                 goto out;
719
720         error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
721         if (error)
722                 goto out_gunlock_q;
723
724         al->al_requested = blks;
725
726         error = gfs2_inplace_reserve(ip);
727         if (error)
728                 goto out_gunlock_q;
729
730         error = gfs2_trans_begin(ip->i_sbd,
731                                  blks + al->al_rgd->rd_ri.ri_length +
732                                  RES_DINODE + RES_STATFS + RES_QUOTA, 0);
733         if (error)
734                 goto out_ipres;
735
736         error = skeleton_call(ip, er, private);
737         if (error)
738                 goto out_end_trans;
739
740         error = gfs2_meta_inode_buffer(ip, &dibh);
741         if (!error) {
742                 if (er->er_flags & GFS2_ERF_MODE) {
743                         gfs2_assert_withdraw(ip->i_sbd,
744                                             (ip->i_di.di_mode & S_IFMT) ==
745                                             (er->er_mode & S_IFMT));
746                         ip->i_di.di_mode = er->er_mode;
747                 }
748                 ip->i_di.di_ctime = get_seconds();
749                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
750                 gfs2_dinode_out(&ip->i_di, dibh->b_data);
751                 brelse(dibh);
752         }
753
754  out_end_trans:
755         gfs2_trans_end(ip->i_sbd);
756
757  out_ipres:
758         gfs2_inplace_release(ip);
759
760  out_gunlock_q:
761         gfs2_quota_unlock(ip);
762
763  out:
764         gfs2_alloc_put(ip);
765
766         return error;
767 }
768
769 static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
770                      void *private)
771 {
772         struct buffer_head *bh;
773         int error;
774
775         error = ea_alloc_blk(ip, &bh);
776         if (error)
777                 return error;
778
779         ip->i_di.di_eattr = bh->b_blocknr;
780         error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
781
782         brelse(bh);
783
784         return error;
785 }
786
787 /**
788  * ea_init - initializes a new eattr block
789  * @ip: the inode
790  * @er: the initial EA request to write into the new block
791  *
792  * Returns: errno
793  */
794
795 static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
796 {
797         unsigned int jbsize = ip->i_sbd->sd_jbsize;
798         unsigned int blks = 1;
799
800         if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
801                 blks += DIV_ROUND_UP(er->er_data_len, jbsize);
802
803         return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
804 }
805
806 static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
807 {
808         uint32_t ea_size = GFS2_EA_SIZE(ea);
809         struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
810                                      ea_size);
811         uint32_t new_size = GFS2_EA_REC_LEN(ea) - ea_size;
812         int last = ea->ea_flags & GFS2_EAFLAG_LAST;
813
814         ea->ea_rec_len = cpu_to_be32(ea_size);
815         ea->ea_flags ^= last;
816
817         new->ea_rec_len = cpu_to_be32(new_size);
818         new->ea_flags = last;
819
820         return new;
821 }
822
823 static void ea_set_remove_stuffed(struct gfs2_inode *ip,
824                                   struct gfs2_ea_location *el)
825 {
826         struct gfs2_ea_header *ea = el->el_ea;
827         struct gfs2_ea_header *prev = el->el_prev;
828         uint32_t len;
829
830         gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
831
832         if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
833                 ea->ea_type = GFS2_EATYPE_UNUSED;
834                 return;
835         } else if (GFS2_EA2NEXT(prev) != ea) {
836                 prev = GFS2_EA2NEXT(prev);
837                 gfs2_assert_withdraw(ip->i_sbd, GFS2_EA2NEXT(prev) == ea);
838         }
839
840         len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
841         prev->ea_rec_len = cpu_to_be32(len);
842
843         if (GFS2_EA_IS_LAST(ea))
844                 prev->ea_flags |= GFS2_EAFLAG_LAST;
845 }
846
847 struct ea_set {
848         int ea_split;
849
850         struct gfs2_ea_request *es_er;
851         struct gfs2_ea_location *es_el;
852
853         struct buffer_head *es_bh;
854         struct gfs2_ea_header *es_ea;
855 };
856
857 static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
858                                  struct gfs2_ea_header *ea, struct ea_set *es)
859 {
860         struct gfs2_ea_request *er = es->es_er;
861         struct buffer_head *dibh;
862         int error;
863
864         error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + 2 * RES_EATTR, 0);
865         if (error)
866                 return error;
867
868         gfs2_trans_add_bh(ip->i_gl, bh, 1);
869
870         if (es->ea_split)
871                 ea = ea_split_ea(ea);
872
873         ea_write(ip, ea, er);
874
875         if (es->es_el)
876                 ea_set_remove_stuffed(ip, es->es_el);
877
878         error = gfs2_meta_inode_buffer(ip, &dibh);
879         if (error)
880                 goto out;
881
882         if (er->er_flags & GFS2_ERF_MODE) {
883                 gfs2_assert_withdraw(ip->i_sbd,
884                         (ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT));
885                 ip->i_di.di_mode = er->er_mode;
886         }
887         ip->i_di.di_ctime = get_seconds();
888         gfs2_trans_add_bh(ip->i_gl, dibh, 1);
889         gfs2_dinode_out(&ip->i_di, dibh->b_data);
890         brelse(dibh);
891  out:
892         gfs2_trans_end(ip->i_sbd);
893
894         return error;
895 }
896
897 static int ea_set_simple_alloc(struct gfs2_inode *ip,
898                                struct gfs2_ea_request *er, void *private)
899 {
900         struct ea_set *es = private;
901         struct gfs2_ea_header *ea = es->es_ea;
902         int error;
903
904         gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);
905
906         if (es->ea_split)
907                 ea = ea_split_ea(ea);
908
909         error = ea_write(ip, ea, er);
910         if (error)
911                 return error;
912
913         if (es->es_el)
914                 ea_set_remove_stuffed(ip, es->es_el);
915
916         return 0;
917 }
918
919 static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
920                          struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
921                          void *private)
922 {
923         struct ea_set *es = private;
924         unsigned int size;
925         int stuffed;
926         int error;
927
928         stuffed = ea_calc_size(ip->i_sbd, es->es_er, &size);
929
930         if (ea->ea_type == GFS2_EATYPE_UNUSED) {
931                 if (GFS2_EA_REC_LEN(ea) < size)
932                         return 0;
933                 if (!GFS2_EA_IS_STUFFED(ea)) {
934                         error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
935                         if (error)
936                                 return error;
937                 }
938                 es->ea_split = 0;
939         } else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
940                 es->ea_split = 1;
941         else
942                 return 0;
943
944         if (stuffed) {
945                 error = ea_set_simple_noalloc(ip, bh, ea, es);
946                 if (error)
947                         return error;
948         } else {
949                 unsigned int blks;
950
951                 es->es_bh = bh;
952                 es->es_ea = ea;
953                 blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
954                                         ip->i_sbd->sd_jbsize);
955
956                 error = ea_alloc_skeleton(ip, es->es_er, blks,
957                                           ea_set_simple_alloc, es);
958                 if (error)
959                         return error;
960         }
961
962         return 1;
963 }
964
965 static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
966                         void *private)
967 {
968         struct gfs2_sbd *sdp = ip->i_sbd;
969         struct buffer_head *indbh, *newbh;
970         uint64_t *eablk;
971         int error;
972         int mh_size = sizeof(struct gfs2_meta_header);
973
974         if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
975                 uint64_t *end;
976
977                 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
978                                        DIO_START | DIO_WAIT, &indbh);
979                 if (error)
980                         return error;
981
982                 if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
983                         error = -EIO;
984                         goto out;
985                 }
986
987                 eablk = (uint64_t *)(indbh->b_data + mh_size);
988                 end = eablk + sdp->sd_inptrs;
989
990                 for (; eablk < end; eablk++)
991                         if (!*eablk)
992                                 break;
993
994                 if (eablk == end) {
995                         error = -ENOSPC;
996                         goto out;
997                 }
998
999                 gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1000         } else {
1001                 uint64_t blk;
1002
1003                 blk = gfs2_alloc_meta(ip);
1004
1005                 indbh = gfs2_meta_new(ip->i_gl, blk);
1006                 gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1007                 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
1008                 gfs2_buffer_clear_tail(indbh, mh_size);
1009
1010                 eablk = (uint64_t *)(indbh->b_data + mh_size);
1011                 *eablk = cpu_to_be64(ip->i_di.di_eattr);
1012                 ip->i_di.di_eattr = blk;
1013                 ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
1014                 ip->i_di.di_blocks++;
1015
1016                 eablk++;
1017         }
1018
1019         error = ea_alloc_blk(ip, &newbh);
1020         if (error)
1021                 goto out;
1022
1023         *eablk = cpu_to_be64((uint64_t)newbh->b_blocknr);
1024         error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
1025         brelse(newbh);
1026         if (error)
1027                 goto out;
1028
1029         if (private)
1030                 ea_set_remove_stuffed(ip, (struct gfs2_ea_location *)private);
1031
1032  out:
1033         brelse(indbh);
1034
1035         return error;
1036 }
1037
1038 static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
1039                     struct gfs2_ea_location *el)
1040 {
1041         struct ea_set es;
1042         unsigned int blks = 2;
1043         int error;
1044
1045         memset(&es, 0, sizeof(struct ea_set));
1046         es.es_er = er;
1047         es.es_el = el;
1048
1049         error = ea_foreach(ip, ea_set_simple, &es);
1050         if (error > 0)
1051                 return 0;
1052         if (error)
1053                 return error;
1054
1055         if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
1056                 blks++;
1057         if (GFS2_EAREQ_SIZE_STUFFED(er) > ip->i_sbd->sd_jbsize)
1058                 blks += DIV_ROUND_UP(er->er_data_len, ip->i_sbd->sd_jbsize);
1059
1060         return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
1061 }
1062
1063 static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
1064                                    struct gfs2_ea_location *el)
1065 {
1066         if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
1067                 el->el_prev = GFS2_EA2NEXT(el->el_prev);
1068                 gfs2_assert_withdraw(ip->i_sbd,
1069                                      GFS2_EA2NEXT(el->el_prev) == el->el_ea);
1070         }
1071
1072         return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
1073 }
1074
1075 int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1076 {
1077         struct gfs2_ea_location el;
1078         int error;
1079
1080         if (!ip->i_di.di_eattr) {
1081                 if (er->er_flags & XATTR_REPLACE)
1082                         return -ENODATA;
1083                 return ea_init(ip, er);
1084         }
1085
1086         error = gfs2_ea_find(ip, er, &el);
1087         if (error)
1088                 return error;
1089
1090         if (el.el_ea) {
1091                 if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
1092                         brelse(el.el_bh);
1093                         return -EPERM;
1094                 }
1095
1096                 error = -EEXIST;
1097                 if (!(er->er_flags & XATTR_CREATE)) {
1098                         int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
1099                         error = ea_set_i(ip, er, &el);
1100                         if (!error && unstuffed)
1101                                 ea_set_remove_unstuffed(ip, &el);
1102                 }
1103
1104                 brelse(el.el_bh);
1105         } else {
1106                 error = -ENODATA;
1107                 if (!(er->er_flags & XATTR_REPLACE))
1108                         error = ea_set_i(ip, er, NULL);
1109         }
1110
1111         return error;
1112 }
1113
1114 int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1115 {
1116         struct gfs2_holder i_gh;
1117         int error;
1118
1119         if (!er->er_name_len ||
1120             er->er_name_len > GFS2_EA_MAX_NAME_LEN)
1121                 return -EINVAL;
1122         if (!er->er_data || !er->er_data_len) {
1123                 er->er_data = NULL;
1124                 er->er_data_len = 0;
1125         }
1126         error = ea_check_size(ip->i_sbd, er);
1127         if (error)
1128                 return error;
1129
1130         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1131         if (error)
1132                 return error;
1133
1134         if (IS_IMMUTABLE(ip->i_vnode))
1135                 error = -EPERM;
1136         else
1137                 error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);
1138
1139         gfs2_glock_dq_uninit(&i_gh);
1140
1141         return error;
1142 }
1143
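/*
 * Illustrative sketch (not part of the original file): building a set
 * request for gfs2_ea_set().  er_flags carries the VFS XATTR_CREATE /
 * XATTR_REPLACE flags (from linux/xattr.h, already included), which
 * gfs2_ea_set_i() above turns into -EEXIST and -ENODATA respectively
 * when they cannot be honoured.  The helper name is hypothetical.
 */
#if 0
static int ea_set_user_attr(struct gfs2_inode *ip, char *name,
                            unsigned int name_len,
                            char *value, unsigned int value_len,
                            int flags)
{
        struct gfs2_ea_request er;

        memset(&er, 0, sizeof(struct gfs2_ea_request));
        er.er_type = GFS2_EATYPE_USR;
        er.er_name = name;
        er.er_name_len = name_len;
        er.er_data = value;
        er.er_data_len = value_len;
        er.er_flags = flags;    /* 0, XATTR_CREATE or XATTR_REPLACE */

        return gfs2_ea_set(ip, &er);
}
#endif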
1144 static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1145 {
1146         struct gfs2_ea_header *ea = el->el_ea;
1147         struct gfs2_ea_header *prev = el->el_prev;
1148         struct buffer_head *dibh;
1149         int error;
1150
1151         error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + RES_EATTR, 0);
1152         if (error)
1153                 return error;
1154
1155         gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
1156
1157         if (prev) {
1158                 uint32_t len;
1159
1160                 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
1161                 prev->ea_rec_len = cpu_to_be32(len);
1162
1163                 if (GFS2_EA_IS_LAST(ea))
1164                         prev->ea_flags |= GFS2_EAFLAG_LAST;
1165         } else
1166                 ea->ea_type = GFS2_EATYPE_UNUSED;
1167
1168         error = gfs2_meta_inode_buffer(ip, &dibh);
1169         if (!error) {
1170                 ip->i_di.di_ctime = get_seconds();
1171                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1172                 gfs2_dinode_out(&ip->i_di, dibh->b_data);
1173                 brelse(dibh);
1174         }
1175
1176         gfs2_trans_end(ip->i_sbd);
1177
1178         return error;
1179 }
1180
1181 int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1182 {
1183         struct gfs2_ea_location el;
1184         int error;
1185
1186         if (!ip->i_di.di_eattr)
1187                 return -ENODATA;
1188
1189         error = gfs2_ea_find(ip, er, &el);
1190         if (error)
1191                 return error;
1192         if (!el.el_ea)
1193                 return -ENODATA;
1194
1195         if (GFS2_EA_IS_STUFFED(el.el_ea))
1196                 error = ea_remove_stuffed(ip, &el);
1197         else
1198                 error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
1199                                             0);
1200
1201         brelse(el.el_bh);
1202
1203         return error;
1204 }
1205
1206 /**
1207  * gfs2_ea_remove - removes an extended attribute
1208  * @ip: pointer to the inode of the target file
1209  * @er: request information
1210  *
1211  * Returns: errno
1212  */
1213
1214 int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1215 {
1216         struct gfs2_holder i_gh;
1217         int error;
1218
1219         if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
1220                 return -EINVAL;
1221
1222         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1223         if (error)
1224                 return error;
1225
1226         if (IS_IMMUTABLE(ip->i_vnode) || IS_APPEND(ip->i_vnode))
1227                 error = -EPERM;
1228         else
1229                 error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);
1230
1231         gfs2_glock_dq_uninit(&i_gh);
1232
1233         return error;
1234 }
1235
1236 static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
1237                                   struct gfs2_ea_header *ea, char *data)
1238 {
1239         struct gfs2_sbd *sdp = ip->i_sbd;
1240         struct buffer_head **bh;
1241         unsigned int amount = GFS2_EA_DATA_LEN(ea);
1242         unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
1243         uint64_t *dataptrs = GFS2_EA2DATAPTRS(ea);
1244         unsigned int x;
1245         int error;
1246
1247         bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
1248         if (!bh)
1249                 return -ENOMEM;
1250
1251         error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
1252         if (error)
1253                 goto out;
1254
1255         for (x = 0; x < nptrs; x++) {
1256                 error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs),
1257                                        DIO_START, bh + x);
1258                 if (error) {
1259                         while (x--)
1260                                 brelse(bh[x]);
1261                         goto fail;
1262                 }
1263                 dataptrs++;
1264         }
1265
1266         for (x = 0; x < nptrs; x++) {
1267                 error = gfs2_meta_reread(sdp, bh[x], DIO_WAIT);
1268                 if (error) {
1269                         for (; x < nptrs; x++)
1270                                 brelse(bh[x]);
1271                         goto fail;
1272                 }
1273                 if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
1274                         for (; x < nptrs; x++)
1275                                 brelse(bh[x]);
1276                         error = -EIO;
1277                         goto fail;
1278                 }
1279
1280                 gfs2_trans_add_bh(ip->i_gl, bh[x], 1);
1281
1282                 memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header),
1283                        data,
1284                        (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
1285
1286                 amount -= sdp->sd_jbsize;
1287                 data += sdp->sd_jbsize;
1288
1289                 brelse(bh[x]);
1290         }
1291
1292  out:
1293         kfree(bh);
1294
1295         return error;
1296
1297  fail:
1298         gfs2_trans_end(sdp);
1299         kfree(bh);
1300
1301         return error;
1302 }
1303
1304 int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
1305                       struct iattr *attr, char *data)
1306 {
1307         struct buffer_head *dibh;
1308         int error;
1309
1310         if (GFS2_EA_IS_STUFFED(el->el_ea)) {
1311                 error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + RES_EATTR, 0);
1312                 if (error)
1313                         return error;
1314
1315                 gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
1316                 memcpy(GFS2_EA2DATA(el->el_ea),
1317                        data,
1318                        GFS2_EA_DATA_LEN(el->el_ea));
1319         } else
1320                 error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);
1321
1322         if (error)
1323                 return error;
1324
1325         error = gfs2_meta_inode_buffer(ip, &dibh);
1326         if (!error) {
1327                 error = inode_setattr(ip->i_vnode, attr);
1328                 gfs2_assert_warn(ip->i_sbd, !error);
1329                 gfs2_inode_attr_out(ip);
1330                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1331                 gfs2_dinode_out(&ip->i_di, dibh->b_data);
1332                 brelse(dibh);
1333         }
1334
1335         gfs2_trans_end(ip->i_sbd);
1336
1337         return error;
1338 }
1339
1340 static int ea_dealloc_indirect(struct gfs2_inode *ip)
1341 {
1342         struct gfs2_sbd *sdp = ip->i_sbd;
1343         struct gfs2_rgrp_list rlist;
1344         struct buffer_head *indbh, *dibh;
1345         uint64_t *eablk, *end;
1346         unsigned int rg_blocks = 0;
1347         uint64_t bstart = 0;
1348         unsigned int blen = 0;
1349         unsigned int blks = 0;
1350         unsigned int x;
1351         int error;
1352
1353         memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
1354
1355         error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
1356                                DIO_START | DIO_WAIT, &indbh);
1357         if (error)
1358                 return error;
1359
1360         if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
1361                 error = -EIO;
1362                 goto out;
1363         }
1364
1365         eablk = (uint64_t *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1366         end = eablk + sdp->sd_inptrs;
1367
1368         for (; eablk < end; eablk++) {
1369                 uint64_t bn;
1370
1371                 if (!*eablk)
1372                         break;
1373                 bn = be64_to_cpu(*eablk);
1374
1375                 if (bstart + blen == bn)
1376                         blen++;
1377                 else {
1378                         if (bstart)
1379                                 gfs2_rlist_add(sdp, &rlist, bstart);
1380                         bstart = bn;
1381                         blen = 1;
1382                 }
1383                 blks++;
1384         }
1385         if (bstart)
1386                 gfs2_rlist_add(sdp, &rlist, bstart);
1387         else
1388                 goto out;
1389
1390         gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);
1391
1392         for (x = 0; x < rlist.rl_rgrps; x++) {
1393                 struct gfs2_rgrpd *rgd;
1394                 rgd = rlist.rl_ghs[x].gh_gl->gl_object;
1395                 rg_blocks += rgd->rd_ri.ri_length;
1396         }
1397
1398         error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
1399         if (error)
1400                 goto out_rlist_free;
1401
1402         error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
1403                                  RES_INDIRECT + RES_STATFS +
1404                                  RES_QUOTA, blks);
1405         if (error)
1406                 goto out_gunlock;
1407
1408         gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1409
1410         eablk = (uint64_t *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1411         bstart = 0;
1412         blen = 0;
1413
1414         for (; eablk < end; eablk++) {
1415                 uint64_t bn;
1416
1417                 if (!*eablk)
1418                         break;
1419                 bn = be64_to_cpu(*eablk);
1420
1421                 if (bstart + blen == bn)
1422                         blen++;
1423                 else {
1424                         if (bstart)
1425                                 gfs2_free_meta(ip, bstart, blen);
1426                         bstart = bn;
1427                         blen = 1;
1428                 }
1429
1430                 *eablk = 0;
1431                 if (!ip->i_di.di_blocks)
1432                         gfs2_consist_inode(ip);
1433                 ip->i_di.di_blocks--;
1434         }
1435         if (bstart)
1436                 gfs2_free_meta(ip, bstart, blen);
1437
1438         ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;
1439
1440         error = gfs2_meta_inode_buffer(ip, &dibh);
1441         if (!error) {
1442                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1443                 gfs2_dinode_out(&ip->i_di, dibh->b_data);
1444                 brelse(dibh);
1445         }
1446
1447         gfs2_trans_end(sdp);
1448
1449  out_gunlock:
1450         gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
1451
1452  out_rlist_free:
1453         gfs2_rlist_free(&rlist);
1454
1455  out:
1456         brelse(indbh);
1457
1458         return error;
1459 }
1460
1461 static int ea_dealloc_block(struct gfs2_inode *ip)
1462 {
1463         struct gfs2_sbd *sdp = ip->i_sbd;
1464         struct gfs2_alloc *al = &ip->i_alloc;
1465         struct gfs2_rgrpd *rgd;
1466         struct buffer_head *dibh;
1467         int error;
1468
1469         rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
1470         if (!rgd) {
1471                 gfs2_consist_inode(ip);
1472                 return -EIO;
1473         }
1474
1475         error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
1476                                    &al->al_rgd_gh);
1477         if (error)
1478                 return error;
1479
1480         error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE +
1481                                  RES_STATFS + RES_QUOTA, 1);
1482         if (error)
1483                 goto out_gunlock;
1484
1485         gfs2_free_meta(ip, ip->i_di.di_eattr, 1);
1486
1487         ip->i_di.di_eattr = 0;
1488         if (!ip->i_di.di_blocks)
1489                 gfs2_consist_inode(ip);
1490         ip->i_di.di_blocks--;
1491
1492         error = gfs2_meta_inode_buffer(ip, &dibh);
1493         if (!error) {
1494                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1495                 gfs2_dinode_out(&ip->i_di, dibh->b_data);
1496                 brelse(dibh);
1497         }
1498
1499         gfs2_trans_end(sdp);
1500
1501  out_gunlock:
1502         gfs2_glock_dq_uninit(&al->al_rgd_gh);
1503
1504         return error;
1505 }
1506
1507 /**
1508  * gfs2_ea_dealloc - deallocate the extended attribute fork
1509  * @ip: the inode
1510  *
1511  * Returns: errno
1512  */
1513
1514 int gfs2_ea_dealloc(struct gfs2_inode *ip)
1515 {
1516         struct gfs2_alloc *al;
1517         int error;
1518
1519         al = gfs2_alloc_get(ip);
1520
1521         error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
1522         if (error)
1523                 goto out_alloc;
1524
1525         error = gfs2_rindex_hold(ip->i_sbd, &al->al_ri_gh);
1526         if (error)
1527                 goto out_quota;
1528
1529         error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
1530         if (error)
1531                 goto out_rindex;
1532
1533         if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
1534                 error = ea_dealloc_indirect(ip);
1535                 if (error)
1536                         goto out_rindex;
1537         }
1538
1539         error = ea_dealloc_block(ip);
1540
1541  out_rindex:
1542         gfs2_glock_dq_uninit(&al->al_ri_gh);
1543
1544  out_quota:
1545         gfs2_quota_unhold(ip);
1546
1547  out_alloc:
1548         gfs2_alloc_put(ip);
1549
1550         return error;
1551 }
1552