
Contents of /fuse/trunk/kernel/file.c



Revision 5
Wed Aug 4 11:40:49 2004 UTC by dpavlin
File MIME type: text/plain
File size: 22719 bytes
Log message: copy CVS to trunk

/*
    FUSE: Filesystem in Userspace
    Copyright (C) 2001-2004 Miklos Szeredi <miklos@szeredi.hu>

    This program can be distributed under the terms of the GNU GPL.
    See the file COPYING.
*/
#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#ifdef KERNEL_2_6
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#endif

#ifndef KERNEL_2_6
#define PageUptodate(page) Page_Uptodate(page)
#endif

static int user_mmap;
#ifdef KERNEL_2_6
#include <linux/moduleparam.h>
module_param(user_mmap, int, 0);
#else
MODULE_PARM(user_mmap, "i");
#endif

MODULE_PARM_DESC(user_mmap, "Allow non root user to create a shared writable mapping");

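/*
 * Open: send FUSE_OPEN to the userspace filesystem and, on success,
 * remember the returned file handle in a newly allocated struct
 * fuse_file.  The request used later for FUSE_RELEASE is allocated
 * here as well, so that release cannot fail for lack of memory.
 */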
static int fuse_open(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_req *req;
	struct fuse_open_in inarg;
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	/* If opening the root node, no lookup has been performed on
	   it, so the attributes must be refreshed */
	if (inode->i_ino == FUSE_ROOT_INO) {
		int err = fuse_do_getattr(inode);
		if (err)
			return err;
	}

	down(&inode->i_sem);
	err = -ERESTARTSYS;
	req = fuse_get_request(fc);
	if (!req)
		goto out;

	err = -ENOMEM;
	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (!ff)
		goto out_put_request;

	ff->release_req = fuse_request_alloc();
	if (!ff->release_req) {
		kfree(ff);
		goto out_put_request;
	}

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~O_EXCL;
	req->in.h.opcode = FUSE_OPEN;
	req->in.h.ino = inode->i_ino;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	err = req->out.h.error;
	if (!err && !(fc->flags & FUSE_KERNEL_CACHE)) {
#ifdef KERNEL_2_6
		invalidate_inode_pages(inode->i_mapping);
#else
		invalidate_inode_pages(inode);
#endif
	}
	if (err) {
		fuse_request_free(ff->release_req);
		kfree(ff);
	} else {
		ff->fh = outarg.fh;
		file->private_data = ff;
		INIT_LIST_HEAD(&ff->ff_list);
	}

 out_put_request:
	fuse_put_request(fc, req);
 out:
	up(&inode->i_sem);
	return err;
}

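/*
 * Write back all dirty pages of the inode and wait for the writes to
 * finish.  The 2.6 path uses filemap_fdatawrite/filemap_fdatawait;
 * 2.4 uses filemap_fdatasync where an MMU is present.
 */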
void fuse_sync_inode(struct inode *inode)
{
#ifdef KERNEL_2_6
	filemap_fdatawrite(inode->i_mapping);
	filemap_fdatawait(inode->i_mapping);
#else
#ifndef NO_MM
	filemap_fdatasync(inode->i_mapping);
	filemap_fdatawait(inode->i_mapping);
#endif
#endif
}

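/*
 * Release: called when the last reference to the open file is dropped.
 * Files opened for writing are synced first, the fuse_file is removed
 * from the inode's list of writable files, and the request allocated
 * at open time is used to send FUSE_RELEASE.
 */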
static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_inode *fi = INO_FI(inode);
	struct fuse_release_in *inarg;
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req = ff->release_req;

	down(&inode->i_sem);
	if (file->f_mode & FMODE_WRITE)
		fuse_sync_inode(inode);

	if (!list_empty(&ff->ff_list)) {
		down_write(&fi->write_sem);
		list_del(&ff->ff_list);
		up_write(&fi->write_sem);
	}

	inarg = &req->misc.release_in;
	inarg->fh = ff->fh;
	inarg->flags = file->f_flags & ~O_EXCL;
	req->in.h.opcode = FUSE_RELEASE;
	req->in.h.ino = inode->i_ino;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
	request_send(fc, req);
	fuse_put_request(fc, req);
	kfree(ff);
	up(&inode->i_sem);

	/* Return value is ignored by VFS */
	return 0;
}

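/*
 * Flush: sent to userspace as FUSE_FLUSH on every close() of the file.
 * If the filesystem answers -ENOSYS the operation is remembered as
 * unimplemented and no further FLUSH requests are sent on this
 * connection.
 */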
static int fuse_flush(struct file *file)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (fc->no_flush)
		return 0;

	req = fuse_get_request(fc);
	if (!req)
		return -EINTR;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.ino = inode->i_ino;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	request_send(fc, req);
	err = req->out.h.error;
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	fuse_put_request(fc, req);
	return err;
}

static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
	struct inode *inode = de->d_inode;
	struct fuse_inode *fi = INO_FI(inode);
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (fc->no_fsync)
		return 0;

	req = fuse_get_request(fc);
	if (!req)
		return -ERESTARTSYS;

	/* Make sure all writes to this inode are completed before
	   issuing the FSYNC request */
	down_write(&fi->write_sem);
	up_write(&fi->write_sem);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.datasync = datasync;
	req->in.h.opcode = FUSE_FSYNC;
	req->in.h.ino = inode->i_ino;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	request_send(fc, req);
	err = req->out.h.error;
	if (err == -ENOSYS) {
		fc->no_fsync = 1;
		err = 0;
	}
	fuse_put_request(fc, req);
	return err;
}

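/*
 * Send a single synchronous FUSE_READ request for up to @count bytes
 * at offset @pos.  The reply may be shorter than requested
 * (req->out.argvar is set); the number of bytes actually read is
 * returned, or a negative error.
 */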
static ssize_t fuse_send_read(struct file *file, struct inode *inode,
			      char *buf, loff_t pos, size_t count)
{
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_read_in inarg;
	ssize_t res;

	req = fuse_get_request(fc);
	if (!req)
		return -ERESTARTSYS;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.offset = pos;
	inarg.size = count;
	req->in.h.opcode = FUSE_READ;
	req->in.h.ino = inode->i_ino;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
	req->out.args[0].value = buf;
	request_send(fc, req);
	res = req->out.h.error;
	if (!res)
		res = req->out.args[0].size;
	fuse_put_request(fc, req);
	return res;
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	char *buffer;
	ssize_t res;
	loff_t pos;

	pos = (loff_t) page->index << PAGE_CACHE_SHIFT;
	buffer = kmap(page);
	res = fuse_send_read(file, inode, buffer, pos, PAGE_CACHE_SIZE);
	if (res >= 0) {
		if (res < PAGE_CACHE_SIZE)
			memset(buffer + res, 0, PAGE_CACHE_SIZE - res);
		flush_dcache_page(page);
		SetPageUptodate(page);
		res = 0;
	}
	kunmap(page);
	unlock_page(page);
	return res;
}

#ifdef KERNEL_2_6

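/*
 * Copy the reply of a multi-page FUSE_READ from the userspace buffer
 * into the page cache pages attached to the request, zero-filling any
 * part of a page the filesystem did not supply.
 */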
static int read_pages_copyout(struct fuse_req *req, const char *buf,
			      size_t nbytes)
{
	unsigned i;
	unsigned long base_index = req->pages[0]->index;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		unsigned long offset;
		unsigned count;
		char *tmpbuf;
		int err;

		offset = (page->index - base_index) * PAGE_CACHE_SIZE;
		if (offset >= nbytes)
			count = 0;
		else if (offset + PAGE_CACHE_SIZE <= nbytes)
			count = PAGE_CACHE_SIZE;
		else
			count = nbytes - offset;

		tmpbuf = kmap(page);
		err = 0;
		if (count)
			err = copy_from_user(tmpbuf, buf + offset, count);
		if (count < PAGE_CACHE_SIZE)
			memset(tmpbuf + count, 0, PAGE_CACHE_SIZE - count);
		kunmap(page);
		if (err)
			return -EFAULT;

		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	return 0;
}

static void read_pages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		unlock_page(req->pages[i]);

	fuse_put_request(fc, req);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file,
				struct inode *inode)
{
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_read_in *inarg;
	loff_t pos;
	unsigned numpages;

	pos = (loff_t) req->pages[0]->index << PAGE_CACHE_SHIFT;
	/* Allow for holes between the pages */
	numpages = req->pages[req->num_pages - 1]->index + 1
		- req->pages[0]->index;

	inarg = &req->misc.read_in;
	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = numpages * PAGE_CACHE_SIZE;
	req->in.h.opcode = FUSE_READ;
	req->in.h.ino = inode->i_ino;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->copy_out = read_pages_copyout;
	request_send_nonblock(fc, req, read_pages_end, NULL);
}

struct fuse_readpages_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};

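/*
 * Called by read_cache_pages() for every readahead page.  Pages are
 * gathered into one request until the request is full, would exceed
 * max_read, or the next page is not contiguous; the request is then
 * sent and a fresh one started.
 */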
static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_readpages_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = INO_FC(inode);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file, inode);
		data->req = req = fuse_get_request(fc);
	}
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_readpages_data data;

	data.req = fuse_get_request(fc);
	data.file = file;
	data.inode = inode;

	read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (data.req->num_pages)
		fuse_send_readpages(data.req, file, inode);
	else
		fuse_put_request(fc, data.req);

	return 0;
}
#endif

#ifndef KERNEL_2_6
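/*
 * 2.4 "large read" support: when FUSE_LARGE_READ is set, whole
 * FUSE_BLOCK_SIZE chunks are read into the page cache ahead of
 * generic_file_read(), cutting down the number of round trips to
 * userspace.
 */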
static int fuse_is_block_uptodate(struct inode *inode, size_t bl_index)
{
	size_t index = bl_index << FUSE_BLOCK_PAGE_SHIFT;
	size_t end_index = ((bl_index + 1) << FUSE_BLOCK_PAGE_SHIFT) - 1;
	size_t file_end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;

	if (end_index > file_end_index)
		end_index = file_end_index;

	for (; index <= end_index; index++) {
		struct page *page = find_get_page(inode->i_mapping, index);

		if (!page)
			return 0;

		if (!PageUptodate(page)) {
			page_cache_release(page);
			return 0;
		}

		page_cache_release(page);
	}

	return 1;
}

static int fuse_cache_block(struct inode *inode, char *bl_buf,
			    size_t bl_index)
{
	size_t start_index = bl_index << FUSE_BLOCK_PAGE_SHIFT;
	size_t end_index = ((bl_index + 1) << FUSE_BLOCK_PAGE_SHIFT) - 1;
	size_t file_end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;

	int i;

	if (end_index > file_end_index)
		end_index = file_end_index;

	for (i = 0; start_index + i <= end_index; i++) {
		size_t index = start_index + i;
		struct page *page;
		char *buffer;

		page = grab_cache_page(inode->i_mapping, index);
		if (!page)
			return -1;

		if (!PageUptodate(page)) {
			buffer = kmap(page);
			memcpy(buffer, bl_buf + i * PAGE_CACHE_SIZE,
			       PAGE_CACHE_SIZE);
			flush_dcache_page(page);
			SetPageUptodate(page);
			kunmap(page);
		}

		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}

static int fuse_file_read_block(struct file *file, struct inode *inode,
				char *bl_buf, size_t bl_index)
{
	ssize_t res;
	loff_t offset;

	offset = (loff_t) bl_index << FUSE_BLOCK_SHIFT;
	res = fuse_send_read(file, inode, bl_buf, offset, FUSE_BLOCK_SIZE);
	if (res >= 0) {
		if (res < FUSE_BLOCK_SIZE)
			memset(bl_buf + res, 0, FUSE_BLOCK_SIZE - res);
		res = 0;
	}
	return res;
}

static void fuse_file_bigread(struct file *file, struct inode *inode,
			      loff_t pos, size_t count)
{
	size_t bl_index = pos >> FUSE_BLOCK_SHIFT;
	size_t bl_end_index = (pos + count) >> FUSE_BLOCK_SHIFT;
	size_t bl_file_end_index = i_size_read(inode) >> FUSE_BLOCK_SHIFT;

	if (bl_end_index > bl_file_end_index)
		bl_end_index = bl_file_end_index;

	while (bl_index <= bl_end_index) {
		int res;
		char *bl_buf = kmalloc(FUSE_BLOCK_SIZE, GFP_KERNEL);
		if (!bl_buf)
			break;
		res = fuse_is_block_uptodate(inode, bl_index);
		if (!res)
			res = fuse_file_read_block(file, inode, bl_buf,
						   bl_index);
		if (!res)
			fuse_cache_block(inode, bl_buf, bl_index);
		kfree(bl_buf);
		bl_index++;
	}
}
#endif

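/*
 * Read path used with FUSE_DIRECT_IO: the page cache is bypassed and
 * data is copied to userspace through a temporary kernel buffer of at
 * most max_read bytes per request.
 */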
static ssize_t fuse_read(struct file *file, char *buf, size_t count,
			 loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = INO_FC(inode);
	char *tmpbuf;
	ssize_t res = 0;
	loff_t pos = *ppos;
	unsigned int max_read = count < fc->max_read ? count : fc->max_read;

	do {
		tmpbuf = kmalloc(max_read, GFP_KERNEL);
		if (tmpbuf)
			break;

		max_read /= 2;
	} while (max_read > PAGE_CACHE_SIZE / 4);
	if (!tmpbuf)
		return -ENOMEM;

	while (count) {
		size_t nbytes = count < max_read ? count : max_read;
		ssize_t res1;
		res1 = fuse_send_read(file, inode, tmpbuf, pos, nbytes);
		if (res1 < 0) {
			if (!res)
				res = res1;
			break;
		}
		res += res1;
		if (copy_to_user(buf, tmpbuf, res1)) {
			res = -EFAULT;
			break;
		}
		count -= res1;
		buf += res1;
		pos += res1;
		if (res1 < nbytes)
			break;
	}
	kfree(tmpbuf);

	if (res > 0)
		*ppos += res;

	return res;
}

static ssize_t fuse_file_read(struct file *file, char *buf,
			      size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = INO_FC(inode);
	ssize_t res;

	if (fc->flags & FUSE_DIRECT_IO) {
		res = fuse_read(file, buf, count, ppos);
	} else {
#ifndef KERNEL_2_6
		if (fc->flags & FUSE_LARGE_READ) {
			down(&inode->i_sem);
			fuse_file_bigread(file, inode, *ppos, count);
			up(&inode->i_sem);
		}
#endif
		res = generic_file_read(file, buf, count, ppos);
	}

	return res;
}

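/*
 * Send a single FUSE_WRITE request for @count bytes at @pos.  The data
 * is passed as a second input argument; the reply reports how many
 * bytes the filesystem accepted.
 */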
static ssize_t fuse_send_write(struct fuse_req *req, int writepage,
			       struct fuse_file *ff, struct inode *inode,
			       const char *buf, loff_t pos, size_t count)
{
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_write_in inarg;
	struct fuse_write_out outarg;
	ssize_t res;

	memset(&inarg, 0, sizeof(inarg));
	inarg.writepage = writepage;
	inarg.fh = ff->fh;
	inarg.offset = pos;
	inarg.size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.ino = inode->i_ino;
	req->in.numargs = 2;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = count;
	req->in.args[1].value = buf;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	res = req->out.h.error;
	if (!res)
		return outarg.size;
	else
		return res;
}

static int write_buffer(struct inode *inode, struct file *file,
			struct page *page, unsigned offset, size_t count)
{
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_file *ff = file->private_data;
	char *buffer;
	ssize_t res;
	loff_t pos;
	struct fuse_req *req;

	req = fuse_get_request(fc);
	if (!req)
		return -ERESTARTSYS;

	pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + offset;
	buffer = kmap(page);
	res = fuse_send_write(req, 0, ff, inode, buffer + offset, pos, count);
	fuse_put_request(fc, req);
	if (res >= 0) {
		if (res < count) {
			printk("fuse: short write\n");
			res = -EPROTO;
		} else
			res = 0;
	}
	kunmap(page);
	if (res)
		SetPageError(page);
	return res;
}

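/*
 * Number of bytes of this page that lie within i_size: a full page for
 * pages before the last one, the remaining bytes for the final partial
 * page, and zero for pages beyond the end of the file.
 */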
static int get_write_count(struct inode *inode, struct page *page)
{
	unsigned long end_index;
	loff_t size = i_size_read(inode);
	int count;

	end_index = size >> PAGE_CACHE_SHIFT;
	if (page->index < end_index)
		count = PAGE_CACHE_SIZE;
	else {
		count = size & (PAGE_CACHE_SIZE - 1);
		if (page->index > end_index || count == 0)
			return 0;
	}
	return count;
}

static int write_page_block(struct inode *inode, struct page *page)
{
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_inode *fi = INO_FI(inode);
	char *buffer;
	ssize_t res;
	loff_t pos;
	unsigned count;
	struct fuse_req *req;

	req = fuse_get_request(fc);
	if (!req)
		return -ERESTARTSYS;

	down_read(&fi->write_sem);
	count = get_write_count(inode, page);
	res = 0;
	if (count) {
		struct fuse_file *ff;
		BUG_ON(list_empty(&fi->write_files));
		ff = list_entry(fi->write_files.next, struct fuse_file, ff_list);
		pos = ((loff_t) page->index << PAGE_CACHE_SHIFT);
		buffer = kmap(page);
		res = fuse_send_write(req, 1, ff, inode, buffer, pos, count);
		kunmap(page);
		if (res >= 0) {
			if (res < count) {
				printk("fuse: short write\n");
				res = -EPROTO;
			} else
				res = 0;
		}
	}
	up_read(&fi->write_sem);
	fuse_put_request(fc, req);
	if (res)
		SetPageError(page);
	return res;
}

#ifdef KERNEL_2_6

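/*
 * Completion callback for a nonblocking writepage: detect short
 * writes, record any error on the page and its mapping, then end page
 * writeback and release the request.
 */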
static void write_page_nonblock_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct page *page = (struct page *) req->data;
	struct inode *inode = page->mapping->host;
	struct fuse_inode *fi = INO_FI(inode);
	struct fuse_write_out *outarg = req->out.args[0].value;
	if (!req->out.h.error && outarg->size != req->in.args[1].size) {
		printk("fuse: short write\n");
		req->out.h.error = -EPROTO;
	}

	if (req->out.h.error) {
		SetPageError(page);
		if (req->out.h.error == -ENOSPC)
			set_bit(AS_ENOSPC, &page->mapping->flags);
		else
			set_bit(AS_EIO, &page->mapping->flags);
	}
	up_read(&fi->write_sem);

	end_page_writeback(page);
	kunmap(page);
	fuse_put_request(fc, req);
}

static void send_write_nonblock(struct fuse_req *req, struct inode *inode,
				struct page *page, unsigned count)
{
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_inode *fi = INO_FI(inode);
	struct fuse_write_in *inarg;
	struct fuse_file *ff;
	char *buffer;

	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, ff_list);

	inarg = &req->misc.write.in;
	buffer = kmap(page);
	inarg->writepage = 1;
	inarg->fh = ff->fh;
	inarg->offset = ((loff_t) page->index << PAGE_CACHE_SHIFT);
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.ino = inode->i_ino;
	req->in.numargs = 2;
	req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->in.args[1].value = buffer;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = &req->misc.write.out;
	request_send_nonblock(fc, req, write_page_nonblock_end, page);
}

static int write_page_nonblock(struct inode *inode, struct page *page)
{
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_inode *fi = INO_FI(inode);
	struct fuse_req *req;
	int err;

	err = -EWOULDBLOCK;
	req = fuse_get_request_nonblock(fc);
	if (req) {
		if (down_read_trylock(&fi->write_sem)) {
			unsigned count;
			err = 0;
			count = get_write_count(inode, page);
			if (count) {
				SetPageWriteback(page);
				send_write_nonblock(req, inode, page, count);
				return 0;
			}
			up_read(&fi->write_sem);
		}
		fuse_put_request(fc, req);
	}
	return err;
}

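/*
 * Writepage: for nonblocking writeback try to queue the write
 * asynchronously; if no request or the write lock is available the
 * page is simply redirtied.  Otherwise write the page synchronously.
 */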
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;

	if (wbc->nonblocking) {
		err = write_page_nonblock(inode, page);
		if (err == -EWOULDBLOCK) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,6)
			redirty_page_for_writepage(wbc, page);
#else
			__set_page_dirty_nobuffers(page);
#endif
			err = 0;
		}
	} else
		err = write_page_block(inode, page);

	unlock_page(page);
	return err;
}
#else
static int fuse_writepage(struct page *page)
{
	int err = write_page_block(page->mapping->host, page);
	unlock_page(page);
	return err;
}
#endif

static int fuse_prepare_write(struct file *file, struct page *page,
			      unsigned offset, unsigned to)
{
	/* No op */
	return 0;
}

static int fuse_commit_write(struct file *file, struct page *page,
			     unsigned offset, unsigned to)
{
	int err;
	struct inode *inode = page->mapping->host;

	err = write_buffer(inode, file, page, offset, to - offset);
	if (!err) {
		loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + to;
		if (pos > i_size_read(inode))
			i_size_write(inode, pos);

		if (offset == 0 && to == PAGE_CACHE_SIZE) {
#ifdef KERNEL_2_6
			clear_page_dirty(page);
#else
			ClearPageDirty(page);
#endif
			SetPageUptodate(page);
		}
	}
	return err;
}

static ssize_t fuse_write(struct file *file, const char *buf, size_t count,
			  loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = INO_FC(inode);
	struct fuse_file *ff = file->private_data;
	char *tmpbuf;
	ssize_t res = 0;
	loff_t pos = *ppos;
	struct fuse_req *req;

	req = fuse_get_request(fc);
	if (!req)
		return -ERESTARTSYS;

	tmpbuf = kmalloc(count < fc->max_write ? count : fc->max_write,
			 GFP_KERNEL);
	if (!tmpbuf) {
		fuse_put_request(fc, req);
		return -ENOMEM;
	}

	while (count) {
		size_t nbytes = count < fc->max_write ? count : fc->max_write;
		ssize_t res1;
		if (copy_from_user(tmpbuf, buf, nbytes)) {
			res = -EFAULT;
			break;
		}
		res1 = fuse_send_write(req, 0, ff, inode, tmpbuf, pos, nbytes);
		if (res1 < 0) {
			res = res1;
			break;
		}
		res += res1;
		count -= res1;
		buf += res1;
		pos += res1;
		if (res1 < nbytes)
			break;

		if (count)
			fuse_reset_request(req);
	}
	kfree(tmpbuf);
	fuse_put_request(fc, req);

	if (res > 0) {
		if (pos > i_size_read(inode))
			i_size_write(inode, pos);
		*ppos = pos;
	}

	return res;
}

static ssize_t fuse_file_write(struct file *file, const char *buf,
			       size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = INO_FC(inode);

	if (fc->flags & FUSE_DIRECT_IO) {
		ssize_t res;
		down(&inode->i_sem);
		res = fuse_write(file, buf, count, ppos);
		up(&inode->i_sem);
		return res;
	} else
		return generic_file_write(file, buf, count, ppos);
}

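/*
 * Shared writable mappings need a file handle that remains usable for
 * writepage after the file descriptor is closed, so the fuse_file is
 * put on the inode's write_files list.  Unless the user_mmap module
 * parameter is set, only root may create such mappings.
 */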
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = INO_FC(inode);

	if (fc->flags & FUSE_DIRECT_IO)
		return -ENODEV;
	else {
		if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
		    (VM_WRITE | VM_SHARED)) {
			struct fuse_inode *fi = INO_FI(inode);
			struct fuse_file *ff = file->private_data;

			if (!user_mmap && current->uid != 0)
				return -EPERM;

			down_write(&fi->write_sem);
			if (list_empty(&ff->ff_list))
				list_add(&ff->ff_list, &fi->write_files);
			up_write(&fi->write_sem);
		}
		return generic_file_mmap(file, vma);
	}
}

static struct file_operations fuse_file_operations = {
	.read = fuse_file_read,
	.write = fuse_file_write,
	.mmap = fuse_file_mmap,
	.open = fuse_open,
	.flush = fuse_flush,
	.release = fuse_release,
	.fsync = fuse_fsync,
#ifdef KERNEL_2_6
	.sendfile = generic_file_sendfile,
#endif
};

static struct address_space_operations fuse_file_aops = {
	.readpage = fuse_readpage,
	.writepage = fuse_writepage,
	.prepare_write = fuse_prepare_write,
	.commit_write = fuse_commit_write,
#ifdef KERNEL_2_6
	.readpages = fuse_readpages,
	.set_page_dirty = __set_page_dirty_nobuffers,
#endif
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}

/*
 * Local Variables:
 * indent-tabs-mode: t
 * c-basic-offset: 8
 * End:
 */
