1 |
/* |
2 |
FUSE: Filesystem in Userspace |
3 |
Copyright (C) 2001-2004 Miklos Szeredi <miklos@szeredi.hu> |
4 |
|
5 |
This program can be distributed under the terms of the GNU GPL. |
6 |
See the file COPYING. |
7 |
*/ |
8 |
|
9 |
#include "fuse_i.h" |
10 |
|
11 |
#include <linux/poll.h> |
12 |
#include <linux/proc_fs.h> |
13 |
#include <linux/file.h> |
14 |
|
15 |
/* If more requests are outstanding, then the operation will block */
#define MAX_OUTSTANDING 10

/* /proc/fs/fuse directory */
static struct proc_dir_entry *proc_fs_fuse;
/* /proc/fs/fuse/dev control device entry (non-static: used elsewhere) */
struct proc_dir_entry *proc_fuse_dev;
/* Slab cache from which all struct fuse_req objects are allocated */
static kmem_cache_t *fuse_req_cachep;
22 |
struct fuse_req *fuse_request_alloc(void) |
23 |
{ |
24 |
struct fuse_req *req; |
25 |
|
26 |
req = (struct fuse_req *) kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL); |
27 |
if (req) { |
28 |
memset(req, 0, sizeof(*req)); |
29 |
INIT_LIST_HEAD(&req->list); |
30 |
init_waitqueue_head(&req->waitq); |
31 |
} |
32 |
|
33 |
return req; |
34 |
} |
35 |
|
36 |
/* Return a request object to the slab cache */
void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}
40 |
|
41 |
static int request_restartable(enum fuse_opcode opcode) |
42 |
{ |
43 |
switch (opcode) { |
44 |
case FUSE_LOOKUP: |
45 |
case FUSE_GETATTR: |
46 |
case FUSE_SETATTR: |
47 |
case FUSE_READLINK: |
48 |
case FUSE_GETDIR: |
49 |
case FUSE_OPEN: |
50 |
case FUSE_READ: |
51 |
case FUSE_WRITE: |
52 |
case FUSE_STATFS: |
53 |
case FUSE_FSYNC: |
54 |
case FUSE_GETXATTR: |
55 |
case FUSE_SETXATTR: |
56 |
case FUSE_LISTXATTR: |
57 |
return 1; |
58 |
|
59 |
default: |
60 |
return 0; |
61 |
} |
62 |
} |
63 |
|
64 |
/* Called with fuse_lock held. Releases, and then reaquires it. */ |
65 |
/* Called with fuse_lock held.  Releases, and then reaquires it. */
static void request_wait_answer(struct fuse_req *req)
{
	int intr;

	/* Sleep without the lock until the daemon answers the request */
	spin_unlock(&fuse_lock);
	intr = wait_event_interruptible(req->waitq, req->finished);
	spin_lock(&fuse_lock);
	if (!intr)
		return;

	/* Request interrupted... Wait for it to be unlocked */
	if (req->locked) {
		/* The daemon is copying this request right now: flag the
		   interruption and wait (uninterruptibly) until it's done */
		req->interrupted = 1;
		spin_unlock(&fuse_lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fuse_lock);
	}

	/* Operations which modify the filesystem cannot safely be
	   restarted, because it is uncertain whether the operation has
	   completed or not... */
	if (req->sent && !request_restartable(req->in.h.opcode))
		req->out.h.error = -EINTR;
	else
		req->out.h.error = -ERESTARTSYS;
}
91 |
|
92 |
static int get_unique(struct fuse_conn *fc) |
93 |
{ |
94 |
do fc->reqctr++; |
95 |
while (!fc->reqctr); |
96 |
return fc->reqctr; |
97 |
} |
98 |
|
99 |
void fuse_reset_request(struct fuse_req *req) |
100 |
{ |
101 |
int preallocated = req->preallocated; |
102 |
|
103 |
memset(req, 0, sizeof(*req)); |
104 |
INIT_LIST_HEAD(&req->list); |
105 |
init_waitqueue_head(&req->waitq); |
106 |
req->preallocated = preallocated; |
107 |
} |
108 |
|
109 |
/* Pop a request off the connection's unused list and reset it.
   The unused_sem count (see callers) guarantees the list is non-empty,
   hence the BUG_ON. */
static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
	struct fuse_req *req;

	spin_lock(&fuse_lock);
	BUG_ON(list_empty(&fc->unused_list));
	req = list_entry(fc->unused_list.next, struct fuse_req, list);
	list_del_init(&req->list);
	spin_unlock(&fuse_lock);
	fuse_reset_request(req);
	return req;
}
121 |
|
122 |
/* Take a preallocated request, blocking (interruptibly) while all
   MAX_OUTSTANDING requests are in use.  Returns NULL if interrupted by
   a signal.  The caller's fsuid/fsgid are recorded in the header. */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
	struct fuse_req *req;

	if (down_interruptible(&fc->unused_sem))
		return NULL;

	req = do_get_request(fc);
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	return req;
}
134 |
|
135 |
/* Non-blocking variant of fuse_get_request(): returns NULL immediately
   when all MAX_OUTSTANDING preallocated requests are in use. */
struct fuse_req *fuse_get_request_nonblock(struct fuse_conn *fc)
{
	struct fuse_req *req;

	if (down_trylock(&fc->unused_sem))
		return NULL;

	req = do_get_request(fc);
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	return req;
}
147 |
|
148 |
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) |
149 |
{ |
150 |
if (!req->preallocated) |
151 |
fuse_request_free(req); |
152 |
else { |
153 |
spin_lock(&fuse_lock); |
154 |
list_add(&req->list, &fc->unused_list); |
155 |
spin_unlock(&fuse_lock); |
156 |
up(&fc->unused_sem); |
157 |
} |
158 |
} |
159 |
|
160 |
/* Must be called with fuse_lock held, and unlocks it */ |
161 |
/* Must be called with fuse_lock held, and unlocks it */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	fuse_reqend_t endfunc = req->end;

	if (!endfunc) {
		/* Synchronous request: wake the caller sleeping in
		   request_wait_answer() */
		wake_up(&req->waitq);
		spin_unlock(&fuse_lock);
	} else {
		/* Asynchronous request: run the completion callback
		   without fuse_lock held */
		spin_unlock(&fuse_lock);
		endfunc(fc, req);
	}
}
173 |
|
174 |
/* Queue a synchronous request and block until it is answered (or
   interrupted).  If the connection is dead the error stays -ENOTCONN. */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->issync = 1;
	req->end = NULL;

	spin_lock(&fuse_lock);
	req->out.h.error = -ENOTCONN;
	if (fc->file) {
		req->in.h.unique = get_unique(fc);
		list_add_tail(&req->list, &fc->pending);
		wake_up(&fc->waitq);
		/* Drops and re-takes fuse_lock while sleeping */
		request_wait_answer(req);
		/* Unlink from pending or processing, whichever list the
		   request ended up on */
		list_del(&req->list);
	}
	spin_unlock(&fuse_lock);
}
190 |
|
191 |
/* Queue a request for which no answer is expected.  If the connection
   is dead the request is simply recycled. */
void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->issync = 0;

	/* NOTE(review): fc->file is tested here without fuse_lock,
	   unlike in request_send() -- presumably tolerable on this
	   kernel, but worth confirming against release */
	if (fc->file) {
		spin_lock(&fuse_lock);
		list_add_tail(&req->list, &fc->pending);
		wake_up(&fc->waitq);
		spin_unlock(&fuse_lock);
	} else
		fuse_put_request(fc, req);
}
203 |
|
204 |
/* Queue a synchronous request without waiting for the answer; 'end' is
   invoked on completion with 'data' available in req->data.  On a dead
   connection the callback runs immediately with error -ENOTCONN. */
void request_send_nonblock(struct fuse_conn *fc, struct fuse_req *req,
			   fuse_reqend_t end, void *data)
{
	req->end = end;
	req->data = data;
	req->issync = 1;

	spin_lock(&fuse_lock);
	if (fc->file) {
		req->in.h.unique = get_unique(fc);
		list_add_tail(&req->list, &fc->pending);
		wake_up(&fc->waitq);
		spin_unlock(&fuse_lock);
	} else {
		req->out.h.error = -ENOTCONN;
		/* Unlocks fuse_lock: */
		request_end(fc, req);
	}
}
222 |
|
223 |
/* Sleep until a request appears on the pending list, the connection is
   unmounted, or a signal arrives.  Called with fuse_lock held; the lock
   is dropped around schedule() and re-taken.  Readers queue exclusively
   so each wake_up() rouses only one of them. */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->sb != NULL && list_empty(&fc->pending)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fuse_lock);
		schedule();
		spin_lock(&fuse_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}
240 |
|
241 |
/*
 * Copy one chunk of request data to the userspace buffer, advancing
 * the destination pointer and shrinking the remaining length.
 *
 * Returns 0 on success, -EINVAL if the buffer cannot hold the chunk,
 * or -EFAULT on a bad user pointer.
 */
static inline int copy_in_one(const void *src, size_t srclen, char **dstp,
			      size_t *dstlenp)
{
	if (srclen > *dstlenp) {
		printk("fuse_dev_read: buffer too small\n");
		return -EINVAL;
	}

	if (srclen != 0 && copy_to_user(*dstp, src, srclen))
		return -EFAULT;

	*dstp += srclen;
	*dstlenp -= srclen;
	return 0;
}
257 |
|
258 |
/*
 * Copy a request header plus all of its input arguments to the
 * userspace buffer.  Returns the number of bytes written, or a
 * negative error from copy_in_one().
 */
static inline int copy_in_args(struct fuse_in *in, char *buf, size_t nbytes)
{
	size_t remaining = nbytes;
	int err;
	int i;

	err = copy_in_one(&in->h, sizeof(in->h), &buf, &remaining);
	for (i = 0; !err && i < in->numargs; i++)
		err = copy_in_one(in->args[i].value, in->args[i].size,
				  &buf, &remaining);
	if (err)
		return err;

	return nbytes - remaining;
}
277 |
|
278 |
/* Device read: hand the next pending request to the userspace daemon.
   The request is marked 'locked' while its arguments are being copied,
   so an interrupted caller cannot free it underneath us (see
   request_wait_answer()). */
static ssize_t fuse_dev_read(struct file *file, char *buf, size_t nbytes,
			     loff_t *off)
{
	ssize_t ret;
	struct fuse_conn *fc = DEV_FC(file);
	struct fuse_req *req = NULL;

	spin_lock(&fuse_lock);
	request_wait(fc);
	if (fc->sb != NULL && !list_empty(&fc->pending)) {
		req = list_entry(fc->pending.next, struct fuse_req, list);
		list_del_init(&req->list);
		/* Pin the request while copying to userspace */
		req->locked = 1;
	}
	spin_unlock(&fuse_lock);
	/* NOTE(review): if the filesystem went away after a request was
	   dequeued above, that request is left locked and off every
	   list here -- looks leak-prone; confirm whether unmount can
	   race with this path */
	if (fc->sb == NULL)
		return -ENODEV;
	if (req == NULL)
		return -EINTR;

	ret = copy_in_args(&req->in, buf, nbytes);
	spin_lock(&fuse_lock);
	if (req->issync) {
		if (ret < 0) {
			/* Copy failed: fail the request back to its caller */
			req->out.h.error = -EPROTO;
			req->finished = 1;
		} else {
			/* Await the daemon's answer on the processing list */
			list_add_tail(&req->list, &fc->processing);
			req->sent = 1;
		}
		req->locked = 0;
		if (ret < 0 || req->interrupted)
			/* Unlocks fuse_lock: */
			request_end(fc, req);
		else
			spin_unlock(&fuse_lock);
	} else {
		/* No reply expected: recycle the request immediately */
		spin_unlock(&fuse_lock);
		fuse_put_request(fc, req);
	}
	return ret;
}
320 |
|
321 |
static struct fuse_req *request_find(struct fuse_conn *fc, unsigned int unique) |
322 |
{ |
323 |
struct list_head *entry; |
324 |
struct fuse_req *req = NULL; |
325 |
|
326 |
list_for_each(entry, &fc->processing) { |
327 |
struct fuse_req *tmp; |
328 |
tmp = list_entry(entry, struct fuse_req, list); |
329 |
if (tmp->in.h.unique == unique) { |
330 |
req = tmp; |
331 |
break; |
332 |
} |
333 |
} |
334 |
|
335 |
return req; |
336 |
} |
337 |
|
338 |
/* Turn the fd in a GETDIR reply into a struct file reference.  Must run
   in the writing process's context, since fget() looks the fd up in the
   current task's file table.
   NOTE(review): fget() can return NULL for a bad fd -- presumably the
   GETDIR consumer checks arg->file; verify. */
static void process_getdir(struct fuse_req *req)
{
	struct fuse_getdir_out_i *arg;
	arg = (struct fuse_getdir_out_i *) req->out.args[0].value;
	arg->file = fget(arg->fd);
}
344 |
|
345 |
static inline int copy_out_one(struct fuse_out_arg *arg, const char **srcp, |
346 |
size_t *srclenp, int allowvar) |
347 |
{ |
348 |
size_t dstlen = arg->size; |
349 |
if (*srclenp < dstlen) { |
350 |
if (!allowvar) { |
351 |
printk("fuse_dev_write: write is short\n"); |
352 |
return -EINVAL; |
353 |
} |
354 |
dstlen = *srclenp; |
355 |
} |
356 |
|
357 |
if (dstlen && copy_from_user(arg->value, *srcp, dstlen)) |
358 |
return -EFAULT; |
359 |
|
360 |
*srcp += dstlen; |
361 |
*srclenp -= dstlen; |
362 |
arg->size = dstlen; |
363 |
|
364 |
return 0; |
365 |
} |
366 |
|
367 |
static inline int copy_out_args(struct fuse_req *req, const char *buf, |
368 |
size_t nbytes) |
369 |
{ |
370 |
struct fuse_out *out = &req->out; |
371 |
int err; |
372 |
int i; |
373 |
|
374 |
buf += sizeof(struct fuse_out_header); |
375 |
nbytes -= sizeof(struct fuse_out_header); |
376 |
|
377 |
if (!out->h.error) { |
378 |
if (req->copy_out) |
379 |
return req->copy_out(req, buf, nbytes); |
380 |
else { |
381 |
for (i = 0; i < out->numargs; i++) { |
382 |
struct fuse_out_arg *arg = &out->args[i]; |
383 |
int allowvar; |
384 |
|
385 |
if (out->argvar && i == out->numargs - 1) |
386 |
allowvar = 1; |
387 |
else |
388 |
allowvar = 0; |
389 |
|
390 |
err = copy_out_one(arg, &buf, &nbytes, allowvar); |
391 |
if (err) |
392 |
return err; |
393 |
} |
394 |
} |
395 |
} |
396 |
|
397 |
if (nbytes != 0) { |
398 |
printk("fuse_dev_write: write is long\n"); |
399 |
return -EINVAL; |
400 |
} |
401 |
|
402 |
return 0; |
403 |
} |
404 |
|
405 |
static inline int copy_out_header(struct fuse_out_header *oh, const char *buf, |
406 |
size_t nbytes) |
407 |
{ |
408 |
if (nbytes < sizeof(struct fuse_out_header)) { |
409 |
printk("fuse_dev_write: write is short\n"); |
410 |
return -EINVAL; |
411 |
} |
412 |
|
413 |
if (copy_from_user(oh, buf, sizeof(struct fuse_out_header))) |
414 |
return -EFAULT; |
415 |
|
416 |
return 0; |
417 |
} |
418 |
|
419 |
#ifdef KERNEL_2_6
/* Invalidate the page cache of the inode named in a FUSE_INVALIDATE
   user request.  Returns -ENOENT if the inode is not in the icache. */
static int fuse_invalidate(struct fuse_conn *fc, struct fuse_user_header *uh)
{
	struct inode *inode = ilookup(fc->sb, uh->ino);
	if (!inode)
		return -ENOENT;
	/* Write back dirty pages before discarding the cache */
	fuse_sync_inode(inode);
	invalidate_inode_pages(inode->i_mapping);
	iput(inode);
	return 0;
}
#else
/* 2.4 variant: iget() may instantiate a fresh inode, so only act on
   inodes that already carry fuse private data */
static int fuse_invalidate(struct fuse_conn *fc, struct fuse_user_header *uh)
{
	struct inode *inode = iget(fc->sb, uh->ino);
	int err = -ENOENT;
	if (inode) {
		if (INO_FI(inode)) {
			fuse_sync_inode(inode);
			invalidate_inode_pages(inode);
			err = 0;
		}
		iput(inode);
	}
	return err;
}
#endif
446 |
|
447 |
static int fuse_user_request(struct fuse_conn *fc, const char *buf, |
448 |
size_t nbytes) |
449 |
{ |
450 |
struct fuse_user_header uh; |
451 |
int err; |
452 |
|
453 |
if (nbytes < sizeof(struct fuse_user_header)) { |
454 |
printk("fuse_dev_write: write is short\n"); |
455 |
return -EINVAL; |
456 |
} |
457 |
|
458 |
if (copy_from_user(&uh, buf, sizeof(struct fuse_user_header))) |
459 |
return -EFAULT; |
460 |
|
461 |
switch (uh.opcode) { |
462 |
case FUSE_INVALIDATE: |
463 |
err = fuse_invalidate(fc, &uh); |
464 |
break; |
465 |
|
466 |
default: |
467 |
err = -ENOSYS; |
468 |
} |
469 |
return err; |
470 |
} |
471 |
|
472 |
|
473 |
/* Device write: process an answer from the userspace daemon, or an
   unsolicited user request when the unique ID is zero. */
static ssize_t fuse_dev_write(struct file *file, const char *buf,
			      size_t nbytes, loff_t *off)
{
	int err;
	struct fuse_conn *fc = DEV_FC(file);
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (!fc->sb)
		return -EPERM;

	err = copy_out_header(&oh, buf, nbytes);
	if (err)
		return err;

	/* unique == 0 marks a user request (e.g. FUSE_INVALIDATE)
	   rather than an answer to a queued request */
	if (!oh.unique) {
		err = fuse_user_request(fc, buf, nbytes);
		goto out;
	}

	/* Errors must be negative errno values in (-1000, 0] */
	if (oh.error <= -1000 || oh.error > 0) {
		printk("fuse_dev_write: bad error value\n");
		return -EINVAL;
	}

	spin_lock(&fuse_lock);
	req = request_find(fc, oh.unique);
	if (req != NULL) {
		list_del_init(&req->list);
		/* Pin the request while copying from userspace */
		req->locked = 1;
	}
	spin_unlock(&fuse_lock);
	if (!req)
		return -ENOENT;

	req->out.h = oh;
	err = copy_out_args(req, buf, nbytes);

	spin_lock(&fuse_lock);
	if (err)
		req->out.h.error = -EPROTO;
	else {
		/* fget() needs to be done in this context */
		if (req->in.h.opcode == FUSE_GETDIR && !oh.error)
			process_getdir(req);
	}
	req->finished = 1;
	req->locked = 0;
	/* Unlocks fuse_lock: */
	request_end(fc, req);

 out:
	if (!err)
		return nbytes;
	else
		return err;
}
530 |
|
531 |
|
532 |
/*
 * Device poll: the device is always writable; it becomes readable when
 * a request is pending for the daemon to pick up.
 */
static unsigned int fuse_dev_poll(struct file *file, poll_table *wait)
{
	struct fuse_conn *fc = DEV_FC(file);
	unsigned int mask = POLLOUT | POLLWRNORM;

	/* ->poll returns an event mask, not an errno; report a dead
	   connection as POLLERR instead of the old (bogus) -EPERM */
	if (!fc->sb)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fuse_lock);
	if (!list_empty(&fc->pending))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fuse_lock);

	return mask;
}
549 |
|
550 |
static void free_conn(struct fuse_conn *fc) |
551 |
{ |
552 |
while (!list_empty(&fc->unused_list)) { |
553 |
struct fuse_req *req; |
554 |
req = list_entry(fc->unused_list.next, struct fuse_req, list); |
555 |
list_del(&req->list); |
556 |
fuse_request_free(req); |
557 |
} |
558 |
kfree(fc); |
559 |
} |
560 |
|
561 |
/* Must be called with the fuse lock held */ |
562 |
void fuse_release_conn(struct fuse_conn *fc) |
563 |
{ |
564 |
if (fc->sb == NULL && fc->file == NULL) { |
565 |
free_conn(fc); |
566 |
} |
567 |
} |
568 |
|
569 |
static struct fuse_conn *new_conn(void) |
570 |
{ |
571 |
struct fuse_conn *fc; |
572 |
|
573 |
fc = kmalloc(sizeof(*fc), GFP_KERNEL); |
574 |
if (fc != NULL) { |
575 |
int i; |
576 |
memset(fc, 0, sizeof(*fc)); |
577 |
fc->sb = NULL; |
578 |
fc->file = NULL; |
579 |
fc->flags = 0; |
580 |
fc->uid = 0; |
581 |
init_waitqueue_head(&fc->waitq); |
582 |
INIT_LIST_HEAD(&fc->pending); |
583 |
INIT_LIST_HEAD(&fc->processing); |
584 |
INIT_LIST_HEAD(&fc->unused_list); |
585 |
sema_init(&fc->unused_sem, MAX_OUTSTANDING); |
586 |
for (i = 0; i < MAX_OUTSTANDING; i++) { |
587 |
struct fuse_req *req = fuse_request_alloc(); |
588 |
if (!req) { |
589 |
free_conn(fc); |
590 |
return NULL; |
591 |
} |
592 |
req->preallocated = 1; |
593 |
list_add(&req->list, &fc->unused_list); |
594 |
} |
595 |
fc->reqctr = 1; |
596 |
} |
597 |
return fc; |
598 |
} |
599 |
|
600 |
static int fuse_dev_open(struct inode *inode, struct file *file) |
601 |
{ |
602 |
struct fuse_conn *fc; |
603 |
|
604 |
fc = new_conn(); |
605 |
if (!fc) |
606 |
return -ENOMEM; |
607 |
|
608 |
fc->file = file; |
609 |
file->private_data = fc; |
610 |
|
611 |
return 0; |
612 |
} |
613 |
|
614 |
/* Fail every request on 'head' with -ECONNABORTED.  Called with
   fuse_lock held; the lock is dropped and re-taken around each
   completion, which is why the loop re-checks list_empty(). */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		list_del_init(&req->list);
		if (req->issync) {
			req->out.h.error = -ECONNABORTED;
			req->finished = 1;
			/* Unlocks fuse_lock: */
			request_end(fc, req);
			spin_lock(&fuse_lock);
		} else {
			/* No one is waiting: just recycle the request */
			spin_unlock(&fuse_lock);
			fuse_put_request(fc, req);
			spin_lock(&fuse_lock);
		}
	}
}
633 |
|
634 |
/* Device close: abort all outstanding requests and drop the device's
   reference on the connection. */
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = DEV_FC(file);

	spin_lock(&fuse_lock);
	fc->file = NULL;
	end_requests(fc, &fc->pending);
	end_requests(fc, &fc->processing);
	/* Frees the connection if the filesystem is already unmounted */
	fuse_release_conn(fc);
	spin_unlock(&fuse_lock);
	return 0;
}
646 |
|
647 |
/* File operations for the /proc/fs/fuse/dev control device */
static struct file_operations fuse_dev_operations = {
	.owner = THIS_MODULE,
	.read = fuse_dev_read,
	.write = fuse_dev_write,
	.poll = fuse_dev_poll,
	.open = fuse_dev_open,
	.release = fuse_dev_release,
};
655 |
|
656 |
/*
 * /proc/fs/fuse/version read handler: report the kernel interface
 * version as "major.minor".  Returns the number of bytes written.
 */
static int read_version(char *page, char **start, off_t off, int count,
			int *eof, void *data)
{
	return sprintf(page, "%i.%i\n", FUSE_KERNEL_VERSION,
		       FUSE_KERNEL_MINOR_VERSION);
}
664 |
|
665 |
int fuse_dev_init() |
666 |
{ |
667 |
proc_fs_fuse = NULL; |
668 |
proc_fuse_dev = NULL; |
669 |
|
670 |
fuse_req_cachep = kmem_cache_create("fuser_request", |
671 |
sizeof(struct fuse_req), |
672 |
0, 0, NULL, NULL); |
673 |
if (!fuse_req_cachep) |
674 |
return -ENOMEM; |
675 |
|
676 |
proc_fs_fuse = proc_mkdir("fuse", proc_root_fs); |
677 |
if (proc_fs_fuse) { |
678 |
struct proc_dir_entry *de; |
679 |
|
680 |
proc_fs_fuse->owner = THIS_MODULE; |
681 |
proc_fuse_dev = create_proc_entry("dev", S_IFSOCK | 0666, |
682 |
proc_fs_fuse); |
683 |
if (proc_fuse_dev) { |
684 |
proc_fuse_dev->owner = THIS_MODULE; |
685 |
proc_fuse_dev->proc_fops = &fuse_dev_operations; |
686 |
} |
687 |
de = create_proc_entry("version", S_IFREG | 0444, proc_fs_fuse); |
688 |
if (de) { |
689 |
de->owner = THIS_MODULE; |
690 |
de->read_proc = read_version; |
691 |
} |
692 |
} |
693 |
return 0; |
694 |
} |
695 |
|
696 |
/* Module teardown: remove the proc entries (in the reverse order they
   were created) and destroy the request slab cache. */
void fuse_dev_cleanup()
{
	if (proc_fs_fuse) {
		remove_proc_entry("dev", proc_fs_fuse);
		remove_proc_entry("version", proc_fs_fuse);
		remove_proc_entry("fuse", proc_root_fs);
	}

	kmem_cache_destroy(fuse_req_cachep);
}
706 |
|
707 |
/* |
708 |
* Local Variables: |
709 |
* indent-tabs-mode: t |
710 |
* c-basic-offset: 8 |
711 |
* End: |
712 |
*/ |