fuse_lowlevel.c (71053B)
1/* 2 * FUSE: Filesystem in Userspace 3 * Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu> 4 * 5 * Implementation of (most of) the low-level FUSE API. The session loop 6 * functions are implemented in separate files. 7 * 8 * This program can be distributed under the terms of the GNU LGPLv2. 9 * See the file COPYING.LIB 10 */ 11 12#include "qemu/osdep.h" 13#include "fuse_i.h" 14#include "standard-headers/linux/fuse.h" 15#include "fuse_misc.h" 16#include "fuse_opt.h" 17#include "fuse_virtio.h" 18 19#include <sys/file.h> 20 21#define THREAD_POOL_SIZE 0 22 23#define OFFSET_MAX 0x7fffffffffffffffLL 24 25struct fuse_pollhandle { 26 uint64_t kh; 27 struct fuse_session *se; 28}; 29 30static size_t pagesize; 31 32static __attribute__((constructor)) void fuse_ll_init_pagesize(void) 33{ 34 pagesize = getpagesize(); 35} 36 37static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr) 38{ 39 *attr = (struct fuse_attr){ 40 .ino = stbuf->st_ino, 41 .mode = stbuf->st_mode, 42 .nlink = stbuf->st_nlink, 43 .uid = stbuf->st_uid, 44 .gid = stbuf->st_gid, 45 .rdev = stbuf->st_rdev, 46 .size = stbuf->st_size, 47 .blksize = stbuf->st_blksize, 48 .blocks = stbuf->st_blocks, 49 .atime = stbuf->st_atime, 50 .mtime = stbuf->st_mtime, 51 .ctime = stbuf->st_ctime, 52 .atimensec = ST_ATIM_NSEC(stbuf), 53 .mtimensec = ST_MTIM_NSEC(stbuf), 54 .ctimensec = ST_CTIM_NSEC(stbuf), 55 }; 56} 57 58static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf) 59{ 60 stbuf->st_mode = attr->mode; 61 stbuf->st_uid = attr->uid; 62 stbuf->st_gid = attr->gid; 63 stbuf->st_size = attr->size; 64 stbuf->st_atime = attr->atime; 65 stbuf->st_mtime = attr->mtime; 66 stbuf->st_ctime = attr->ctime; 67 ST_ATIM_NSEC_SET(stbuf, attr->atimensec); 68 ST_MTIM_NSEC_SET(stbuf, attr->mtimensec); 69 ST_CTIM_NSEC_SET(stbuf, attr->ctimensec); 70} 71 72static size_t iov_length(const struct iovec *iov, size_t count) 73{ 74 size_t seg; 75 size_t ret = 0; 76 77 for (seg = 0; seg < count; seg++) { 78 ret += iov[seg].iov_len; 79 } 80 return ret; 81} 82 83static void list_init_req(struct fuse_req *req) 84{ 85 req->next = req; 86 req->prev = req; 87} 88 89static void list_del_req(struct fuse_req *req) 90{ 91 struct fuse_req *prev = req->prev; 92 struct fuse_req *next = req->next; 93 prev->next = next; 94 next->prev = prev; 95} 96 97static void list_add_req(struct fuse_req *req, struct fuse_req *next) 98{ 99 struct fuse_req *prev = next->prev; 100 req->next = next; 101 req->prev = prev; 102 prev->next = req; 103 next->prev = req; 104} 105 106static void destroy_req(fuse_req_t req) 107{ 108 pthread_mutex_destroy(&req->lock); 109 g_free(req); 110} 111 112void fuse_free_req(fuse_req_t req) 113{ 114 int ctr; 115 struct fuse_session *se = req->se; 116 117 pthread_mutex_lock(&se->lock); 118 req->u.ni.func = NULL; 119 req->u.ni.data = NULL; 120 list_del_req(req); 121 ctr = --req->ctr; 122 req->ch = NULL; 123 pthread_mutex_unlock(&se->lock); 124 if (!ctr) { 125 destroy_req(req); 126 } 127} 128 129static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se) 130{ 131 struct fuse_req *req; 132 133 req = g_try_new0(struct fuse_req, 1); 134 if (req == NULL) { 135 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n"); 136 } else { 137 req->se = se; 138 req->ctr = 1; 139 list_init_req(req); 140 fuse_mutex_init(&req->lock); 141 } 142 143 return req; 144} 145 146/* Send data. 
If *ch* is NULL, send via session master fd */ 147static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch, 148 struct iovec *iov, int count) 149{ 150 struct fuse_out_header *out = iov[0].iov_base; 151 152 out->len = iov_length(iov, count); 153 if (out->unique == 0) { 154 fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n", out->error, 155 out->len); 156 } else if (out->error) { 157 fuse_log(FUSE_LOG_DEBUG, 158 " unique: %llu, error: %i (%s), outsize: %i\n", 159 (unsigned long long)out->unique, out->error, 160 strerror(-out->error), out->len); 161 } else { 162 fuse_log(FUSE_LOG_DEBUG, " unique: %llu, success, outsize: %i\n", 163 (unsigned long long)out->unique, out->len); 164 } 165 166 if (fuse_lowlevel_is_virtio(se)) { 167 return virtio_send_msg(se, ch, iov, count); 168 } 169 170 abort(); /* virtio should have taken it before here */ 171 return 0; 172} 173 174 175int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov, 176 int count) 177{ 178 struct fuse_out_header out = { 179 .unique = req->unique, 180 .error = error, 181 }; 182 183 if (error <= -1000 || error > 0) { 184 fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error); 185 out.error = -ERANGE; 186 } 187 188 iov[0].iov_base = &out; 189 iov[0].iov_len = sizeof(struct fuse_out_header); 190 191 return fuse_send_msg(req->se, req->ch, iov, count); 192} 193 194static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov, 195 int count) 196{ 197 int res; 198 199 res = fuse_send_reply_iov_nofree(req, error, iov, count); 200 fuse_free_req(req); 201 return res; 202} 203 204static int send_reply(fuse_req_t req, int error, const void *arg, 205 size_t argsize) 206{ 207 struct iovec iov[2]; 208 int count = 1; 209 if (argsize) { 210 iov[1].iov_base = (void *)arg; 211 iov[1].iov_len = argsize; 212 count++; 213 } 214 return send_reply_iov(req, error, iov, count); 215} 216 217int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count) 218{ 219 int res; 220 g_autofree struct iovec *padded_iov = NULL; 221 222 padded_iov = g_try_new(struct iovec, count + 1); 223 if (padded_iov == NULL) { 224 return fuse_reply_err(req, ENOMEM); 225 } 226 227 memcpy(padded_iov + 1, iov, count * sizeof(struct iovec)); 228 count++; 229 230 res = send_reply_iov(req, 0, padded_iov, count); 231 232 return res; 233} 234 235 236/* 237 * 'buf` is allowed to be empty so that the proper size may be 238 * allocated by the caller 239 */ 240size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize, 241 const char *name, const struct stat *stbuf, off_t off) 242{ 243 (void)req; 244 size_t namelen; 245 size_t entlen; 246 size_t entlen_padded; 247 struct fuse_dirent *dirent; 248 249 namelen = strlen(name); 250 entlen = FUSE_NAME_OFFSET + namelen; 251 entlen_padded = FUSE_DIRENT_ALIGN(entlen); 252 253 if ((buf == NULL) || (entlen_padded > bufsize)) { 254 return entlen_padded; 255 } 256 257 dirent = (struct fuse_dirent *)buf; 258 dirent->ino = stbuf->st_ino; 259 dirent->off = off; 260 dirent->namelen = namelen; 261 dirent->type = (stbuf->st_mode & S_IFMT) >> 12; 262 memcpy(dirent->name, name, namelen); 263 memset(dirent->name + namelen, 0, entlen_padded - entlen); 264 265 return entlen_padded; 266} 267 268static void convert_statfs(const struct statvfs *stbuf, 269 struct fuse_kstatfs *kstatfs) 270{ 271 *kstatfs = (struct fuse_kstatfs){ 272 .bsize = stbuf->f_bsize, 273 .frsize = stbuf->f_frsize, 274 .blocks = stbuf->f_blocks, 275 .bfree = stbuf->f_bfree, 276 .bavail = stbuf->f_bavail, 277 .files = stbuf->f_files, 278 
.ffree = stbuf->f_ffree, 279 .namelen = stbuf->f_namemax, 280 }; 281} 282 283static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize) 284{ 285 return send_reply(req, 0, arg, argsize); 286} 287 288int fuse_reply_err(fuse_req_t req, int err) 289{ 290 return send_reply(req, -err, NULL, 0); 291} 292 293void fuse_reply_none(fuse_req_t req) 294{ 295 fuse_free_req(req); 296} 297 298static unsigned long calc_timeout_sec(double t) 299{ 300 if (t > (double)ULONG_MAX) { 301 return ULONG_MAX; 302 } else if (t < 0.0) { 303 return 0; 304 } else { 305 return (unsigned long)t; 306 } 307} 308 309static unsigned int calc_timeout_nsec(double t) 310{ 311 double f = t - (double)calc_timeout_sec(t); 312 if (f < 0.0) { 313 return 0; 314 } else if (f >= 0.999999999) { 315 return 999999999; 316 } else { 317 return (unsigned int)(f * 1.0e9); 318 } 319} 320 321static void fill_entry(struct fuse_entry_out *arg, 322 const struct fuse_entry_param *e) 323{ 324 *arg = (struct fuse_entry_out){ 325 .nodeid = e->ino, 326 .generation = e->generation, 327 .entry_valid = calc_timeout_sec(e->entry_timeout), 328 .entry_valid_nsec = calc_timeout_nsec(e->entry_timeout), 329 .attr_valid = calc_timeout_sec(e->attr_timeout), 330 .attr_valid_nsec = calc_timeout_nsec(e->attr_timeout), 331 }; 332 convert_stat(&e->attr, &arg->attr); 333 334 arg->attr.flags = e->attr_flags; 335} 336 337/* 338 * `buf` is allowed to be empty so that the proper size may be 339 * allocated by the caller 340 */ 341size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize, 342 const char *name, 343 const struct fuse_entry_param *e, off_t off) 344{ 345 (void)req; 346 size_t namelen; 347 size_t entlen; 348 size_t entlen_padded; 349 350 namelen = strlen(name); 351 entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen; 352 entlen_padded = FUSE_DIRENT_ALIGN(entlen); 353 if ((buf == NULL) || (entlen_padded > bufsize)) { 354 return entlen_padded; 355 } 356 357 struct fuse_direntplus *dp = (struct fuse_direntplus *)buf; 358 memset(&dp->entry_out, 0, sizeof(dp->entry_out)); 359 fill_entry(&dp->entry_out, e); 360 361 struct fuse_dirent *dirent = &dp->dirent; 362 *dirent = (struct fuse_dirent){ 363 .ino = e->attr.st_ino, 364 .off = off, 365 .namelen = namelen, 366 .type = (e->attr.st_mode & S_IFMT) >> 12, 367 }; 368 memcpy(dirent->name, name, namelen); 369 memset(dirent->name + namelen, 0, entlen_padded - entlen); 370 371 return entlen_padded; 372} 373 374static void fill_open(struct fuse_open_out *arg, const struct fuse_file_info *f) 375{ 376 arg->fh = f->fh; 377 if (f->direct_io) { 378 arg->open_flags |= FOPEN_DIRECT_IO; 379 } 380 if (f->keep_cache) { 381 arg->open_flags |= FOPEN_KEEP_CACHE; 382 } 383 if (f->cache_readdir) { 384 arg->open_flags |= FOPEN_CACHE_DIR; 385 } 386 if (f->nonseekable) { 387 arg->open_flags |= FOPEN_NONSEEKABLE; 388 } 389} 390 391int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e) 392{ 393 struct fuse_entry_out arg; 394 size_t size = sizeof(arg); 395 396 memset(&arg, 0, sizeof(arg)); 397 fill_entry(&arg, e); 398 return send_reply_ok(req, &arg, size); 399} 400 401int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e, 402 const struct fuse_file_info *f) 403{ 404 char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)]; 405 size_t entrysize = sizeof(struct fuse_entry_out); 406 struct fuse_entry_out *earg = (struct fuse_entry_out *)buf; 407 struct fuse_open_out *oarg = (struct fuse_open_out *)(buf + entrysize); 408 409 memset(buf, 0, sizeof(buf)); 410 fill_entry(earg, e); 411 
fill_open(oarg, f); 412 return send_reply_ok(req, buf, entrysize + sizeof(struct fuse_open_out)); 413} 414 415int fuse_reply_attr(fuse_req_t req, const struct stat *attr, 416 double attr_timeout) 417{ 418 struct fuse_attr_out arg; 419 size_t size = sizeof(arg); 420 421 memset(&arg, 0, sizeof(arg)); 422 arg.attr_valid = calc_timeout_sec(attr_timeout); 423 arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout); 424 convert_stat(attr, &arg.attr); 425 426 return send_reply_ok(req, &arg, size); 427} 428 429int fuse_reply_readlink(fuse_req_t req, const char *linkname) 430{ 431 return send_reply_ok(req, linkname, strlen(linkname)); 432} 433 434int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f) 435{ 436 struct fuse_open_out arg; 437 438 memset(&arg, 0, sizeof(arg)); 439 fill_open(&arg, f); 440 return send_reply_ok(req, &arg, sizeof(arg)); 441} 442 443int fuse_reply_write(fuse_req_t req, size_t count) 444{ 445 struct fuse_write_out arg; 446 447 memset(&arg, 0, sizeof(arg)); 448 arg.size = count; 449 450 return send_reply_ok(req, &arg, sizeof(arg)); 451} 452 453int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size) 454{ 455 return send_reply_ok(req, buf, size); 456} 457 458static int fuse_send_data_iov_fallback(struct fuse_session *se, 459 struct fuse_chan *ch, struct iovec *iov, 460 int iov_count, struct fuse_bufvec *buf, 461 size_t len) 462{ 463 /* Optimize common case */ 464 if (buf->count == 1 && buf->idx == 0 && buf->off == 0 && 465 !(buf->buf[0].flags & FUSE_BUF_IS_FD)) { 466 /* 467 * FIXME: also avoid memory copy if there are multiple buffers 468 * but none of them contain an fd 469 */ 470 471 iov[iov_count].iov_base = buf->buf[0].mem; 472 iov[iov_count].iov_len = len; 473 iov_count++; 474 return fuse_send_msg(se, ch, iov, iov_count); 475 } 476 477 if (fuse_lowlevel_is_virtio(se) && buf->count == 1 && 478 buf->buf[0].flags == (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK)) { 479 return virtio_send_data_iov(se, ch, iov, iov_count, buf, len); 480 } 481 482 abort(); /* Will have taken vhost path */ 483 return 0; 484} 485 486static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch, 487 struct iovec *iov, int iov_count, 488 struct fuse_bufvec *buf) 489{ 490 size_t len = fuse_buf_size(buf); 491 492 return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len); 493} 494 495int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv) 496{ 497 struct iovec iov[2]; 498 struct fuse_out_header out = { 499 .unique = req->unique, 500 }; 501 int res; 502 503 iov[0].iov_base = &out; 504 iov[0].iov_len = sizeof(struct fuse_out_header); 505 506 res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv); 507 if (res <= 0) { 508 fuse_free_req(req); 509 return res; 510 } else { 511 return fuse_reply_err(req, res); 512 } 513} 514 515int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf) 516{ 517 struct fuse_statfs_out arg; 518 size_t size = sizeof(arg); 519 520 memset(&arg, 0, sizeof(arg)); 521 convert_statfs(stbuf, &arg.st); 522 523 return send_reply_ok(req, &arg, size); 524} 525 526int fuse_reply_xattr(fuse_req_t req, size_t count) 527{ 528 struct fuse_getxattr_out arg; 529 530 memset(&arg, 0, sizeof(arg)); 531 arg.size = count; 532 533 return send_reply_ok(req, &arg, sizeof(arg)); 534} 535 536int fuse_reply_lock(fuse_req_t req, const struct flock *lock) 537{ 538 struct fuse_lk_out arg; 539 540 memset(&arg, 0, sizeof(arg)); 541 arg.lk.type = lock->l_type; 542 if (lock->l_type != F_UNLCK) { 543 arg.lk.start = lock->l_start; 544 if (lock->l_len == 0) { 545 
arg.lk.end = OFFSET_MAX; 546 } else { 547 arg.lk.end = lock->l_start + lock->l_len - 1; 548 } 549 } 550 arg.lk.pid = lock->l_pid; 551 return send_reply_ok(req, &arg, sizeof(arg)); 552} 553 554int fuse_reply_bmap(fuse_req_t req, uint64_t idx) 555{ 556 struct fuse_bmap_out arg; 557 558 memset(&arg, 0, sizeof(arg)); 559 arg.block = idx; 560 561 return send_reply_ok(req, &arg, sizeof(arg)); 562} 563 564static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov, 565 size_t count) 566{ 567 struct fuse_ioctl_iovec *fiov; 568 size_t i; 569 570 fiov = g_try_new(struct fuse_ioctl_iovec, count); 571 if (!fiov) { 572 return NULL; 573 } 574 575 for (i = 0; i < count; i++) { 576 fiov[i].base = (uintptr_t)iov[i].iov_base; 577 fiov[i].len = iov[i].iov_len; 578 } 579 580 return fiov; 581} 582 583int fuse_reply_ioctl_retry(fuse_req_t req, const struct iovec *in_iov, 584 size_t in_count, const struct iovec *out_iov, 585 size_t out_count) 586{ 587 struct fuse_ioctl_out arg; 588 g_autofree struct fuse_ioctl_iovec *in_fiov = NULL; 589 g_autofree struct fuse_ioctl_iovec *out_fiov = NULL; 590 struct iovec iov[4]; 591 size_t count = 1; 592 int res; 593 594 memset(&arg, 0, sizeof(arg)); 595 arg.flags |= FUSE_IOCTL_RETRY; 596 arg.in_iovs = in_count; 597 arg.out_iovs = out_count; 598 iov[count].iov_base = &arg; 599 iov[count].iov_len = sizeof(arg); 600 count++; 601 602 /* Can't handle non-compat 64bit ioctls on 32bit */ 603 if (sizeof(void *) == 4 && req->ioctl_64bit) { 604 res = fuse_reply_err(req, EINVAL); 605 return res; 606 } 607 608 if (in_count) { 609 in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count); 610 if (!in_fiov) { 611 res = fuse_reply_err(req, ENOMEM); 612 return res; 613 } 614 615 iov[count].iov_base = (void *)in_fiov; 616 iov[count].iov_len = sizeof(in_fiov[0]) * in_count; 617 count++; 618 } 619 if (out_count) { 620 out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count); 621 if (!out_fiov) { 622 res = fuse_reply_err(req, ENOMEM); 623 return res; 624 } 625 626 iov[count].iov_base = (void *)out_fiov; 627 iov[count].iov_len = sizeof(out_fiov[0]) * out_count; 628 count++; 629 } 630 631 res = send_reply_iov(req, 0, iov, count); 632 633 return res; 634} 635 636int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size) 637{ 638 struct fuse_ioctl_out arg; 639 struct iovec iov[3]; 640 size_t count = 1; 641 642 memset(&arg, 0, sizeof(arg)); 643 arg.result = result; 644 iov[count].iov_base = &arg; 645 iov[count].iov_len = sizeof(arg); 646 count++; 647 648 if (size) { 649 iov[count].iov_base = (char *)buf; 650 iov[count].iov_len = size; 651 count++; 652 } 653 654 return send_reply_iov(req, 0, iov, count); 655} 656 657int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov, 658 int count) 659{ 660 g_autofree struct iovec *padded_iov = NULL; 661 struct fuse_ioctl_out arg; 662 int res; 663 664 padded_iov = g_try_new(struct iovec, count + 2); 665 if (padded_iov == NULL) { 666 return fuse_reply_err(req, ENOMEM); 667 } 668 669 memset(&arg, 0, sizeof(arg)); 670 arg.result = result; 671 padded_iov[1].iov_base = &arg; 672 padded_iov[1].iov_len = sizeof(arg); 673 674 memcpy(&padded_iov[2], iov, count * sizeof(struct iovec)); 675 676 res = send_reply_iov(req, 0, padded_iov, count + 2); 677 678 return res; 679} 680 681int fuse_reply_poll(fuse_req_t req, unsigned revents) 682{ 683 struct fuse_poll_out arg; 684 685 memset(&arg, 0, sizeof(arg)); 686 arg.revents = revents; 687 688 return send_reply_ok(req, &arg, sizeof(arg)); 689} 690 691int fuse_reply_lseek(fuse_req_t 
req, off_t off) 692{ 693 struct fuse_lseek_out arg; 694 695 memset(&arg, 0, sizeof(arg)); 696 arg.offset = off; 697 698 return send_reply_ok(req, &arg, sizeof(arg)); 699} 700 701static void do_lookup(fuse_req_t req, fuse_ino_t nodeid, 702 struct fuse_mbuf_iter *iter) 703{ 704 const char *name = fuse_mbuf_iter_advance_str(iter); 705 if (!name) { 706 fuse_reply_err(req, EINVAL); 707 return; 708 } 709 710 if (req->se->op.lookup) { 711 req->se->op.lookup(req, nodeid, name); 712 } else { 713 fuse_reply_err(req, ENOSYS); 714 } 715} 716 717static void do_forget(fuse_req_t req, fuse_ino_t nodeid, 718 struct fuse_mbuf_iter *iter) 719{ 720 struct fuse_forget_in *arg; 721 722 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 723 if (!arg) { 724 fuse_reply_err(req, EINVAL); 725 return; 726 } 727 728 if (req->se->op.forget) { 729 req->se->op.forget(req, nodeid, arg->nlookup); 730 } else { 731 fuse_reply_none(req); 732 } 733} 734 735static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid, 736 struct fuse_mbuf_iter *iter) 737{ 738 struct fuse_batch_forget_in *arg; 739 struct fuse_forget_data *forgets; 740 size_t scount; 741 742 (void)nodeid; 743 744 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 745 if (!arg) { 746 fuse_reply_none(req); 747 return; 748 } 749 750 /* 751 * Prevent integer overflow. The compiler emits the following warning 752 * unless we use the scount local variable: 753 * 754 * error: comparison is always false due to limited range of data type 755 * [-Werror=type-limits] 756 * 757 * This may be true on 64-bit hosts but we need this check for 32-bit 758 * hosts. 759 */ 760 scount = arg->count; 761 if (scount > SIZE_MAX / sizeof(forgets[0])) { 762 fuse_reply_none(req); 763 return; 764 } 765 766 forgets = fuse_mbuf_iter_advance(iter, arg->count * sizeof(forgets[0])); 767 if (!forgets) { 768 fuse_reply_none(req); 769 return; 770 } 771 772 if (req->se->op.forget_multi) { 773 req->se->op.forget_multi(req, arg->count, forgets); 774 } else if (req->se->op.forget) { 775 unsigned int i; 776 777 for (i = 0; i < arg->count; i++) { 778 struct fuse_req *dummy_req; 779 780 dummy_req = fuse_ll_alloc_req(req->se); 781 if (dummy_req == NULL) { 782 break; 783 } 784 785 dummy_req->unique = req->unique; 786 dummy_req->ctx = req->ctx; 787 dummy_req->ch = NULL; 788 789 req->se->op.forget(dummy_req, forgets[i].ino, forgets[i].nlookup); 790 } 791 fuse_reply_none(req); 792 } else { 793 fuse_reply_none(req); 794 } 795} 796 797static void do_getattr(fuse_req_t req, fuse_ino_t nodeid, 798 struct fuse_mbuf_iter *iter) 799{ 800 struct fuse_file_info *fip = NULL; 801 struct fuse_file_info fi; 802 803 struct fuse_getattr_in *arg; 804 805 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 806 if (!arg) { 807 fuse_reply_err(req, EINVAL); 808 return; 809 } 810 811 if (arg->getattr_flags & FUSE_GETATTR_FH) { 812 memset(&fi, 0, sizeof(fi)); 813 fi.fh = arg->fh; 814 fip = &fi; 815 } 816 817 if (req->se->op.getattr) { 818 req->se->op.getattr(req, nodeid, fip); 819 } else { 820 fuse_reply_err(req, ENOSYS); 821 } 822} 823 824static void do_setattr(fuse_req_t req, fuse_ino_t nodeid, 825 struct fuse_mbuf_iter *iter) 826{ 827 if (req->se->op.setattr) { 828 struct fuse_setattr_in *arg; 829 struct fuse_file_info *fi = NULL; 830 struct fuse_file_info fi_store; 831 struct stat stbuf; 832 833 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 834 if (!arg) { 835 fuse_reply_err(req, EINVAL); 836 return; 837 } 838 839 memset(&stbuf, 0, sizeof(stbuf)); 840 convert_attr(arg, &stbuf); 841 if (arg->valid & FATTR_FH) { 842 arg->valid 
&= ~FATTR_FH; 843 memset(&fi_store, 0, sizeof(fi_store)); 844 fi = &fi_store; 845 fi->fh = arg->fh; 846 } 847 arg->valid &= FUSE_SET_ATTR_MODE | FUSE_SET_ATTR_UID | 848 FUSE_SET_ATTR_GID | FUSE_SET_ATTR_SIZE | 849 FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME | 850 FUSE_SET_ATTR_ATIME_NOW | FUSE_SET_ATTR_MTIME_NOW | 851 FUSE_SET_ATTR_CTIME | FUSE_SET_ATTR_KILL_SUIDGID; 852 853 req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi); 854 } else { 855 fuse_reply_err(req, ENOSYS); 856 } 857} 858 859static void do_access(fuse_req_t req, fuse_ino_t nodeid, 860 struct fuse_mbuf_iter *iter) 861{ 862 struct fuse_access_in *arg; 863 864 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 865 if (!arg) { 866 fuse_reply_err(req, EINVAL); 867 return; 868 } 869 870 if (req->se->op.access) { 871 req->se->op.access(req, nodeid, arg->mask); 872 } else { 873 fuse_reply_err(req, ENOSYS); 874 } 875} 876 877static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, 878 struct fuse_mbuf_iter *iter) 879{ 880 (void)iter; 881 882 if (req->se->op.readlink) { 883 req->se->op.readlink(req, nodeid); 884 } else { 885 fuse_reply_err(req, ENOSYS); 886 } 887} 888 889static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, 890 struct fuse_mbuf_iter *iter) 891{ 892 struct fuse_mknod_in *arg; 893 const char *name; 894 895 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 896 name = fuse_mbuf_iter_advance_str(iter); 897 if (!arg || !name) { 898 fuse_reply_err(req, EINVAL); 899 return; 900 } 901 902 req->ctx.umask = arg->umask; 903 904 if (req->se->op.mknod) { 905 req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev); 906 } else { 907 fuse_reply_err(req, ENOSYS); 908 } 909} 910 911static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, 912 struct fuse_mbuf_iter *iter) 913{ 914 struct fuse_mkdir_in *arg; 915 const char *name; 916 917 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 918 name = fuse_mbuf_iter_advance_str(iter); 919 if (!arg || !name) { 920 fuse_reply_err(req, EINVAL); 921 return; 922 } 923 924 req->ctx.umask = arg->umask; 925 926 if (req->se->op.mkdir) { 927 req->se->op.mkdir(req, nodeid, name, arg->mode); 928 } else { 929 fuse_reply_err(req, ENOSYS); 930 } 931} 932 933static void do_unlink(fuse_req_t req, fuse_ino_t nodeid, 934 struct fuse_mbuf_iter *iter) 935{ 936 const char *name = fuse_mbuf_iter_advance_str(iter); 937 938 if (!name) { 939 fuse_reply_err(req, EINVAL); 940 return; 941 } 942 943 if (req->se->op.unlink) { 944 req->se->op.unlink(req, nodeid, name); 945 } else { 946 fuse_reply_err(req, ENOSYS); 947 } 948} 949 950static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid, 951 struct fuse_mbuf_iter *iter) 952{ 953 const char *name = fuse_mbuf_iter_advance_str(iter); 954 955 if (!name) { 956 fuse_reply_err(req, EINVAL); 957 return; 958 } 959 960 if (req->se->op.rmdir) { 961 req->se->op.rmdir(req, nodeid, name); 962 } else { 963 fuse_reply_err(req, ENOSYS); 964 } 965} 966 967static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, 968 struct fuse_mbuf_iter *iter) 969{ 970 const char *name = fuse_mbuf_iter_advance_str(iter); 971 const char *linkname = fuse_mbuf_iter_advance_str(iter); 972 973 if (!name || !linkname) { 974 fuse_reply_err(req, EINVAL); 975 return; 976 } 977 978 if (req->se->op.symlink) { 979 req->se->op.symlink(req, linkname, nodeid, name); 980 } else { 981 fuse_reply_err(req, ENOSYS); 982 } 983} 984 985static void do_rename(fuse_req_t req, fuse_ino_t nodeid, 986 struct fuse_mbuf_iter *iter) 987{ 988 struct fuse_rename_in *arg; 989 const char *oldname; 990 const char *newname; 991 992 arg = 
fuse_mbuf_iter_advance(iter, sizeof(*arg)); 993 oldname = fuse_mbuf_iter_advance_str(iter); 994 newname = fuse_mbuf_iter_advance_str(iter); 995 if (!arg || !oldname || !newname) { 996 fuse_reply_err(req, EINVAL); 997 return; 998 } 999 1000 if (req->se->op.rename) { 1001 req->se->op.rename(req, nodeid, oldname, arg->newdir, newname, 0); 1002 } else { 1003 fuse_reply_err(req, ENOSYS); 1004 } 1005} 1006 1007static void do_rename2(fuse_req_t req, fuse_ino_t nodeid, 1008 struct fuse_mbuf_iter *iter) 1009{ 1010 struct fuse_rename2_in *arg; 1011 const char *oldname; 1012 const char *newname; 1013 1014 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1015 oldname = fuse_mbuf_iter_advance_str(iter); 1016 newname = fuse_mbuf_iter_advance_str(iter); 1017 if (!arg || !oldname || !newname) { 1018 fuse_reply_err(req, EINVAL); 1019 return; 1020 } 1021 1022 if (req->se->op.rename) { 1023 req->se->op.rename(req, nodeid, oldname, arg->newdir, newname, 1024 arg->flags); 1025 } else { 1026 fuse_reply_err(req, ENOSYS); 1027 } 1028} 1029 1030static void do_link(fuse_req_t req, fuse_ino_t nodeid, 1031 struct fuse_mbuf_iter *iter) 1032{ 1033 struct fuse_link_in *arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1034 const char *name = fuse_mbuf_iter_advance_str(iter); 1035 1036 if (!arg || !name) { 1037 fuse_reply_err(req, EINVAL); 1038 return; 1039 } 1040 1041 if (req->se->op.link) { 1042 req->se->op.link(req, arg->oldnodeid, nodeid, name); 1043 } else { 1044 fuse_reply_err(req, ENOSYS); 1045 } 1046} 1047 1048static void do_create(fuse_req_t req, fuse_ino_t nodeid, 1049 struct fuse_mbuf_iter *iter) 1050{ 1051 if (req->se->op.create) { 1052 struct fuse_create_in *arg; 1053 struct fuse_file_info fi; 1054 const char *name; 1055 1056 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1057 name = fuse_mbuf_iter_advance_str(iter); 1058 if (!arg || !name) { 1059 fuse_reply_err(req, EINVAL); 1060 return; 1061 } 1062 1063 memset(&fi, 0, sizeof(fi)); 1064 fi.flags = arg->flags; 1065 fi.kill_priv = arg->open_flags & FUSE_OPEN_KILL_SUIDGID; 1066 1067 req->ctx.umask = arg->umask; 1068 1069 req->se->op.create(req, nodeid, name, arg->mode, &fi); 1070 } else { 1071 fuse_reply_err(req, ENOSYS); 1072 } 1073} 1074 1075static void do_open(fuse_req_t req, fuse_ino_t nodeid, 1076 struct fuse_mbuf_iter *iter) 1077{ 1078 struct fuse_open_in *arg; 1079 struct fuse_file_info fi; 1080 1081 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1082 if (!arg) { 1083 fuse_reply_err(req, EINVAL); 1084 return; 1085 } 1086 1087 /* File creation is handled by do_create() or do_mknod() */ 1088 if (arg->flags & (O_CREAT | O_TMPFILE)) { 1089 fuse_reply_err(req, EINVAL); 1090 return; 1091 } 1092 1093 memset(&fi, 0, sizeof(fi)); 1094 fi.flags = arg->flags; 1095 fi.kill_priv = arg->open_flags & FUSE_OPEN_KILL_SUIDGID; 1096 1097 if (req->se->op.open) { 1098 req->se->op.open(req, nodeid, &fi); 1099 } else { 1100 fuse_reply_open(req, &fi); 1101 } 1102} 1103 1104static void do_read(fuse_req_t req, fuse_ino_t nodeid, 1105 struct fuse_mbuf_iter *iter) 1106{ 1107 if (req->se->op.read) { 1108 struct fuse_read_in *arg; 1109 struct fuse_file_info fi; 1110 1111 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1112 if (!arg) { 1113 fuse_reply_err(req, EINVAL); 1114 return; 1115 } 1116 1117 memset(&fi, 0, sizeof(fi)); 1118 fi.fh = arg->fh; 1119 fi.lock_owner = arg->lock_owner; 1120 fi.flags = arg->flags; 1121 req->se->op.read(req, nodeid, arg->size, arg->offset, &fi); 1122 } else { 1123 fuse_reply_err(req, ENOSYS); 1124 } 1125} 1126 1127static void 
do_write(fuse_req_t req, fuse_ino_t nodeid, 1128 struct fuse_mbuf_iter *iter) 1129{ 1130 struct fuse_write_in *arg; 1131 struct fuse_file_info fi; 1132 const char *param; 1133 1134 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1135 if (!arg) { 1136 fuse_reply_err(req, EINVAL); 1137 return; 1138 } 1139 1140 param = fuse_mbuf_iter_advance(iter, arg->size); 1141 if (!param) { 1142 fuse_reply_err(req, EINVAL); 1143 return; 1144 } 1145 1146 memset(&fi, 0, sizeof(fi)); 1147 fi.fh = arg->fh; 1148 fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0; 1149 fi.kill_priv = !!(arg->write_flags & FUSE_WRITE_KILL_PRIV); 1150 1151 fi.lock_owner = arg->lock_owner; 1152 fi.flags = arg->flags; 1153 1154 if (req->se->op.write) { 1155 req->se->op.write(req, nodeid, param, arg->size, arg->offset, &fi); 1156 } else { 1157 fuse_reply_err(req, ENOSYS); 1158 } 1159} 1160 1161static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid, 1162 struct fuse_mbuf_iter *iter, struct fuse_bufvec *ibufv) 1163{ 1164 struct fuse_session *se = req->se; 1165 struct fuse_bufvec *pbufv = ibufv; 1166 struct fuse_bufvec tmpbufv = { 1167 .buf[0] = ibufv->buf[0], 1168 .count = 1, 1169 }; 1170 struct fuse_write_in *arg; 1171 size_t arg_size = sizeof(*arg); 1172 struct fuse_file_info fi; 1173 1174 memset(&fi, 0, sizeof(fi)); 1175 1176 arg = fuse_mbuf_iter_advance(iter, arg_size); 1177 if (!arg) { 1178 fuse_reply_err(req, EINVAL); 1179 return; 1180 } 1181 1182 fi.lock_owner = arg->lock_owner; 1183 fi.flags = arg->flags; 1184 fi.fh = arg->fh; 1185 fi.writepage = !!(arg->write_flags & FUSE_WRITE_CACHE); 1186 fi.kill_priv = !!(arg->write_flags & FUSE_WRITE_KILL_PRIV); 1187 1188 if (ibufv->count == 1) { 1189 assert(!(tmpbufv.buf[0].flags & FUSE_BUF_IS_FD)); 1190 tmpbufv.buf[0].mem = ((char *)arg) + arg_size; 1191 tmpbufv.buf[0].size -= sizeof(struct fuse_in_header) + arg_size; 1192 pbufv = &tmpbufv; 1193 } else { 1194 /* 1195 * Input bufv contains the headers in the first element 1196 * and the data in the rest, we need to skip that first element 1197 */ 1198 ibufv->buf[0].size = 0; 1199 } 1200 1201 if (fuse_buf_size(pbufv) != arg->size) { 1202 fuse_log(FUSE_LOG_ERR, 1203 "fuse: do_write_buf: buffer size doesn't match arg->size\n"); 1204 fuse_reply_err(req, EIO); 1205 return; 1206 } 1207 1208 se->op.write_buf(req, nodeid, pbufv, arg->offset, &fi); 1209} 1210 1211static void do_flush(fuse_req_t req, fuse_ino_t nodeid, 1212 struct fuse_mbuf_iter *iter) 1213{ 1214 struct fuse_flush_in *arg; 1215 struct fuse_file_info fi; 1216 1217 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1218 if (!arg) { 1219 fuse_reply_err(req, EINVAL); 1220 return; 1221 } 1222 1223 memset(&fi, 0, sizeof(fi)); 1224 fi.fh = arg->fh; 1225 fi.flush = 1; 1226 fi.lock_owner = arg->lock_owner; 1227 1228 if (req->se->op.flush) { 1229 req->se->op.flush(req, nodeid, &fi); 1230 } else { 1231 fuse_reply_err(req, ENOSYS); 1232 } 1233} 1234 1235static void do_release(fuse_req_t req, fuse_ino_t nodeid, 1236 struct fuse_mbuf_iter *iter) 1237{ 1238 struct fuse_release_in *arg; 1239 struct fuse_file_info fi; 1240 1241 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1242 if (!arg) { 1243 fuse_reply_err(req, EINVAL); 1244 return; 1245 } 1246 1247 memset(&fi, 0, sizeof(fi)); 1248 fi.flags = arg->flags; 1249 fi.fh = arg->fh; 1250 fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 
1 : 0; 1251 fi.lock_owner = arg->lock_owner; 1252 1253 if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) { 1254 fi.flock_release = 1; 1255 } 1256 1257 if (req->se->op.release) { 1258 req->se->op.release(req, nodeid, &fi); 1259 } else { 1260 fuse_reply_err(req, 0); 1261 } 1262} 1263 1264static void do_fsync(fuse_req_t req, fuse_ino_t nodeid, 1265 struct fuse_mbuf_iter *iter) 1266{ 1267 struct fuse_fsync_in *arg; 1268 struct fuse_file_info fi; 1269 int datasync; 1270 1271 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1272 if (!arg) { 1273 fuse_reply_err(req, EINVAL); 1274 return; 1275 } 1276 datasync = arg->fsync_flags & 1; 1277 1278 memset(&fi, 0, sizeof(fi)); 1279 fi.fh = arg->fh; 1280 1281 if (req->se->op.fsync) { 1282 if (fi.fh == (uint64_t)-1) { 1283 req->se->op.fsync(req, nodeid, datasync, NULL); 1284 } else { 1285 req->se->op.fsync(req, nodeid, datasync, &fi); 1286 } 1287 } else { 1288 fuse_reply_err(req, ENOSYS); 1289 } 1290} 1291 1292static void do_opendir(fuse_req_t req, fuse_ino_t nodeid, 1293 struct fuse_mbuf_iter *iter) 1294{ 1295 struct fuse_open_in *arg; 1296 struct fuse_file_info fi; 1297 1298 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1299 if (!arg) { 1300 fuse_reply_err(req, EINVAL); 1301 return; 1302 } 1303 1304 memset(&fi, 0, sizeof(fi)); 1305 fi.flags = arg->flags; 1306 1307 if (req->se->op.opendir) { 1308 req->se->op.opendir(req, nodeid, &fi); 1309 } else { 1310 fuse_reply_open(req, &fi); 1311 } 1312} 1313 1314static void do_readdir(fuse_req_t req, fuse_ino_t nodeid, 1315 struct fuse_mbuf_iter *iter) 1316{ 1317 struct fuse_read_in *arg; 1318 struct fuse_file_info fi; 1319 1320 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1321 if (!arg) { 1322 fuse_reply_err(req, EINVAL); 1323 return; 1324 } 1325 1326 memset(&fi, 0, sizeof(fi)); 1327 fi.fh = arg->fh; 1328 1329 if (req->se->op.readdir) { 1330 req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi); 1331 } else { 1332 fuse_reply_err(req, ENOSYS); 1333 } 1334} 1335 1336static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid, 1337 struct fuse_mbuf_iter *iter) 1338{ 1339 struct fuse_read_in *arg; 1340 struct fuse_file_info fi; 1341 1342 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1343 if (!arg) { 1344 fuse_reply_err(req, EINVAL); 1345 return; 1346 } 1347 1348 memset(&fi, 0, sizeof(fi)); 1349 fi.fh = arg->fh; 1350 1351 if (req->se->op.readdirplus) { 1352 req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi); 1353 } else { 1354 fuse_reply_err(req, ENOSYS); 1355 } 1356} 1357 1358static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, 1359 struct fuse_mbuf_iter *iter) 1360{ 1361 struct fuse_release_in *arg; 1362 struct fuse_file_info fi; 1363 1364 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1365 if (!arg) { 1366 fuse_reply_err(req, EINVAL); 1367 return; 1368 } 1369 1370 memset(&fi, 0, sizeof(fi)); 1371 fi.flags = arg->flags; 1372 fi.fh = arg->fh; 1373 1374 if (req->se->op.releasedir) { 1375 req->se->op.releasedir(req, nodeid, &fi); 1376 } else { 1377 fuse_reply_err(req, 0); 1378 } 1379} 1380 1381static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid, 1382 struct fuse_mbuf_iter *iter) 1383{ 1384 struct fuse_fsync_in *arg; 1385 struct fuse_file_info fi; 1386 int datasync; 1387 1388 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1389 if (!arg) { 1390 fuse_reply_err(req, EINVAL); 1391 return; 1392 } 1393 datasync = arg->fsync_flags & 1; 1394 1395 memset(&fi, 0, sizeof(fi)); 1396 fi.fh = arg->fh; 1397 1398 if (req->se->op.fsyncdir) { 1399 req->se->op.fsyncdir(req, nodeid, 
datasync, &fi); 1400 } else { 1401 fuse_reply_err(req, ENOSYS); 1402 } 1403} 1404 1405static void do_statfs(fuse_req_t req, fuse_ino_t nodeid, 1406 struct fuse_mbuf_iter *iter) 1407{ 1408 (void)nodeid; 1409 (void)iter; 1410 1411 if (req->se->op.statfs) { 1412 req->se->op.statfs(req, nodeid); 1413 } else { 1414 struct statvfs buf = { 1415 .f_namemax = 255, 1416 .f_bsize = 512, 1417 }; 1418 fuse_reply_statfs(req, &buf); 1419 } 1420} 1421 1422static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid, 1423 struct fuse_mbuf_iter *iter) 1424{ 1425 struct fuse_setxattr_in *arg; 1426 const char *name; 1427 const char *value; 1428 bool setxattr_ext = req->se->conn.want & FUSE_CAP_SETXATTR_EXT; 1429 1430 if (setxattr_ext) { 1431 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1432 } else { 1433 arg = fuse_mbuf_iter_advance(iter, FUSE_COMPAT_SETXATTR_IN_SIZE); 1434 } 1435 name = fuse_mbuf_iter_advance_str(iter); 1436 if (!arg || !name) { 1437 fuse_reply_err(req, EINVAL); 1438 return; 1439 } 1440 1441 value = fuse_mbuf_iter_advance(iter, arg->size); 1442 if (!value) { 1443 fuse_reply_err(req, EINVAL); 1444 return; 1445 } 1446 1447 if (req->se->op.setxattr) { 1448 uint32_t setxattr_flags = setxattr_ext ? arg->setxattr_flags : 0; 1449 req->se->op.setxattr(req, nodeid, name, value, arg->size, arg->flags, 1450 setxattr_flags); 1451 } else { 1452 fuse_reply_err(req, ENOSYS); 1453 } 1454} 1455 1456static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid, 1457 struct fuse_mbuf_iter *iter) 1458{ 1459 struct fuse_getxattr_in *arg; 1460 const char *name; 1461 1462 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1463 name = fuse_mbuf_iter_advance_str(iter); 1464 if (!arg || !name) { 1465 fuse_reply_err(req, EINVAL); 1466 return; 1467 } 1468 1469 if (req->se->op.getxattr) { 1470 req->se->op.getxattr(req, nodeid, name, arg->size); 1471 } else { 1472 fuse_reply_err(req, ENOSYS); 1473 } 1474} 1475 1476static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid, 1477 struct fuse_mbuf_iter *iter) 1478{ 1479 struct fuse_getxattr_in *arg; 1480 1481 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1482 if (!arg) { 1483 fuse_reply_err(req, EINVAL); 1484 return; 1485 } 1486 1487 if (req->se->op.listxattr) { 1488 req->se->op.listxattr(req, nodeid, arg->size); 1489 } else { 1490 fuse_reply_err(req, ENOSYS); 1491 } 1492} 1493 1494static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, 1495 struct fuse_mbuf_iter *iter) 1496{ 1497 const char *name = fuse_mbuf_iter_advance_str(iter); 1498 1499 if (!name) { 1500 fuse_reply_err(req, EINVAL); 1501 return; 1502 } 1503 1504 if (req->se->op.removexattr) { 1505 req->se->op.removexattr(req, nodeid, name); 1506 } else { 1507 fuse_reply_err(req, ENOSYS); 1508 } 1509} 1510 1511static void convert_fuse_file_lock(struct fuse_file_lock *fl, 1512 struct flock *flock) 1513{ 1514 memset(flock, 0, sizeof(struct flock)); 1515 flock->l_type = fl->type; 1516 flock->l_whence = SEEK_SET; 1517 flock->l_start = fl->start; 1518 if (fl->end == OFFSET_MAX) { 1519 flock->l_len = 0; 1520 } else { 1521 flock->l_len = fl->end - fl->start + 1; 1522 } 1523 flock->l_pid = fl->pid; 1524} 1525 1526static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, 1527 struct fuse_mbuf_iter *iter) 1528{ 1529 struct fuse_lk_in *arg; 1530 struct fuse_file_info fi; 1531 struct flock flock; 1532 1533 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1534 if (!arg) { 1535 fuse_reply_err(req, EINVAL); 1536 return; 1537 } 1538 1539 memset(&fi, 0, sizeof(fi)); 1540 fi.fh = arg->fh; 1541 fi.lock_owner = arg->owner; 1542 1543 
convert_fuse_file_lock(&arg->lk, &flock); 1544 if (req->se->op.getlk) { 1545 req->se->op.getlk(req, nodeid, &fi, &flock); 1546 } else { 1547 fuse_reply_err(req, ENOSYS); 1548 } 1549} 1550 1551static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid, 1552 struct fuse_mbuf_iter *iter, int sleep) 1553{ 1554 struct fuse_lk_in *arg; 1555 struct fuse_file_info fi; 1556 struct flock flock; 1557 1558 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1559 if (!arg) { 1560 fuse_reply_err(req, EINVAL); 1561 return; 1562 } 1563 1564 memset(&fi, 0, sizeof(fi)); 1565 fi.fh = arg->fh; 1566 fi.lock_owner = arg->owner; 1567 1568 if (arg->lk_flags & FUSE_LK_FLOCK) { 1569 int op = 0; 1570 1571 switch (arg->lk.type) { 1572 case F_RDLCK: 1573 op = LOCK_SH; 1574 break; 1575 case F_WRLCK: 1576 op = LOCK_EX; 1577 break; 1578 case F_UNLCK: 1579 op = LOCK_UN; 1580 break; 1581 } 1582 if (!sleep) { 1583 op |= LOCK_NB; 1584 } 1585 1586 if (req->se->op.flock) { 1587 req->se->op.flock(req, nodeid, &fi, op); 1588 } else { 1589 fuse_reply_err(req, ENOSYS); 1590 } 1591 } else { 1592 convert_fuse_file_lock(&arg->lk, &flock); 1593 if (req->se->op.setlk) { 1594 req->se->op.setlk(req, nodeid, &fi, &flock, sleep); 1595 } else { 1596 fuse_reply_err(req, ENOSYS); 1597 } 1598 } 1599} 1600 1601static void do_setlk(fuse_req_t req, fuse_ino_t nodeid, 1602 struct fuse_mbuf_iter *iter) 1603{ 1604 do_setlk_common(req, nodeid, iter, 0); 1605} 1606 1607static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, 1608 struct fuse_mbuf_iter *iter) 1609{ 1610 do_setlk_common(req, nodeid, iter, 1); 1611} 1612 1613static int find_interrupted(struct fuse_session *se, struct fuse_req *req) 1614{ 1615 struct fuse_req *curr; 1616 1617 for (curr = se->list.next; curr != &se->list; curr = curr->next) { 1618 if (curr->unique == req->u.i.unique) { 1619 fuse_interrupt_func_t func; 1620 void *data; 1621 1622 curr->ctr++; 1623 pthread_mutex_unlock(&se->lock); 1624 1625 /* Ugh, ugly locking */ 1626 pthread_mutex_lock(&curr->lock); 1627 pthread_mutex_lock(&se->lock); 1628 curr->interrupted = 1; 1629 func = curr->u.ni.func; 1630 data = curr->u.ni.data; 1631 pthread_mutex_unlock(&se->lock); 1632 if (func) { 1633 func(curr, data); 1634 } 1635 pthread_mutex_unlock(&curr->lock); 1636 1637 pthread_mutex_lock(&se->lock); 1638 curr->ctr--; 1639 if (!curr->ctr) { 1640 destroy_req(curr); 1641 } 1642 1643 return 1; 1644 } 1645 } 1646 for (curr = se->interrupts.next; curr != &se->interrupts; 1647 curr = curr->next) { 1648 if (curr->u.i.unique == req->u.i.unique) { 1649 return 1; 1650 } 1651 } 1652 return 0; 1653} 1654 1655static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, 1656 struct fuse_mbuf_iter *iter) 1657{ 1658 struct fuse_interrupt_in *arg; 1659 struct fuse_session *se = req->se; 1660 1661 (void)nodeid; 1662 1663 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1664 if (!arg) { 1665 fuse_reply_err(req, EINVAL); 1666 return; 1667 } 1668 1669 fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n", 1670 (unsigned long long)arg->unique); 1671 1672 req->u.i.unique = arg->unique; 1673 1674 pthread_mutex_lock(&se->lock); 1675 if (find_interrupted(se, req)) { 1676 destroy_req(req); 1677 } else { 1678 list_add_req(req, &se->interrupts); 1679 } 1680 pthread_mutex_unlock(&se->lock); 1681} 1682 1683static struct fuse_req *check_interrupt(struct fuse_session *se, 1684 struct fuse_req *req) 1685{ 1686 struct fuse_req *curr; 1687 1688 for (curr = se->interrupts.next; curr != &se->interrupts; 1689 curr = curr->next) { 1690 if (curr->u.i.unique == req->unique) { 1691 
req->interrupted = 1; 1692 list_del_req(curr); 1693 g_free(curr); 1694 return NULL; 1695 } 1696 } 1697 curr = se->interrupts.next; 1698 if (curr != &se->interrupts) { 1699 list_del_req(curr); 1700 list_init_req(curr); 1701 return curr; 1702 } else { 1703 return NULL; 1704 } 1705} 1706 1707static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, 1708 struct fuse_mbuf_iter *iter) 1709{ 1710 struct fuse_bmap_in *arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1711 1712 if (!arg) { 1713 fuse_reply_err(req, EINVAL); 1714 return; 1715 } 1716 1717 if (req->se->op.bmap) { 1718 req->se->op.bmap(req, nodeid, arg->blocksize, arg->block); 1719 } else { 1720 fuse_reply_err(req, ENOSYS); 1721 } 1722} 1723 1724static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, 1725 struct fuse_mbuf_iter *iter) 1726{ 1727 struct fuse_ioctl_in *arg; 1728 unsigned int flags; 1729 void *in_buf = NULL; 1730 struct fuse_file_info fi; 1731 1732 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1733 if (!arg) { 1734 fuse_reply_err(req, EINVAL); 1735 return; 1736 } 1737 1738 flags = arg->flags; 1739 if (flags & FUSE_IOCTL_DIR && !(req->se->conn.want & FUSE_CAP_IOCTL_DIR)) { 1740 fuse_reply_err(req, ENOTTY); 1741 return; 1742 } 1743 1744 if (arg->in_size) { 1745 in_buf = fuse_mbuf_iter_advance(iter, arg->in_size); 1746 if (!in_buf) { 1747 fuse_reply_err(req, EINVAL); 1748 return; 1749 } 1750 } 1751 1752 memset(&fi, 0, sizeof(fi)); 1753 fi.fh = arg->fh; 1754 1755 if (sizeof(void *) == 4 && !(flags & FUSE_IOCTL_32BIT)) { 1756 req->ioctl_64bit = 1; 1757 } 1758 1759 if (req->se->op.ioctl) { 1760 req->se->op.ioctl(req, nodeid, arg->cmd, (void *)(uintptr_t)arg->arg, 1761 &fi, flags, in_buf, arg->in_size, arg->out_size); 1762 } else { 1763 fuse_reply_err(req, ENOSYS); 1764 } 1765} 1766 1767void fuse_pollhandle_destroy(struct fuse_pollhandle *ph) 1768{ 1769 free(ph); 1770} 1771 1772static void do_poll(fuse_req_t req, fuse_ino_t nodeid, 1773 struct fuse_mbuf_iter *iter) 1774{ 1775 struct fuse_poll_in *arg; 1776 struct fuse_file_info fi; 1777 1778 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1779 if (!arg) { 1780 fuse_reply_err(req, EINVAL); 1781 return; 1782 } 1783 1784 memset(&fi, 0, sizeof(fi)); 1785 fi.fh = arg->fh; 1786 fi.poll_events = arg->events; 1787 1788 if (req->se->op.poll) { 1789 struct fuse_pollhandle *ph = NULL; 1790 1791 if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) { 1792 ph = malloc(sizeof(struct fuse_pollhandle)); 1793 if (ph == NULL) { 1794 fuse_reply_err(req, ENOMEM); 1795 return; 1796 } 1797 ph->kh = arg->kh; 1798 ph->se = req->se; 1799 } 1800 1801 req->se->op.poll(req, nodeid, &fi, ph); 1802 } else { 1803 fuse_reply_err(req, ENOSYS); 1804 } 1805} 1806 1807static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid, 1808 struct fuse_mbuf_iter *iter) 1809{ 1810 struct fuse_fallocate_in *arg; 1811 struct fuse_file_info fi; 1812 1813 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1814 if (!arg) { 1815 fuse_reply_err(req, EINVAL); 1816 return; 1817 } 1818 1819 memset(&fi, 0, sizeof(fi)); 1820 fi.fh = arg->fh; 1821 1822 if (req->se->op.fallocate) { 1823 req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length, 1824 &fi); 1825 } else { 1826 fuse_reply_err(req, ENOSYS); 1827 } 1828} 1829 1830static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in, 1831 struct fuse_mbuf_iter *iter) 1832{ 1833 struct fuse_copy_file_range_in *arg; 1834 struct fuse_file_info fi_in, fi_out; 1835 1836 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1837 if (!arg) { 1838 fuse_reply_err(req, EINVAL); 1839 return; 
1840 } 1841 1842 memset(&fi_in, 0, sizeof(fi_in)); 1843 fi_in.fh = arg->fh_in; 1844 1845 memset(&fi_out, 0, sizeof(fi_out)); 1846 fi_out.fh = arg->fh_out; 1847 1848 1849 if (req->se->op.copy_file_range) { 1850 req->se->op.copy_file_range(req, nodeid_in, arg->off_in, &fi_in, 1851 arg->nodeid_out, arg->off_out, &fi_out, 1852 arg->len, arg->flags); 1853 } else { 1854 fuse_reply_err(req, ENOSYS); 1855 } 1856} 1857 1858static void do_lseek(fuse_req_t req, fuse_ino_t nodeid, 1859 struct fuse_mbuf_iter *iter) 1860{ 1861 struct fuse_lseek_in *arg; 1862 struct fuse_file_info fi; 1863 1864 arg = fuse_mbuf_iter_advance(iter, sizeof(*arg)); 1865 if (!arg) { 1866 fuse_reply_err(req, EINVAL); 1867 return; 1868 } 1869 memset(&fi, 0, sizeof(fi)); 1870 fi.fh = arg->fh; 1871 1872 if (req->se->op.lseek) { 1873 req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi); 1874 } else { 1875 fuse_reply_err(req, ENOSYS); 1876 } 1877} 1878 1879static void do_init(fuse_req_t req, fuse_ino_t nodeid, 1880 struct fuse_mbuf_iter *iter) 1881{ 1882 size_t compat_size = offsetof(struct fuse_init_in, max_readahead); 1883 struct fuse_init_in *arg; 1884 struct fuse_init_out outarg; 1885 struct fuse_session *se = req->se; 1886 size_t bufsize = se->bufsize; 1887 size_t outargsize = sizeof(outarg); 1888 1889 (void)nodeid; 1890 1891 /* First consume the old fields... */ 1892 arg = fuse_mbuf_iter_advance(iter, compat_size); 1893 if (!arg) { 1894 fuse_reply_err(req, EINVAL); 1895 return; 1896 } 1897 1898 /* ...and now consume the new fields. */ 1899 if (arg->major == 7 && arg->minor >= 6) { 1900 if (!fuse_mbuf_iter_advance(iter, sizeof(*arg) - compat_size)) { 1901 fuse_reply_err(req, EINVAL); 1902 return; 1903 } 1904 } 1905 1906 fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor); 1907 if (arg->major == 7 && arg->minor >= 6) { 1908 fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags); 1909 fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n", arg->max_readahead); 1910 } 1911 se->conn.proto_major = arg->major; 1912 se->conn.proto_minor = arg->minor; 1913 se->conn.capable = 0; 1914 se->conn.want = 0; 1915 1916 memset(&outarg, 0, sizeof(outarg)); 1917 outarg.major = FUSE_KERNEL_VERSION; 1918 outarg.minor = FUSE_KERNEL_MINOR_VERSION; 1919 1920 if (arg->major < 7 || (arg->major == 7 && arg->minor < 31)) { 1921 fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n", 1922 arg->major, arg->minor); 1923 fuse_reply_err(req, EPROTO); 1924 return; 1925 } 1926 1927 if (arg->major > 7) { 1928 /* Wait for a second INIT request with a 7.X version */ 1929 send_reply_ok(req, &outarg, sizeof(outarg)); 1930 return; 1931 } 1932 1933 if (arg->max_readahead < se->conn.max_readahead) { 1934 se->conn.max_readahead = arg->max_readahead; 1935 } 1936 if (arg->flags & FUSE_ASYNC_READ) { 1937 se->conn.capable |= FUSE_CAP_ASYNC_READ; 1938 } 1939 if (arg->flags & FUSE_POSIX_LOCKS) { 1940 se->conn.capable |= FUSE_CAP_POSIX_LOCKS; 1941 } 1942 if (arg->flags & FUSE_ATOMIC_O_TRUNC) { 1943 se->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC; 1944 } 1945 if (arg->flags & FUSE_EXPORT_SUPPORT) { 1946 se->conn.capable |= FUSE_CAP_EXPORT_SUPPORT; 1947 } 1948 if (arg->flags & FUSE_DONT_MASK) { 1949 se->conn.capable |= FUSE_CAP_DONT_MASK; 1950 } 1951 if (arg->flags & FUSE_FLOCK_LOCKS) { 1952 se->conn.capable |= FUSE_CAP_FLOCK_LOCKS; 1953 } 1954 if (arg->flags & FUSE_AUTO_INVAL_DATA) { 1955 se->conn.capable |= FUSE_CAP_AUTO_INVAL_DATA; 1956 } 1957 if (arg->flags & FUSE_DO_READDIRPLUS) { 1958 se->conn.capable |= FUSE_CAP_READDIRPLUS; 1959 } 1960 if 
(arg->flags & FUSE_READDIRPLUS_AUTO) { 1961 se->conn.capable |= FUSE_CAP_READDIRPLUS_AUTO; 1962 } 1963 if (arg->flags & FUSE_ASYNC_DIO) { 1964 se->conn.capable |= FUSE_CAP_ASYNC_DIO; 1965 } 1966 if (arg->flags & FUSE_WRITEBACK_CACHE) { 1967 se->conn.capable |= FUSE_CAP_WRITEBACK_CACHE; 1968 } 1969 if (arg->flags & FUSE_NO_OPEN_SUPPORT) { 1970 se->conn.capable |= FUSE_CAP_NO_OPEN_SUPPORT; 1971 } 1972 if (arg->flags & FUSE_PARALLEL_DIROPS) { 1973 se->conn.capable |= FUSE_CAP_PARALLEL_DIROPS; 1974 } 1975 if (arg->flags & FUSE_POSIX_ACL) { 1976 se->conn.capable |= FUSE_CAP_POSIX_ACL; 1977 } 1978 if (arg->flags & FUSE_HANDLE_KILLPRIV) { 1979 se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV; 1980 } 1981 if (arg->flags & FUSE_NO_OPENDIR_SUPPORT) { 1982 se->conn.capable |= FUSE_CAP_NO_OPENDIR_SUPPORT; 1983 } 1984 if (!(arg->flags & FUSE_MAX_PAGES)) { 1985 size_t max_bufsize = FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize() + 1986 FUSE_BUFFER_HEADER_SIZE; 1987 if (bufsize > max_bufsize) { 1988 bufsize = max_bufsize; 1989 } 1990 } 1991 if (arg->flags & FUSE_SUBMOUNTS) { 1992 se->conn.capable |= FUSE_CAP_SUBMOUNTS; 1993 } 1994 if (arg->flags & FUSE_HANDLE_KILLPRIV_V2) { 1995 se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV_V2; 1996 } 1997 if (arg->flags & FUSE_SETXATTR_EXT) { 1998 se->conn.capable |= FUSE_CAP_SETXATTR_EXT; 1999 } 2000#ifdef HAVE_SPLICE 2001#ifdef HAVE_VMSPLICE 2002 se->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE; 2003#endif 2004 se->conn.capable |= FUSE_CAP_SPLICE_READ; 2005#endif 2006 se->conn.capable |= FUSE_CAP_IOCTL_DIR; 2007 2008 /* 2009 * Default settings for modern filesystems. 2010 * 2011 * Most of these capabilities were disabled by default in 2012 * libfuse2 for backwards compatibility reasons. In libfuse3, 2013 * we can finally enable them by default (as long as they're 2014 * supported by the kernel). 
2015 */ 2016#define LL_SET_DEFAULT(cond, cap) \ 2017 if ((cond) && (se->conn.capable & (cap))) \ 2018 se->conn.want |= (cap) 2019 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ); 2020 LL_SET_DEFAULT(1, FUSE_CAP_PARALLEL_DIROPS); 2021 LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA); 2022 LL_SET_DEFAULT(1, FUSE_CAP_HANDLE_KILLPRIV); 2023 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO); 2024 LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR); 2025 LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC); 2026 LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ); 2027 LL_SET_DEFAULT(se->op.getlk && se->op.setlk, FUSE_CAP_POSIX_LOCKS); 2028 LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS); 2029 LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS); 2030 LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir, 2031 FUSE_CAP_READDIRPLUS_AUTO); 2032 se->conn.time_gran = 1; 2033 2034 if (bufsize < FUSE_MIN_READ_BUFFER) { 2035 fuse_log(FUSE_LOG_ERR, "fuse: warning: buffer size too small: %zu\n", 2036 bufsize); 2037 bufsize = FUSE_MIN_READ_BUFFER; 2038 } 2039 se->bufsize = bufsize; 2040 2041 if (se->conn.max_write > bufsize - FUSE_BUFFER_HEADER_SIZE) { 2042 se->conn.max_write = bufsize - FUSE_BUFFER_HEADER_SIZE; 2043 } 2044 2045 se->got_init = 1; 2046 se->got_destroy = 0; 2047 if (se->op.init) { 2048 se->op.init(se->userdata, &se->conn); 2049 } 2050 2051 if (se->conn.want & (~se->conn.capable)) { 2052 fuse_log(FUSE_LOG_ERR, 2053 "fuse: error: filesystem requested capabilities " 2054 "0x%x that are not supported by kernel, aborting.\n", 2055 se->conn.want & (~se->conn.capable)); 2056 fuse_reply_err(req, EPROTO); 2057 se->error = -EPROTO; 2058 fuse_session_exit(se); 2059 return; 2060 } 2061 2062 if (se->conn.max_write < bufsize - FUSE_BUFFER_HEADER_SIZE) { 2063 se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE; 2064 } 2065 if (arg->flags & FUSE_MAX_PAGES) { 2066 outarg.flags |= FUSE_MAX_PAGES; 2067 outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1; 2068 } 2069 2070 /* 2071 * Always enable big writes, this is superseded 2072 * by the max_write option 2073 */ 2074 outarg.flags |= FUSE_BIG_WRITES; 2075 2076 if (se->conn.want & FUSE_CAP_ASYNC_READ) { 2077 outarg.flags |= FUSE_ASYNC_READ; 2078 } 2079 if (se->conn.want & FUSE_CAP_PARALLEL_DIROPS) { 2080 outarg.flags |= FUSE_PARALLEL_DIROPS; 2081 } 2082 if (se->conn.want & FUSE_CAP_POSIX_LOCKS) { 2083 outarg.flags |= FUSE_POSIX_LOCKS; 2084 } 2085 if (se->conn.want & FUSE_CAP_ATOMIC_O_TRUNC) { 2086 outarg.flags |= FUSE_ATOMIC_O_TRUNC; 2087 } 2088 if (se->conn.want & FUSE_CAP_EXPORT_SUPPORT) { 2089 outarg.flags |= FUSE_EXPORT_SUPPORT; 2090 } 2091 if (se->conn.want & FUSE_CAP_DONT_MASK) { 2092 outarg.flags |= FUSE_DONT_MASK; 2093 } 2094 if (se->conn.want & FUSE_CAP_FLOCK_LOCKS) { 2095 outarg.flags |= FUSE_FLOCK_LOCKS; 2096 } 2097 if (se->conn.want & FUSE_CAP_AUTO_INVAL_DATA) { 2098 outarg.flags |= FUSE_AUTO_INVAL_DATA; 2099 } 2100 if (se->conn.want & FUSE_CAP_READDIRPLUS) { 2101 outarg.flags |= FUSE_DO_READDIRPLUS; 2102 } 2103 if (se->conn.want & FUSE_CAP_READDIRPLUS_AUTO) { 2104 outarg.flags |= FUSE_READDIRPLUS_AUTO; 2105 } 2106 if (se->conn.want & FUSE_CAP_ASYNC_DIO) { 2107 outarg.flags |= FUSE_ASYNC_DIO; 2108 } 2109 if (se->conn.want & FUSE_CAP_WRITEBACK_CACHE) { 2110 outarg.flags |= FUSE_WRITEBACK_CACHE; 2111 } 2112 if (se->conn.want & FUSE_CAP_POSIX_ACL) { 2113 outarg.flags |= FUSE_POSIX_ACL; 2114 } 2115 outarg.max_readahead = se->conn.max_readahead; 2116 outarg.max_write = se->conn.max_write; 2117 if (se->conn.max_background >= (1 << 16)) { 2118 se->conn.max_background = (1 << 
16) - 1; 2119 } 2120 if (se->conn.congestion_threshold > se->conn.max_background) { 2121 se->conn.congestion_threshold = se->conn.max_background; 2122 } 2123 if (!se->conn.congestion_threshold) { 2124 se->conn.congestion_threshold = se->conn.max_background * 3 / 4; 2125 } 2126 2127 outarg.max_background = se->conn.max_background; 2128 outarg.congestion_threshold = se->conn.congestion_threshold; 2129 outarg.time_gran = se->conn.time_gran; 2130 2131 if (se->conn.want & FUSE_CAP_HANDLE_KILLPRIV_V2) { 2132 outarg.flags |= FUSE_HANDLE_KILLPRIV_V2; 2133 } 2134 2135 if (se->conn.want & FUSE_CAP_SETXATTR_EXT) { 2136 outarg.flags |= FUSE_SETXATTR_EXT; 2137 } 2138 2139 fuse_log(FUSE_LOG_DEBUG, " INIT: %u.%u\n", outarg.major, outarg.minor); 2140 fuse_log(FUSE_LOG_DEBUG, " flags=0x%08x\n", outarg.flags); 2141 fuse_log(FUSE_LOG_DEBUG, " max_readahead=0x%08x\n", outarg.max_readahead); 2142 fuse_log(FUSE_LOG_DEBUG, " max_write=0x%08x\n", outarg.max_write); 2143 fuse_log(FUSE_LOG_DEBUG, " max_background=%i\n", outarg.max_background); 2144 fuse_log(FUSE_LOG_DEBUG, " congestion_threshold=%i\n", 2145 outarg.congestion_threshold); 2146 fuse_log(FUSE_LOG_DEBUG, " time_gran=%u\n", outarg.time_gran); 2147 2148 send_reply_ok(req, &outarg, outargsize); 2149} 2150 2151static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, 2152 struct fuse_mbuf_iter *iter) 2153{ 2154 struct fuse_session *se = req->se; 2155 2156 (void)nodeid; 2157 (void)iter; 2158 2159 se->got_destroy = 1; 2160 se->got_init = 0; 2161 if (se->op.destroy) { 2162 se->op.destroy(se->userdata); 2163 } 2164 2165 send_reply_ok(req, NULL, 0); 2166} 2167 2168int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino, 2169 off_t offset, struct fuse_bufvec *bufv) 2170{ 2171 struct fuse_out_header out = { 2172 .error = FUSE_NOTIFY_STORE, 2173 }; 2174 struct fuse_notify_store_out outarg = { 2175 .nodeid = ino, 2176 .offset = offset, 2177 .size = fuse_buf_size(bufv), 2178 }; 2179 struct iovec iov[3]; 2180 int res; 2181 2182 if (!se) { 2183 return -EINVAL; 2184 } 2185 2186 iov[0].iov_base = &out; 2187 iov[0].iov_len = sizeof(out); 2188 iov[1].iov_base = &outarg; 2189 iov[1].iov_len = sizeof(outarg); 2190 2191 res = fuse_send_data_iov(se, NULL, iov, 2, bufv); 2192 if (res > 0) { 2193 res = -res; 2194 } 2195 2196 return res; 2197} 2198 2199void *fuse_req_userdata(fuse_req_t req) 2200{ 2201 return req->se->userdata; 2202} 2203 2204const struct fuse_ctx *fuse_req_ctx(fuse_req_t req) 2205{ 2206 return &req->ctx; 2207} 2208 2209void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func, 2210 void *data) 2211{ 2212 pthread_mutex_lock(&req->lock); 2213 pthread_mutex_lock(&req->se->lock); 2214 req->u.ni.func = func; 2215 req->u.ni.data = data; 2216 pthread_mutex_unlock(&req->se->lock); 2217 if (req->interrupted && func) { 2218 func(req, data); 2219 } 2220 pthread_mutex_unlock(&req->lock); 2221} 2222 2223int fuse_req_interrupted(fuse_req_t req) 2224{ 2225 int interrupted; 2226 2227 pthread_mutex_lock(&req->se->lock); 2228 interrupted = req->interrupted; 2229 pthread_mutex_unlock(&req->se->lock); 2230 2231 return interrupted; 2232} 2233 2234static struct { 2235 void (*func)(fuse_req_t, fuse_ino_t, struct fuse_mbuf_iter *); 2236 const char *name; 2237} fuse_ll_ops[] = { 2238 [FUSE_LOOKUP] = { do_lookup, "LOOKUP" }, 2239 [FUSE_FORGET] = { do_forget, "FORGET" }, 2240 [FUSE_GETATTR] = { do_getattr, "GETATTR" }, 2241 [FUSE_SETATTR] = { do_setattr, "SETATTR" }, 2242 [FUSE_READLINK] = { do_readlink, "READLINK" }, 2243 [FUSE_SYMLINK] = { do_symlink, 
"SYMLINK" }, 2244 [FUSE_MKNOD] = { do_mknod, "MKNOD" }, 2245 [FUSE_MKDIR] = { do_mkdir, "MKDIR" }, 2246 [FUSE_UNLINK] = { do_unlink, "UNLINK" }, 2247 [FUSE_RMDIR] = { do_rmdir, "RMDIR" }, 2248 [FUSE_RENAME] = { do_rename, "RENAME" }, 2249 [FUSE_LINK] = { do_link, "LINK" }, 2250 [FUSE_OPEN] = { do_open, "OPEN" }, 2251 [FUSE_READ] = { do_read, "READ" }, 2252 [FUSE_WRITE] = { do_write, "WRITE" }, 2253 [FUSE_STATFS] = { do_statfs, "STATFS" }, 2254 [FUSE_RELEASE] = { do_release, "RELEASE" }, 2255 [FUSE_FSYNC] = { do_fsync, "FSYNC" }, 2256 [FUSE_SETXATTR] = { do_setxattr, "SETXATTR" }, 2257 [FUSE_GETXATTR] = { do_getxattr, "GETXATTR" }, 2258 [FUSE_LISTXATTR] = { do_listxattr, "LISTXATTR" }, 2259 [FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" }, 2260 [FUSE_FLUSH] = { do_flush, "FLUSH" }, 2261 [FUSE_INIT] = { do_init, "INIT" }, 2262 [FUSE_OPENDIR] = { do_opendir, "OPENDIR" }, 2263 [FUSE_READDIR] = { do_readdir, "READDIR" }, 2264 [FUSE_RELEASEDIR] = { do_releasedir, "RELEASEDIR" }, 2265 [FUSE_FSYNCDIR] = { do_fsyncdir, "FSYNCDIR" }, 2266 [FUSE_GETLK] = { do_getlk, "GETLK" }, 2267 [FUSE_SETLK] = { do_setlk, "SETLK" }, 2268 [FUSE_SETLKW] = { do_setlkw, "SETLKW" }, 2269 [FUSE_ACCESS] = { do_access, "ACCESS" }, 2270 [FUSE_CREATE] = { do_create, "CREATE" }, 2271 [FUSE_INTERRUPT] = { do_interrupt, "INTERRUPT" }, 2272 [FUSE_BMAP] = { do_bmap, "BMAP" }, 2273 [FUSE_IOCTL] = { do_ioctl, "IOCTL" }, 2274 [FUSE_POLL] = { do_poll, "POLL" }, 2275 [FUSE_FALLOCATE] = { do_fallocate, "FALLOCATE" }, 2276 [FUSE_DESTROY] = { do_destroy, "DESTROY" }, 2277 [FUSE_NOTIFY_REPLY] = { NULL, "NOTIFY_REPLY" }, 2278 [FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" }, 2279 [FUSE_READDIRPLUS] = { do_readdirplus, "READDIRPLUS" }, 2280 [FUSE_RENAME2] = { do_rename2, "RENAME2" }, 2281 [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" }, 2282 [FUSE_LSEEK] = { do_lseek, "LSEEK" }, 2283}; 2284 2285#define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0])) 2286 2287static const char *opname(enum fuse_opcode opcode) 2288{ 2289 if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name) { 2290 return "???"; 2291 } else { 2292 return fuse_ll_ops[opcode].name; 2293 } 2294} 2295 2296void fuse_session_process_buf(struct fuse_session *se, 2297 const struct fuse_buf *buf) 2298{ 2299 struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 }; 2300 fuse_session_process_buf_int(se, &bufv, NULL); 2301} 2302 2303/* 2304 * Restriction: 2305 * bufv is normally a single entry buffer, except for a write 2306 * where (if it's in memory) then the bufv may be multiple entries, 2307 * where the first entry contains all headers and subsequent entries 2308 * contain data 2309 * bufv shall not use any offsets etc to make the data anything 2310 * other than contiguous starting from 0. 

/*
 * Restriction:
 *   bufv is normally a single-entry buffer.  The exception is a write
 *   request whose data is in memory: there bufv may have multiple
 *   entries, with the first entry holding all of the headers and the
 *   subsequent entries holding the data.
 *   bufv must not use offsets etc.; the data has to be contiguous,
 *   starting from 0.
 */
void fuse_session_process_buf_int(struct fuse_session *se,
                                  struct fuse_bufvec *bufv,
                                  struct fuse_chan *ch)
{
    const struct fuse_buf *buf = bufv->buf;
    struct fuse_mbuf_iter iter = FUSE_MBUF_ITER_INIT(buf);
    struct fuse_in_header *in;
    struct fuse_req *req;
    int err;

    /* The first buffer must be a memory buffer */
    assert(!(buf->flags & FUSE_BUF_IS_FD));

    in = fuse_mbuf_iter_advance(&iter, sizeof(*in));
    assert(in); /* caller guarantees the input buffer is large enough */

    fuse_log(
        FUSE_LOG_DEBUG,
        "unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
        (unsigned long long)in->unique, opname((enum fuse_opcode)in->opcode),
        in->opcode, (unsigned long long)in->nodeid, buf->size, in->pid);

    req = fuse_ll_alloc_req(se);
    if (req == NULL) {
        struct fuse_out_header out = {
            .unique = in->unique,
            .error = -ENOMEM,
        };
        struct iovec iov = {
            .iov_base = &out,
            .iov_len = sizeof(struct fuse_out_header),
        };

        fuse_send_msg(se, ch, &iov, 1);
        return;
    }

    req->unique = in->unique;
    req->ctx.uid = in->uid;
    req->ctx.gid = in->gid;
    req->ctx.pid = in->pid;
    req->ch = ch;

    /*
     * INIT and DESTROY requests are serialized, all other request types
     * run in parallel. This prevents races between FUSE_INIT and ordinary
     * requests, FUSE_INIT and FUSE_INIT, FUSE_INIT and FUSE_DESTROY, and
     * FUSE_DESTROY and FUSE_DESTROY.
     */
    if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT ||
        in->opcode == FUSE_DESTROY) {
        pthread_rwlock_wrlock(&se->init_rwlock);
    } else {
        pthread_rwlock_rdlock(&se->init_rwlock);
    }
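
    /*
     * Until the first INIT has been processed, only the expected init
     * opcode (CUSE_INIT for CUSE sessions, FUSE_INIT otherwise) is
     * accepted; everything else is rejected with EIO below.
     */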
    err = EIO;
    if (!se->got_init) {
        enum fuse_opcode expected;

        expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
        if (in->opcode != expected) {
            goto reply_err;
        }
    } else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT) {
        if (fuse_lowlevel_is_virtio(se)) {
            /*
             * TODO: This typically happens after a hard guest reboot.
             * We need to do a destroy, but we cannot reply to this
             * request yet, so we cannot use do_destroy().
             */
            fuse_log(FUSE_LOG_DEBUG, "%s: reinit\n", __func__);
            se->got_destroy = 1;
            se->got_init = 0;
            if (se->op.destroy) {
                se->op.destroy(se->userdata);
            }
        } else {
            goto reply_err;
        }
    }

    err = EACCES;
    /* Implement -o allow_root */
    if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
        in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
        in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
        in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
        in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
        in->opcode != FUSE_NOTIFY_REPLY && in->opcode != FUSE_READDIRPLUS) {
        goto reply_err;
    }

    err = ENOSYS;
    if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func) {
        goto reply_err;
    }
    if (in->opcode != FUSE_INTERRUPT) {
        struct fuse_req *intr;
        pthread_mutex_lock(&se->lock);
        intr = check_interrupt(se, req);
        list_add_req(req, &se->list);
        pthread_mutex_unlock(&se->lock);
        if (intr) {
            fuse_reply_err(intr, EAGAIN);
        }
    }

    if (in->opcode == FUSE_WRITE && se->op.write_buf) {
        do_write_buf(req, in->nodeid, &iter, bufv);
    } else {
        fuse_ll_ops[in->opcode].func(req, in->nodeid, &iter);
    }

    pthread_rwlock_unlock(&se->init_rwlock);
    return;

reply_err:
    fuse_reply_err(req, err);
    pthread_rwlock_unlock(&se->init_rwlock);
}

#define LL_OPTION(n, o, v)                     \
    {                                          \
        n, offsetof(struct fuse_session, o), v \
    }

static const struct fuse_opt fuse_ll_opts[] = {
    LL_OPTION("debug", debug, 1),
    LL_OPTION("-d", debug, 1),
    LL_OPTION("--debug", debug, 1),
    LL_OPTION("allow_root", deny_others, 1),
    LL_OPTION("--socket-path=%s", vu_socket_path, 0),
    LL_OPTION("--socket-group=%s", vu_socket_group, 0),
    LL_OPTION("--fd=%d", vu_listen_fd, 0),
    LL_OPTION("--thread-pool-size=%d", thread_pool_size, 0),
    FUSE_OPT_END
};

void fuse_lowlevel_version(void)
{
    printf("using FUSE kernel interface version %i.%i\n", FUSE_KERNEL_VERSION,
           FUSE_KERNEL_MINOR_VERSION);
}

void fuse_lowlevel_help(void)
{
    /*
     * These are not all options, but the ones that are
     * potentially of interest to an end-user
     */
    printf(
        "    -o allow_root              allow access by root\n"
        "    --socket-path=PATH         path for the vhost-user socket\n"
        "    --socket-group=GRNAME      name of group for the vhost-user socket\n"
        "    --fd=FDNUM                 fd number of vhost-user socket\n"
        "    --thread-pool-size=NUM     thread pool size limit (default %d)\n",
        THREAD_POOL_SIZE);
}

void fuse_session_destroy(struct fuse_session *se)
{
    if (se->got_init && !se->got_destroy) {
        if (se->op.destroy) {
            se->op.destroy(se->userdata);
        }
    }
    pthread_rwlock_destroy(&se->init_rwlock);
    pthread_mutex_destroy(&se->lock);
    free(se->cuse_data);
    if (se->fd != -1) {
        close(se->fd);
    }

    if (fuse_lowlevel_is_virtio(se)) {
        virtio_session_close(se);
    }

    free(se->vu_socket_path);
    se->vu_socket_path = NULL;

    g_free(se);
}
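
/*
 * Create a session from the given command-line arguments; returns NULL
 * on error.  The session is mounted with fuse_session_mount() and
 * released with fuse_session_destroy().
 */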
struct fuse_session *fuse_session_new(struct fuse_args *args,
                                      const struct fuse_lowlevel_ops *op,
                                      size_t op_size, void *userdata)
{
    struct fuse_session *se;

    if (sizeof(struct fuse_lowlevel_ops) < op_size) {
        fuse_log(
            FUSE_LOG_ERR,
            "fuse: warning: library too old, some operations may not work\n");
        op_size = sizeof(struct fuse_lowlevel_ops);
    }

    if (args->argc == 0) {
        fuse_log(FUSE_LOG_ERR,
                 "fuse: empty argv passed to fuse_session_new().\n");
        return NULL;
    }

    se = g_try_new0(struct fuse_session, 1);
    if (se == NULL) {
        fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
        goto out1;
    }
    se->fd = -1;
    se->vu_listen_fd = -1;
    se->thread_pool_size = THREAD_POOL_SIZE;
    se->conn.max_write = UINT_MAX;
    se->conn.max_readahead = UINT_MAX;

    /* Parse options */
    if (fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1) {
        goto out2;
    }
    if (args->argc == 1 && args->argv[0][0] == '-') {
        fuse_log(FUSE_LOG_ERR,
                 "fuse: warning: argv[0] looks like an option, but "
                 "will be ignored\n");
    } else if (args->argc != 1) {
        int i;
        fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
        for (i = 1; i < args->argc - 1; i++) {
            fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
        }
        fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
        goto out4;
    }

    if (!se->vu_socket_path && se->vu_listen_fd < 0) {
        fuse_log(FUSE_LOG_ERR, "fuse: missing --socket-path or --fd option\n");
        goto out4;
    }
    if (se->vu_socket_path && se->vu_listen_fd >= 0) {
        fuse_log(FUSE_LOG_ERR,
                 "fuse: --socket-path and --fd cannot be given together\n");
        goto out4;
    }
    if (se->vu_socket_group && !se->vu_socket_path) {
        fuse_log(FUSE_LOG_ERR,
                 "fuse: --socket-group can only be used with --socket-path\n");
        goto out4;
    }

    se->bufsize = FUSE_MAX_MAX_PAGES * getpagesize() + FUSE_BUFFER_HEADER_SIZE;

    list_init_req(&se->list);
    list_init_req(&se->interrupts);
    fuse_mutex_init(&se->lock);
    pthread_rwlock_init(&se->init_rwlock, NULL);

    memcpy(&se->op, op, op_size);
    se->owner = getuid();
    se->userdata = userdata;

    return se;

out4:
    fuse_opt_free_args(args);
out2:
    g_free(se);
out1:
    return NULL;
}

int fuse_session_mount(struct fuse_session *se)
{
    return virtio_session_mount(se);
}

int fuse_session_fd(struct fuse_session *se)
{
    return se->fd;
}

void fuse_session_unmount(struct fuse_session *se)
{
}

int fuse_lowlevel_is_virtio(struct fuse_session *se)
{
    return !!se->virtio_dev;
}

void fuse_session_exit(struct fuse_session *se)
{
    se->exited = 1;
}

void fuse_session_reset(struct fuse_session *se)
{
    se->exited = 0;
    se->error = 0;
}

int fuse_session_exited(struct fuse_session *se)
{
    return se->exited;
}