ondemand.c
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/uio.h>
#include "internal.h"

static int cachefiles_ondemand_fd_release(struct inode *inode,
					  struct file *file)
{
	struct cachefiles_object *object = file->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	int object_id = object->ondemand_id;
	struct cachefiles_req *req;
	XA_STATE(xas, &cache->reqs, 0);

	xa_lock(&cache->reqs);
	object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;

	/*
	 * Flush all pending READ requests since their completion depends on
	 * anon_fd.
	 */
	xas_for_each(&xas, req, ULONG_MAX) {
		if (req->msg.object_id == object_id &&
		    req->msg.opcode == CACHEFILES_OP_READ) {
			req->error = -EIO;
			complete(&req->done);
			xas_store(&xas, NULL);
		}
	}
	xa_unlock(&cache->reqs);

	xa_erase(&cache->ondemand_ids, object_id);
	trace_cachefiles_ondemand_fd_release(object, object_id);
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	cachefiles_put_unbind_pincount(cache);
	return 0;
}

static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
						 struct iov_iter *iter)
{
	struct cachefiles_object *object = kiocb->ki_filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file = object->file;
	size_t len = iter->count;
	loff_t pos = kiocb->ki_pos;
	const struct cred *saved_cred;
	int ret;

	if (!file)
		return -ENOBUFS;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = __cachefiles_prepare_write(object, file, &pos, &len, true);
	cachefiles_end_secure(cache, saved_cred);
	if (ret < 0)
		return ret;

	trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
	ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
	if (!ret)
		ret = len;

	return ret;
}

static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
					    int whence)
{
	struct cachefiles_object *object = filp->private_data;
	struct file *file = object->file;

	if (!file)
		return -ENOBUFS;

	return vfs_llseek(file, pos, whence);
}

static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
					 unsigned long arg)
{
	struct cachefiles_object *object = filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req;
	unsigned long id;

	if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
		return -EINVAL;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	id = arg;
	req = xa_erase(&cache->reqs, id);
	if (!req)
		return -EINVAL;

	trace_cachefiles_ondemand_cread(object, id);
	complete(&req->done);
	return 0;
}

static const struct file_operations cachefiles_ondemand_fd_fops = {
	.owner		= THIS_MODULE,
	.release	= cachefiles_ondemand_fd_release,
	.write_iter	= cachefiles_ondemand_fd_write_iter,
	.llseek		= cachefiles_ondemand_fd_llseek,
	.unlocked_ioctl	= cachefiles_ondemand_fd_ioctl,
};
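/*
 * Daemon-side view of the fops above (an illustrative user-space sketch, not
 * part of this file; "anon_fd", "buf" and "load" are hypothetical daemon
 * variables, with "load" pointing at the cachefiles_read payload of a READ
 * request). The fd is opened O_WRONLY with FMODE_PWRITE | FMODE_LSEEK set in
 * cachefiles_ondemand_get_fd() below, so fulfilling a READ request amounts
 * to:
 *
 *	pwrite(anon_fd, buf, load->len, load->off);
 *	ioctl(anon_fd, CACHEFILES_IOC_READ_COMPLETE, msg->msg_id);
 */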
/*
 * OPEN request Completion (copen)
 * - command: "copen <id>,<cache_size>"
 *   <cache_size> indicates the object size if >=0, error code if negative
 */
int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;
	struct fscache_cookie *cookie;
	char *pid, *psize;
	unsigned long id;
	long size;
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	if (!*args) {
		pr_err("Empty id specified\n");
		return -EINVAL;
	}

	pid = args;
	psize = strchr(args, ',');
	if (!psize) {
		pr_err("Cache size is not specified\n");
		return -EINVAL;
	}

	*psize = 0;
	psize++;

	ret = kstrtoul(pid, 0, &id);
	if (ret)
		return ret;

	req = xa_erase(&cache->reqs, id);
	if (!req)
		return -EINVAL;

	/* fail OPEN request if copen format is invalid */
	ret = kstrtol(psize, 0, &size);
	if (ret) {
		req->error = ret;
		goto out;
	}

	/* fail OPEN request if daemon reports an error */
	if (size < 0) {
		if (!IS_ERR_VALUE(size))
			size = -EINVAL;
		req->error = size;
		goto out;
	}

	cookie = req->object->cookie;
	cookie->object_size = size;
	if (size)
		clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	else
		set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	trace_cachefiles_ondemand_copen(req->object, id, size);

out:
	complete(&req->done);
	return ret;
}
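/*
 * Daemon-side example of the copen reply (a sketch; "devfd" is assumed to be
 * the daemon's open fd on the cachefiles device). To complete an OPEN request
 * that arrived with msg_id 3 for a 1 MiB object:
 *
 *	write(devfd, "copen 3,1048576", strlen("copen 3,1048576"));
 *
 * A negative <cache_size> fails the request instead, e.g. "copen 3,-5"
 * propagates -EIO to the waiter in cachefiles_ondemand_send_req().
 */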
static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct cachefiles_open *load;
	struct file *file;
	u32 object_id;
	int ret, fd;

	object = cachefiles_grab_object(req->object,
					cachefiles_obj_get_ondemand_fd);
	cache = object->volume->cache;

	ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL,
			      XA_LIMIT(1, INT_MAX),
			      &cache->ondemand_id_next, GFP_KERNEL);
	if (ret < 0)
		goto err;

	fd = get_unused_fd_flags(O_WRONLY);
	if (fd < 0) {
		ret = fd;
		goto err_free_id;
	}

	file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
				  object, O_WRONLY);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_put_fd;
	}

	file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
	fd_install(fd, file);

	load = (void *)req->msg.data;
	load->fd = fd;
	req->msg.object_id = object_id;
	object->ondemand_id = object_id;

	cachefiles_get_unbind_pincount(cache);
	trace_cachefiles_ondemand_open(object, &req->msg, load);
	return 0;

err_put_fd:
	put_unused_fd(fd);
err_free_id:
	xa_erase(&cache->ondemand_ids, object_id);
err:
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	return ret;
}

ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
					char __user *_buffer, size_t buflen)
{
	struct cachefiles_req *req;
	struct cachefiles_msg *msg;
	unsigned long id = 0;
	size_t n;
	int ret = 0;
	XA_STATE(xas, &cache->reqs, 0);

	/*
	 * Search for a request that has not yet been processed, to prevent
	 * requests from being processed repeatedly.
	 */
	xa_lock(&cache->reqs);
	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
	if (!req) {
		xa_unlock(&cache->reqs);
		return 0;
	}

	msg = &req->msg;
	n = msg->len;

	if (n > buflen) {
		xa_unlock(&cache->reqs);
		return -EMSGSIZE;
	}

	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
	xa_unlock(&cache->reqs);

	id = xas.xa_index;
	msg->msg_id = id;

	if (msg->opcode == CACHEFILES_OP_OPEN) {
		ret = cachefiles_ondemand_get_fd(req);
		if (ret)
			goto error;
	}

	if (copy_to_user(_buffer, msg, n) != 0) {
		ret = -EFAULT;
		goto err_put_fd;
	}

	/* CLOSE request has no reply */
	if (msg->opcode == CACHEFILES_OP_CLOSE) {
		xa_erase(&cache->reqs, id);
		complete(&req->done);
	}

	return n;

err_put_fd:
	if (msg->opcode == CACHEFILES_OP_OPEN)
		close_fd(((struct cachefiles_open *)msg->data)->fd);
error:
	xa_erase(&cache->reqs, id);
	req->error = ret;
	complete(&req->done);
	return ret;
}

typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);

static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
					enum cachefiles_opcode opcode,
					size_t data_len,
					init_req_fn init_req,
					void *private)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req;
	XA_STATE(xas, &cache->reqs, 0);
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return 0;

	if (test_bit(CACHEFILES_DEAD, &cache->flags))
		return -EIO;

	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->object = object;
	init_completion(&req->done);
	req->msg.opcode = opcode;
	req->msg.len = sizeof(struct cachefiles_msg) + data_len;

	ret = init_req(req, private);
	if (ret)
		goto out;

	do {
		/*
		 * Stop enqueuing the request when the daemon is dying. The
		 * following two operations need to be atomic as a whole:
		 *   1) check the cache state, and
		 *   2) enqueue the request if the cache is alive.
		 * Otherwise the request may be enqueued after the xarray has
		 * been flushed, leaving an orphaned request that is never
		 * completed.
		 *
		 * CPU 1			CPU 2
		 * =====			=====
		 * test CACHEFILES_DEAD bit
		 *				set CACHEFILES_DEAD bit
		 *				flush requests in the xarray
		 * enqueue the request
		 */
		xas_lock(&xas);

		if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		/* coupled with the barrier in cachefiles_flush_reqs() */
		smp_mb();

		if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) {
			WARN_ON_ONCE(object->ondemand_id == 0);
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		xas.xa_index = 0;
		xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);
		xas_store(&xas, req);
		xas_clear_mark(&xas, XA_FREE_MARK);
		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	ret = xas_error(&xas);
	if (ret)
		goto out;

	wake_up_all(&cache->daemon_pollwq);
	wait_for_completion(&req->done);
	ret = req->error;
out:
	kfree(req);
	return ret;
}

static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	struct cachefiles_open *load = (void *)req->msg.data;
	size_t volume_key_size, cookie_key_size;
	void *volume_key, *cookie_key;

	/*
	 * Volume key is a NUL-terminated string. key[0] stores strlen() of the
	 * string, followed by the content of the string (excluding '\0').
	 */
	volume_key_size = volume->key[0] + 1;
	volume_key = volume->key + 1;

	/* Cookie key is binary data, which is netfs specific. */
	cookie_key_size = cookie->key_len;
	cookie_key = fscache_get_key(cookie);

	if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) {
		pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n");
		return -EINVAL;
	}

	load->volume_key_size = volume_key_size;
	load->cookie_key_size = cookie_key_size;
	memcpy(load->data, volume_key, volume_key_size);
	memcpy(load->data + volume_key_size, cookie_key, cookie_key_size);

	return 0;
}
static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
					      void *private)
{
	struct cachefiles_object *object = req->object;
	int object_id = object->ondemand_id;

	/*
	 * It's possible that the object ID is still 0 if the cookie lookup
	 * phase failed before an OPEN request was ever sent. Also avoid
	 * sending a CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which
	 * means the anon_fd has already been closed.
	 */
	if (object_id <= 0)
		return -ENOENT;

	req->msg.object_id = object_id;
	trace_cachefiles_ondemand_close(object, &req->msg);
	return 0;
}

struct cachefiles_read_ctx {
	loff_t off;
	size_t len;
};

static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct cachefiles_read *load = (void *)req->msg.data;
	struct cachefiles_read_ctx *read_ctx = private;
	int object_id = object->ondemand_id;

	/* Stop enqueuing requests when the daemon has closed the anon_fd. */
	if (object_id <= 0) {
		WARN_ON_ONCE(object_id == 0);
		pr_info_once("READ: anonymous fd closed prematurely.\n");
		return -EIO;
	}

	req->msg.object_id = object_id;
	load->off = read_ctx->off;
	load->len = read_ctx->len;
	trace_cachefiles_ondemand_read(object, &req->msg, load);
	return 0;
}

int cachefiles_ondemand_init_object(struct cachefiles_object *object)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	size_t volume_key_size, cookie_key_size, data_len;

	/*
	 * CacheFiles will first check the cache file under the root cache
	 * directory. If the coherency check fails, it will fall back to
	 * creating a new tmpfile as the cache file. Reuse the previously
	 * allocated object ID, if any.
	 */
	if (object->ondemand_id > 0)
		return 0;

	volume_key_size = volume->key[0] + 1;
	cookie_key_size = cookie->key_len;
	data_len = sizeof(struct cachefiles_open) +
		   volume_key_size + cookie_key_size;

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
			data_len, cachefiles_ondemand_init_open_req, NULL);
}

void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
{
	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
			cachefiles_ondemand_init_close_req, NULL);
}

int cachefiles_ondemand_read(struct cachefiles_object *object,
			     loff_t pos, size_t len)
{
	struct cachefiles_read_ctx read_ctx = {pos, len};

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ,
			sizeof(struct cachefiles_read),
			cachefiles_ondemand_init_read_req, &read_ctx);
}
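/*
 * A minimal daemon loop tying the above together (user-space sketch, assuming
 * <poll.h>, <unistd.h> and <linux/cachefiles.h>; error handling elided, and
 * "devfd"/"handle_one_request" are hypothetical):
 *
 *	struct pollfd pfd = { .fd = devfd, .events = POLLIN };
 *	char buf[CACHEFILES_MSG_MAX_SIZE];
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		ssize_t n = read(devfd, buf, sizeof(buf));
 *
 *		if (n > 0)
 *			handle_one_request((struct cachefiles_msg *)buf);
 *	}
 *
 * handle_one_request() dispatches on msg->opcode: OPEN is answered with a
 * "copen" write to devfd, READ with a pwrite() plus the
 * CACHEFILES_IOC_READ_COMPLETE ioctl on the anon_fd, and CLOSE needs no reply
 * beyond close()ing the anon_fd, which also flushes any in-flight READs via
 * cachefiles_ondemand_fd_release().
 */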