nfp_mutex.c (9368B)
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include "nfp_cpp.h"
#include "nfp6000/nfp6000.h"

struct nfp_cpp_mutex {
	struct nfp_cpp *cpp;
	int target;
	u16 depth;
	unsigned long long address;
	u32 key;
};

static u32 nfp_mutex_locked(u16 interface)
{
	return (u32)interface << 16 | 0x000f;
}

static u32 nfp_mutex_unlocked(u16 interface)
{
	return (u32)interface << 16 | 0x0000;
}

static u32 nfp_mutex_owner(u32 val)
{
	return val >> 16;
}

static bool nfp_mutex_is_locked(u32 val)
{
	return (val & 0xffff) == 0x000f;
}

static bool nfp_mutex_is_unlocked(u32 val)
{
	return (val & 0xffff) == 0x0000;
}

/* If you need more than 65536 recursive locks, please rethink your code. */
#define NFP_MUTEX_DEPTH_MAX	0xffff

static int
nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address)
{
	/* Not permitted on invalid interfaces */
	if (NFP_CPP_INTERFACE_TYPE_of(interface) ==
	    NFP_CPP_INTERFACE_TYPE_INVALID)
		return -EINVAL;

	/* Address must be 64-bit aligned */
	if (address & 7)
		return -EINVAL;

	if (*target != NFP_CPP_TARGET_MU)
		return -EINVAL;

	return 0;
}

/**
 * nfp_cpp_mutex_init() - Initialize a mutex location
 * @cpp:	NFP CPP handle
 * @target:	NFP CPP target ID (i.e. NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
 * @address:	Offset into the address space of the NFP CPP target ID
 * @key:	Unique 32-bit value for this mutex
 *
 * The CPP target:address must point to a 64-bit aligned location; this
 * function initializes 64 bits of data at that location.
 *
 * This creates the initial mutex state, as locked by this
 * nfp_cpp_interface().
 *
 * This function should only be called when setting up
 * the initial lock state upon boot-up of the system.
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
		       int target, unsigned long long address, u32 key)
{
	const u32 muw = NFP_CPP_ID(target, 4, 0);	/* atomic_write */
	u16 interface = nfp_cpp_interface(cpp);
	int err;

	err = nfp_cpp_mutex_validate(interface, &target, address);
	if (err)
		return err;

	err = nfp_cpp_writel(cpp, muw, address + 4, key);
	if (err)
		return err;

	err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
	if (err)
		return err;

	return 0;
}
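
/* A minimal usage sketch for nfp_cpp_mutex_init(): how a one-time,
 * boot-time caller might establish the mutex word.  The function name,
 * target offset and key below are hypothetical placeholders for
 * illustration only, not part of the driver flow.
 */
static int __maybe_unused example_nfp_mutex_setup(struct nfp_cpp *cpp)
{
	/* Hypothetical 64-bit aligned offset within the MU target */
	const unsigned long long addr = 0x8000;
	/* Hypothetical key; later nfp_cpp_mutex_alloc() callers must use
	 * the same value for this location.
	 */
	const u32 key = 0x6e667031;

	/* Writes the key at addr + 4 and marks the word at addr as
	 * locked by this interface.
	 */
	return nfp_cpp_mutex_init(cpp, NFP_CPP_TARGET_MU, addr, key);
}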

/**
 * nfp_cpp_mutex_alloc() - Create a mutex handle
 * @cpp:	NFP CPP handle
 * @target:	NFP CPP target ID (i.e. NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
 * @address:	Offset into the address space of the NFP CPP target ID
 * @key:	32-bit unique key (must match the key at this location)
 *
 * The CPP target:address must point to a 64-bit aligned location, and
 * must reserve 64 bits of data at that location for use by the handle.
 *
 * Only target/address pairs that point to entities that support the
 * MU Atomic Engine's CmpAndSwap32 command are supported.
 *
 * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
 */
struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
					  unsigned long long address, u32 key)
{
	const u32 mur = NFP_CPP_ID(target, 3, 0);	/* atomic_read */
	u16 interface = nfp_cpp_interface(cpp);
	struct nfp_cpp_mutex *mutex;
	int err;
	u32 tmp;

	err = nfp_cpp_mutex_validate(interface, &target, address);
	if (err)
		return NULL;

	err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
	if (err < 0)
		return NULL;

	if (tmp != key)
		return NULL;

	mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
	if (!mutex)
		return NULL;

	mutex->cpp = cpp;
	mutex->target = target;
	mutex->address = address;
	mutex->key = key;
	mutex->depth = 0;

	return mutex;
}

/**
 * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state
 * @mutex:	NFP CPP Mutex handle
 */
void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
{
	kfree(mutex);
}

/**
 * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
 * @mutex:	NFP CPP Mutex handle
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
{
	unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ;
	unsigned long err_at = jiffies + NFP_MUTEX_WAIT_ERROR * HZ;
	unsigned int timeout_ms = 1;
	int err;

	/* We can't use a waitqueue here, because the unlocker
	 * might be on a separate CPU.
	 *
	 * So just wait for now.
	 */
	for (;;) {
		err = nfp_cpp_mutex_trylock(mutex);
		if (err != -EBUSY)
			break;

		err = msleep_interruptible(timeout_ms);
		if (err != 0) {
			nfp_info(mutex->cpp,
				 "interrupted waiting for NFP mutex\n");
			return -ERESTARTSYS;
		}

		if (time_is_before_eq_jiffies(warn_at)) {
			warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
			nfp_warn(mutex->cpp,
				 "Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n",
				 mutex->depth,
				 mutex->target, mutex->address, mutex->key);
		}
		if (time_is_before_eq_jiffies(err_at)) {
			nfp_err(mutex->cpp, "Error: mutex wait timed out\n");
			return -EBUSY;
		}
	}

	return err;
}

/**
 * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
 * @mutex:	NFP CPP Mutex handle
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
{
	const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);	/* atomic_write */
	const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);	/* atomic_read */
	struct nfp_cpp *cpp = mutex->cpp;
	u32 key, value;
	u16 interface;
	int err;

	interface = nfp_cpp_interface(cpp);

	if (mutex->depth > 1) {
		mutex->depth--;
		return 0;
	}

	err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
	if (err < 0)
		return err;

	if (key != mutex->key)
		return -EPERM;

	err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
	if (err < 0)
		return err;

	if (value != nfp_mutex_locked(interface))
		return -EACCES;

	err = nfp_cpp_writel(cpp, muw, mutex->address,
			     nfp_mutex_unlocked(interface));
	if (err < 0)
		return err;

	mutex->depth = 0;
	return 0;
}
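
/* A minimal usage sketch of the expected alloc/lock/unlock/free pairing
 * around a critical section shared with other interfaces or the firmware.
 * The function name, target offset and key are hypothetical placeholders;
 * error handling is abbreviated.
 */
static int __maybe_unused example_nfp_mutex_critical_section(struct nfp_cpp *cpp)
{
	struct nfp_cpp_mutex *mutex;
	int err;

	/* Returns NULL on an unaligned address, a key mismatch at the
	 * location, or an allocation failure.
	 */
	mutex = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, 0x8000, 0x6e667031);
	if (!mutex)
		return -EINVAL;

	/* May sleep; warns and eventually gives up with -EBUSY if the
	 * lock is held elsewhere for too long.
	 */
	err = nfp_cpp_mutex_lock(mutex);
	if (err)
		goto out_free;

	/* ... critical section guarded by the device-resident mutex ... */

	err = nfp_cpp_mutex_unlock(mutex);
out_free:
	/* Freeing the handle does not alter the lock state */
	nfp_cpp_mutex_free(mutex);
	return err;
}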

/**
 * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
 * @mutex:	NFP CPP Mutex handle
 *
 * Return: 0 if the lock succeeded, -errno on failure
 */
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
{
	const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);	/* atomic_write */
	const u32 mus = NFP_CPP_ID(mutex->target, 5, 3);	/* test_set_imm */
	const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);	/* atomic_read */
	struct nfp_cpp *cpp = mutex->cpp;
	u32 key, value, tmp;
	int err;

	if (mutex->depth > 0) {
		if (mutex->depth == NFP_MUTEX_DEPTH_MAX)
			return -E2BIG;
		mutex->depth++;
		return 0;
	}

	/* Verify that the lock marker is not damaged */
	err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
	if (err < 0)
		return err;

	if (key != mutex->key)
		return -EPERM;

	/* Compare against the unlocked state, and if true,
	 * write the interface id into the top 16 bits, and
	 * mark as locked.
	 */
	value = nfp_mutex_locked(nfp_cpp_interface(cpp));

	/* We use test_set_imm here, as it implies a read
	 * of the current state, and sets the bits in the
	 * bytemask of the command to 1s. Since the mutex
	 * is guaranteed to be 64-bit aligned, the bytemask
	 * of this 32-bit command is ensured to be 8'b00001111,
	 * which implies that the lower 4 bits will be set to
	 * ones regardless of the initial state.
	 *
	 * Since this is a 'Readback' operation, with no Pull
	 * data, we can treat this as a normal Push (read)
	 * atomic, which returns the original value.
	 */
	err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
	if (err < 0)
		return err;

	/* Was it unlocked? */
	if (nfp_mutex_is_unlocked(tmp)) {
		/* The read value can only be 0x....0000 in the unlocked
		 * state.  If another party were contending for this lock,
		 * the lock state would be 0x....000f.
		 */

		/* Write our owner ID into the lock.
		 * While not strictly necessary, this helps with
		 * debug and bookkeeping.
		 */
		err = nfp_cpp_writel(cpp, muw, mutex->address, value);
		if (err < 0)
			return err;

		mutex->depth = 1;
		return 0;
	}

	return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
}

/**
 * nfp_cpp_mutex_reclaim() - Unlock mutex if held by local endpoint
 * @cpp:	NFP CPP handle
 * @target:	NFP CPP target ID (i.e. NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
 * @address:	Offset into the address space of the NFP CPP target ID
 *
 * Release the lock if it is held by the local system.  Extreme care is
 * advised; call only when no local lock users can exist.
 *
 * Return: 0 if the lock was OK, 1 if locked by us, -errno on invalid mutex
 */
int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
			  unsigned long long address)
{
	const u32 mur = NFP_CPP_ID(target, 3, 0);	/* atomic_read */
	const u32 muw = NFP_CPP_ID(target, 4, 0);	/* atomic_write */
	u16 interface = nfp_cpp_interface(cpp);
	int err;
	u32 tmp;

	err = nfp_cpp_mutex_validate(interface, &target, address);
	if (err)
		return err;

	/* Check lock */
	err = nfp_cpp_readl(cpp, mur, address, &tmp);
	if (err < 0)
		return err;

	if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface)
		return 0;

	/* Bust the lock */
	err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface));
	if (err < 0)
		return err;

	return 1;
}
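
/* A minimal usage sketch of nfp_cpp_mutex_reclaim(): recovering a mutex
 * left locked by this host, e.g. by a previous driver instance that
 * crashed.  As noted above, this is only safe when no local lock users
 * can exist; the function name and offset are hypothetical placeholders.
 */
static void __maybe_unused example_nfp_mutex_recover(struct nfp_cpp *cpp)
{
	int err;

	err = nfp_cpp_mutex_reclaim(cpp, NFP_CPP_TARGET_MU, 0x8000);
	if (err < 0)
		nfp_err(cpp, "mutex reclaim failed: %d\n", err);
	else if (err == 1)
		nfp_info(cpp, "reclaimed stale NFP mutex\n");
}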