cvmx-cmd-queue.c
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Support functions for managing command queues used for
 * various hardware blocks.
 */

#include <linux/kernel.h>

#include <asm/octeon/octeon.h>

#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-cmd-queue.h>

#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>

/*
 * This application uses this pointer to access the global queue
 * state. It points to a bootmem named block.
 */
__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);

/*
 * Initialize the global queue state pointer.
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
	char *alloc_name = "cvmx_cmd_queues";

	if (likely(__cvmx_cmd_queue_state_ptr))
		return CVMX_CMD_QUEUE_SUCCESS;

	__cvmx_cmd_queue_state_ptr =
		cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
					 128,
					 alloc_name);
	if (__cvmx_cmd_queue_state_ptr)
		memset(__cvmx_cmd_queue_state_ptr, 0,
		       sizeof(*__cvmx_cmd_queue_state_ptr));
	else {
		struct cvmx_bootmem_named_block_desc *block_desc =
		    cvmx_bootmem_find_named_block(alloc_name);
		if (block_desc)
			__cvmx_cmd_queue_state_ptr =
			    cvmx_phys_to_ptr(block_desc->base_addr);
		else {
			cvmx_dprintf
			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
			     alloc_name);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
	}
	return CVMX_CMD_QUEUE_SUCCESS;
}
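
/*
 * Illustrative sketch (not part of the SDK): how the packed fields in
 * __cvmx_cmd_queue_state_t encode a queue's command buffer, using the
 * same arithmetic as cvmx_cmd_queue_initialize() below. The physical
 * address and pool size are made-up example values.
 */
#if 0
static void example_field_encoding(void)
{
	uint64_t phys = 0x410080;	/* hypothetical 128-byte aligned buffer */
	int pool_size = 1024;		/* hypothetical FPA buffer size, bytes */

	/* base_ptr_div128 stores the physical address divided by 128 */
	uint32_t base_ptr_div128 = phys / 128;		/* 0x8201 */
	/* pool_size_m1 stores the size in 64-bit words, minus one */
	uint32_t pool_size_m1 = (pool_size >> 3) - 1;	/* 127 */

	/* Decoding reverses the shift, as cvmx_cmd_queue_buffer() does */
	void *buffer = cvmx_phys_to_ptr((uint64_t)base_ptr_div128 << 7);
}
#endif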

/*
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @queue_id:  Hardware command queue to initialize.
 * @max_depth: Maximum outstanding commands that can be queued.
 * @fpa_pool:  FPA pool the command queues should come from.
 * @pool_size: Size of each buffer in the FPA pool (bytes)
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
						  int max_depth, int fpa_pool,
						  int pool_size)
{
	__cvmx_cmd_queue_state_t *qstate;
	cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
	if (result != CVMX_CMD_QUEUE_SUCCESS)
		return result;

	qstate = __cvmx_cmd_queue_get_state(queue_id);
	if (qstate == NULL)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/*
	 * We artificially limit max_depth to 1<<20 words. It is an
	 * arbitrary limit.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
		if ((max_depth < 0) || (max_depth > 1 << 20))
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	} else if (max_depth != 0)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	if ((fpa_pool < 0) || (fpa_pool > 7))
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	if ((pool_size < 128) || (pool_size > 65536))
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/* See if someone else has already initialized the queue */
	if (qstate->base_ptr_div128) {
		if (max_depth != (int)qstate->max_depth) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "max_depth (%d).\n",
				     (int)qstate->max_depth);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if (fpa_pool != qstate->fpa_pool) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "FPA pool (%u).\n",
				     qstate->fpa_pool);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "FPA pool size (%u).\n",
				     (qstate->pool_size_m1 + 1) << 3);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_ALREADY_SETUP;
	} else {
		union cvmx_fpa_ctl_status status;
		void *buffer;

		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
		if (!status.s.enb) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "FPA is not enabled.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		buffer = cvmx_fpa_alloc(fpa_pool);
		if (buffer == NULL) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Unable to allocate initial buffer.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}

		memset(qstate, 0, sizeof(*qstate));
		qstate->max_depth = max_depth;
		qstate->fpa_pool = fpa_pool;
		qstate->pool_size_m1 = (pool_size >> 3) - 1;
		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
		/*
		 * We zeroed the now serving field so we need to also
		 * zero the ticket.
		 */
		__cvmx_cmd_queue_state_ptr->
		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_SUCCESS;
	}
}
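
/*
 * Illustrative sketch (not part of the SDK): a typical call bringing up
 * a PKO command queue. The FPA pool number and buffer size are
 * hypothetical values chosen for illustration; real callers take them
 * from their cvmx-config.h. The CVMX_CMD_QUEUE_PKO() helper comes from
 * cvmx-cmd-queue.h.
 */
#if 0
static int example_init_pko_queue(int port_queue)
{
	cvmx_cmd_queue_result_t rc;

	rc = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(port_queue),
				       0,	/* 0 = no explicit depth limit */
				       3,	/* hypothetical FPA pool */
				       1024);	/* bytes per FPA buffer */
	/*
	 * ALREADY_SETUP is not an error: another core may have won the
	 * race, as long as it used the same parameters.
	 */
	if (rc != CVMX_CMD_QUEUE_SUCCESS && rc != CVMX_CMD_QUEUE_ALREADY_SETUP)
		return -1;
	return 0;
}
#endif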

/*
 * Shut down a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @queue_id: Queue to shutdown
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	if (qptr == NULL) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
			     "get queue information.\n");
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	if (cvmx_cmd_queue_length(queue_id) > 0) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
			     "has data in it.\n");
		return CVMX_CMD_QUEUE_FULL;
	}

	__cvmx_cmd_queue_lock(queue_id, qptr);
	if (qptr->base_ptr_div128) {
		cvmx_fpa_free(cvmx_phys_to_ptr
			      ((uint64_t) qptr->base_ptr_div128 << 7),
			      qptr->fpa_pool, 0);
		qptr->base_ptr_div128 = 0;
	}
	__cvmx_cmd_queue_unlock(qptr);

	return CVMX_CMD_QUEUE_SUCCESS;
}

/*
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @queue_id: Hardware command queue to query
 *
 * Returns Number of outstanding commands
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
	if (CVMX_ENABLE_PARAMETER_CHECKING) {
		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	/*
	 * The cast is here so gcc will check that all values in the
	 * cvmx_cmd_queue_id_t enumeration are handled.
	 */
	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
	case CVMX_CMD_QUEUE_PKO_BASE:
		/*
		 * FIXME: Need atomic lock on
		 * CVMX_PKO_REG_READ_IDX. Right now we are normally
		 * called with the queue lock, so that is a SLIGHT
		 * amount of protection.
		 */
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			union cvmx_pko_mem_debug9 debug9;
			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
			return debug9.cn38xx.doorbell;
		} else {
			union cvmx_pko_mem_debug8 debug8;
			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
			return debug8.cn50xx.doorbell;
		}
	case CVMX_CMD_QUEUE_ZIP:
	case CVMX_CMD_QUEUE_DFA:
	case CVMX_CMD_QUEUE_RAID:
		/* FIXME: Implement other lengths */
		return 0;
	case CVMX_CMD_QUEUE_DMA_BASE:
		{
			union cvmx_npei_dmax_counts dmax_counts;
			dmax_counts.u64 =
			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
					  (queue_id & 0x7));
			return dmax_counts.s.dbell;
		}
	case CVMX_CMD_QUEUE_END:
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}
	return CVMX_CMD_QUEUE_INVALID_PARAM;
}

/*
 * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routines access to the low-level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @queue_id: Command queue to query
 *
 * Returns Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	if (qptr && qptr->base_ptr_div128)
		return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
	else
		return NULL;
}
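
/*
 * Illustrative sketch (not part of the SDK): tearing down a queue.
 * cvmx_cmd_queue_shutdown() refuses to free the buffer while command
 * words are outstanding, so a caller first stops the attached hardware
 * unit and then waits for the pending count to reach zero. The
 * function name here is hypothetical.
 */
#if 0
static cvmx_cmd_queue_result_t
example_shutdown_queue(cvmx_cmd_queue_id_t queue_id)
{
	/* Wait for the hardware to drain all pending command words */
	while (cvmx_cmd_queue_length(queue_id) > 0)
		cpu_relax();

	return cvmx_cmd_queue_shutdown(queue_id);
}
#endif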