/*
 * Support functions for managing command queues used for
 * various hardware blocks.
 */

#include <linux/kernel.h>

#include <asm/octeon/octeon.h>

#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-cmd-queue.h>

#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>

/*
 * Pointer to the global command queue state shared by all users of
 * this API. It points to a structure defined in cvmx-cmd-queue.h.
 */
__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);

/*
 * Initialize the global queue state pointer.
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
	char *alloc_name = "cvmx_cmd_queues";
	extern uint64_t octeon_reserve32_memory;

	if (likely(__cvmx_cmd_queue_state_ptr))
		return CVMX_CMD_QUEUE_SUCCESS;

	if (octeon_reserve32_memory)
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
						   octeon_reserve32_memory,
						   octeon_reserve32_memory +
						   (CONFIG_CAVIUM_RESERVE32 <<
						    20) - 1, 128, alloc_name);
	else
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
					     128,
					     alloc_name);
	if (__cvmx_cmd_queue_state_ptr)
		memset(__cvmx_cmd_queue_state_ptr, 0,
		       sizeof(*__cvmx_cmd_queue_state_ptr));
	else {
		/*
		 * Allocation may have failed because the named block
		 * already exists (e.g. created by another application),
		 * so try to map the existing one instead.
		 */
		struct cvmx_bootmem_named_block_desc *block_desc =
		    cvmx_bootmem_find_named_block(alloc_name);
		if (block_desc)
			__cvmx_cmd_queue_state_ptr =
			    cvmx_phys_to_ptr(block_desc->base_addr);
		else {
			cvmx_dprintf
			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
			     alloc_name);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
	}
	return CVMX_CMD_QUEUE_SUCCESS;
}

/*
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the global queue state is set up so other cores
 * and applications see a consistent configuration.
 *
 * @queue_id:  Hardware command queue to initialize.
 * @max_depth: Maximum outstanding commands that can be queued
 *	       (only enforced when CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
 *	       is set; otherwise it must be 0).
 * @fpa_pool:  FPA pool the command buffers should come from (0-7).
 * @pool_size: Size of each buffer in the FPA pool (bytes, 128-65536).
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
						  int max_depth, int fpa_pool,
						  int pool_size)
{
	__cvmx_cmd_queue_state_t *qstate;
	cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
	if (result != CVMX_CMD_QUEUE_SUCCESS)
		return result;

	qstate = __cvmx_cmd_queue_get_state(queue_id);
	if (qstate == NULL)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/*
	 * We artificially limit max_depth to 1 << 20 words. It is an
	 * arbitrary limit.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
		if ((max_depth < 0) || (max_depth > 1 << 20))
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	} else if (max_depth != 0)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	if ((fpa_pool < 0) || (fpa_pool > 7))
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	if ((pool_size < 128) || (pool_size > 65536))
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/* See if someone else has already initialized the queue. */
	if (qstate->base_ptr_div128) {
		if (max_depth != (int)qstate->max_depth) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "max_depth (%d).\n",
				     (int)qstate->max_depth);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if (fpa_pool != qstate->fpa_pool) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "FPA pool (%u).\n",
				     qstate->fpa_pool);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "FPA pool size (%u).\n",
				     (qstate->pool_size_m1 + 1) << 3);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_ALREADY_SETUP;
	} else {
		union cvmx_fpa_ctl_status status;
		void *buffer;

		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
		if (!status.s.enb) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "FPA is not enabled.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		buffer = cvmx_fpa_alloc(fpa_pool);
		if (buffer == NULL) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Unable to allocate initial buffer.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}

		memset(qstate, 0, sizeof(*qstate));
		qstate->max_depth = max_depth;
		qstate->fpa_pool = fpa_pool;
		qstate->pool_size_m1 = (pool_size >> 3) - 1;
		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
		/*
		 * The queue state (including its lock's now-serving
		 * counter) was just zeroed, so zero the matching
		 * ticket as well.
		 */
		__cvmx_cmd_queue_state_ptr->
		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_SUCCESS;
	}
}
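
/*
 * Example (illustrative sketch only, not part of the original source): a
 * typical user of this API initializes a queue once during setup. The queue
 * id macro and FPA pool constants below are assumptions for the sake of the
 * example; real values come from the board/SDK configuration headers.
 *
 *	cvmx_cmd_queue_result_t rc;
 *
 *	rc = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(0), 0,
 *				       CVMX_FPA_OUTPUT_BUFFER_POOL,
 *				       CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
 *	if (rc != CVMX_CMD_QUEUE_SUCCESS && rc != CVMX_CMD_QUEUE_ALREADY_SETUP)
 *		return rc;
 *
 * Passing 0 for max_depth is always accepted; non-zero depths (up to
 * 1 << 20) are only accepted when CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH is set.
 */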

/*
 * Shutdown a queue and free its command buffer back to the FPA. The
 * hardware connected to the queue must be stopped and drained before
 * this function is called.
 *
 * @queue_id: Queue to shutdown
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	if (qptr == NULL) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
			     "get queue information.\n");
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	if (cvmx_cmd_queue_length(queue_id) > 0) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
			     "has data in it.\n");
		return CVMX_CMD_QUEUE_FULL;
	}

	__cvmx_cmd_queue_lock(queue_id, qptr);
	if (qptr->base_ptr_div128) {
		cvmx_fpa_free(cvmx_phys_to_ptr
			      ((uint64_t) qptr->base_ptr_div128 << 7),
			      qptr->fpa_pool, 0);
		qptr->base_ptr_div128 = 0;
	}
	__cvmx_cmd_queue_unlock(qptr);

	return CVMX_CMD_QUEUE_SUCCESS;
}

/*
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @queue_id: Hardware command queue to query
 *
 * Returns Number of outstanding commands
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
	if (CVMX_ENABLE_PARAMETER_CHECKING) {
		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	/*
	 * The cast is here so gcc will check that all values in the
	 * cvmx_cmd_queue_id_t enumeration are handled.
	 */
	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
	case CVMX_CMD_QUEUE_PKO_BASE:
		/*
		 * Select which queue the PKO debug registers report
		 * on, then read its doorbell (pending word) count.
		 */
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			union cvmx_pko_mem_debug9 debug9;
			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
			return debug9.cn38xx.doorbell;
		} else {
			union cvmx_pko_mem_debug8 debug8;
			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
			return debug8.cn50xx.doorbell;
		}
	case CVMX_CMD_QUEUE_ZIP:
	case CVMX_CMD_QUEUE_DFA:
	case CVMX_CMD_QUEUE_RAID:
		/* Length reporting is not implemented for these units. */
		return 0;
	case CVMX_CMD_QUEUE_DMA_BASE:
		{
			union cvmx_npei_dmax_counts dmax_counts;
			dmax_counts.u64 =
			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
					  (queue_id & 0x7));
			return dmax_counts.s.dbell;
		}
	case CVMX_CMD_QUEUE_END:
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}
	return CVMX_CMD_QUEUE_INVALID_PARAM;
}
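
/*
 * Example (illustrative sketch only, not part of the original source): a
 * caller normally polls cvmx_cmd_queue_length() until the hardware has
 * drained all pending command words before releasing the buffer with
 * cvmx_cmd_queue_shutdown(). The DMA queue id macro below is an assumption
 * for the example.
 *
 *	while (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_DMA(0)) > 0)
 *		cpu_relax();
 *	cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_DMA(0));
 */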

/*
 * Return the command buffer associated with a queue. This is intended
 * for low level hardware setup code; most users should not need to
 * access the buffer directly.
 *
 * @queue_id: Command queue to query
 *
 * Returns Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	if (qptr && qptr->base_ptr_div128)
		return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
	else
		return NULL;
}
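
/*
 * Example (illustrative sketch only, not part of the original source): low
 * level setup code can convert the returned pointer back to a physical
 * address when programming a hardware unit's command-buffer register. The
 * helper hw_set_cmd_buf_address() below is hypothetical and stands in for
 * whatever CSR write the specific block requires.
 *
 *	void *buf = cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO(0));
 *
 *	if (buf)
 *		hw_set_cmd_buf_address(cvmx_ptr_to_phys(buf));
 */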