0001 /***********************license start***************
0002  * Author: Cavium Networks
0003  *
0004  * Contact: support@caviumnetworks.com
0005  * This file is part of the OCTEON SDK
0006  *
0007  * Copyright (c) 2003-2008 Cavium Networks
0008  *
0009  * This file is free software; you can redistribute it and/or modify
0010  * it under the terms of the GNU General Public License, Version 2, as
0011  * published by the Free Software Foundation.
0012  *
0013  * This file is distributed in the hope that it will be useful, but
0014  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
0015  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
0016  * NONINFRINGEMENT.  See the GNU General Public License for more
0017  * details.
0018  *
0019  * You should have received a copy of the GNU General Public License
0020  * along with this file; if not, write to the Free Software
0021  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
0022  * or visit http://www.gnu.org/licenses/.
0023  *
0024  * This file may also be available under a different license from Cavium.
0025  * Contact Cavium Networks for more information
0026  ***********************license end**************************************/
0027 
0028 /**
0029  * Interface to the hardware Packet Order / Work unit.
0030  *
0031  * New, starting with SDK 1.7.0, cvmx-pow supports a number of
0032  * extended consistency checks. The define
0033  * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
0034  * internal state checks to find common programming errors. If
0035  * CVMX_ENABLE_POW_CHECKS is not defined, checks are by default
0036  * enabled. For example, cvmx-pow will check for the following
0037  * programming errors or POW state inconsistencies:
0038  * - Requesting a POW operation with an active tag switch in
0039  *   progress.
0040  * - Waiting for a tag switch to complete for an excessively
0041  *   long period. This is normally a sign of an error in locking
0042  *   causing deadlock.
0043  * - Illegal tag switches from NULL_NULL.
0044  * - Illegal tag switches from NULL.
0045  * - Illegal deschedule request.
0046  * - WQE pointer not matching the one attached to the core by
0047  *   the POW.
0048  *
0049  */
0050 
0051 #ifndef __CVMX_POW_H__
0052 #define __CVMX_POW_H__
0053 
0054 #include <asm/octeon/cvmx-pow-defs.h>
0055 
0056 #include <asm/octeon/cvmx-scratch.h>
0057 #include <asm/octeon/cvmx-wqe.h>
0058 
0059 /* Default to having all POW consistency checks turned on */
0060 #ifndef CVMX_ENABLE_POW_CHECKS
0061 #define CVMX_ENABLE_POW_CHECKS 1
0062 #endif
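
     /*
      * Usage sketch: a consumer that wants these runtime checks compiled
      * out can define CVMX_ENABLE_POW_CHECKS to 0 before including this
      * header, for example:
      *
      *     #define CVMX_ENABLE_POW_CHECKS 0
      *     #include <asm/octeon/cvmx-pow.h>
      *
      * With the default of 1, the inline helpers below keep their
      * __cvmx_pow_warn_if_pending_switch() style sanity checks.
      */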
0063 
0064 enum cvmx_pow_tag_type {
0065     /* Tag ordering is maintained */
0066     CVMX_POW_TAG_TYPE_ORDERED   = 0L,
0067     /* Tag ordering is maintained, and at most one PP has the tag */
0068     CVMX_POW_TAG_TYPE_ATOMIC    = 1L,
0069     /*
0070      * The work queue entry is removed from the order - NEVER tag
0071      * switch from NULL to NULL
0072      */
0073     CVMX_POW_TAG_TYPE_NULL      = 2L,
0074     /* A tag switch to NULL, and there is no space reserved in POW
0075      * - NEVER tag switch to NULL_NULL
0076      * - NEVER tag switch from NULL_NULL
0077      * - NULL_NULL is entered at the beginning of time and on a deschedule.
0078      * - NULL_NULL can be exited by a new work request. A NULL_SWITCH
0079      * load can also switch the state to NULL
0080      */
0081     CVMX_POW_TAG_TYPE_NULL_NULL = 3L
0082 };
0083 
0084 /**
0085  * Wait flag values for pow functions.
0086  */
0087 typedef enum {
0088     CVMX_POW_WAIT = 1,
0089     CVMX_POW_NO_WAIT = 0,
0090 } cvmx_pow_wait_t;
0091 
0092 /**
0093  *  POW tag operations.  These are used in the data stored to the POW.
0094  */
0095 typedef enum {
0096     /*
0097      * switch the tag (only) for this PP
0098      * - the previous tag should be non-NULL in this case
0099      * - tag switch response required
0100      * - fields used: op, type, tag
0101      */
0102     CVMX_POW_TAG_OP_SWTAG = 0L,
0103     /*
0104      * switch the tag for this PP, with full information
0105      * - this should be used when the previous tag is NULL
0106      * - tag switch response required
0107      * - fields used: address, op, grp, type, tag
0108      */
0109     CVMX_POW_TAG_OP_SWTAG_FULL = 1L,
0110     /*
0111      * switch the tag (and/or group) for this PP and de-schedule
0112      * - OK to keep the tag the same and only change the group
0113      * - fields used: op, no_sched, grp, type, tag
0114      */
0115     CVMX_POW_TAG_OP_SWTAG_DESCH = 2L,
0116     /*
0117      * just de-schedule
0118      * - fields used: op, no_sched
0119      */
0120     CVMX_POW_TAG_OP_DESCH = 3L,
0121     /*
0122      * create an entirely new work queue entry
0123      * - fields used: address, op, qos, grp, type, tag
0124      */
0125     CVMX_POW_TAG_OP_ADDWQ = 4L,
0126     /*
0127      * just update the work queue pointer and grp for this PP
0128      * - fields used: address, op, grp
0129      */
0130     CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,
0131     /*
0132      * set the no_sched bit on the de-schedule list
0133      *
0134      * - does nothing if the selected entry is not on the
0135      *   de-schedule list
0136      *
0137      * - does nothing if the stored work queue pointer does not
0138      *   match the address field
0139      *
0140      * - fields used: address, index, op
0141      *
0142      *  Before issuing a *_NSCHED operation, SW must guarantee
0143      *  that all prior deschedules and set/clr NSCHED operations
0144      *  are complete and all prior switches are complete. The
0145      *  hardware provides the opsdone bit and swdone bit for SW
0146      *  polling. After issuing a *_NSCHED operation, SW must
0147      *  guarantee that the set/clr NSCHED is complete before any
0148      *  subsequent operations.
0149      */
0150     CVMX_POW_TAG_OP_SET_NSCHED = 6L,
0151     /*
0152      * clears the no_sched bit on the de-schedule list
0153      *
0154      * - does nothing if the selected entry is not on the
0155      *   de-schedule list
0156      *
0157      * - does nothing if the stored work queue pointer does not
0158      *   match the address field
0159      *
0160      * - fields used: address, index, op
0161      *
0162      * Before issuing a *_NSCHED operation, SW must guarantee that
0163      * all prior deschedules and set/clr NSCHED operations are
0164      * complete and all prior switches are complete. The hardware
0165      * provides the opsdone bit and swdone bit for SW
0166      * polling. After issuing a *_NSCHED operation, SW must
0167      * guarantee that the set/clr NSCHED is complete before any
0168      * subsequent operations.
0169      */
0170     CVMX_POW_TAG_OP_CLR_NSCHED = 7L,
0171     /* do nothing */
0172     CVMX_POW_TAG_OP_NOP = 15L
0173 } cvmx_pow_tag_op_t;
0174 
0175 /**
0176  * This structure defines the store data on a store to POW
0177  */
0178 typedef union {
0179     uint64_t u64;
0180     struct {
0181 #ifdef __BIG_ENDIAN_BITFIELD
0182         /*
0183          * Don't reschedule this entry. no_sched is used for
0184          * CVMX_POW_TAG_OP_SWTAG_DESCH and
0185          * CVMX_POW_TAG_OP_DESCH
0186          */
0187         uint64_t no_sched:1;
0188         uint64_t unused:2;
0189         /* Contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
0190         uint64_t index:13;
0191         /* The operation to perform */
0192         cvmx_pow_tag_op_t op:4;
0193         uint64_t unused2:2;
0194         /*
0195          * The QOS level for the packet. qos is only used for
0196          * CVMX_POW_TAG_OP_ADDWQ
0197          */
0198         uint64_t qos:3;
0199         /*
0200          * The group that the work queue entry will be
0201          * scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ,
0202          * CVMX_POW_TAG_OP_SWTAG_FULL,
0203          * CVMX_POW_TAG_OP_SWTAG_DESCH, and
0204          * CVMX_POW_TAG_OP_UPDATE_WQP_GRP
0205          */
0206         uint64_t grp:4;
0207         /*
0208          * The type of the tag. type is used for everything
0209          * except CVMX_POW_TAG_OP_DESCH,
0210          * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
0211          * CVMX_POW_TAG_OP_*_NSCHED
0212          */
0213         uint64_t type:3;
0214         /*
0215          * The actual tag. tag is used for everything except
0216          * CVMX_POW_TAG_OP_DESCH,
0217          * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
0218          * CVMX_POW_TAG_OP_*_NSCHED
0219          */
0220         uint64_t tag:32;
0221 #else
0222         uint64_t tag:32;
0223         uint64_t type:3;
0224         uint64_t grp:4;
0225         uint64_t qos:3;
0226         uint64_t unused2:2;
0227         cvmx_pow_tag_op_t op:4;
0228         uint64_t index:13;
0229         uint64_t unused:2;
0230         uint64_t no_sched:1;
0231 #endif
0232     } s;
0233 } cvmx_pow_tag_req_t;
0234 
0235 /**
0236  * This structure describes the address to load stuff from POW
0237  */
0238 typedef union {
0239     uint64_t u64;
0240 
0241     /**
0242      * Address for new work request loads (did<2:0> == 0)
0243      */
0244     struct {
0245 #ifdef __BIG_ENDIAN_BITFIELD
0246         /* Mips64 address region. Should be CVMX_IO_SEG */
0247         uint64_t mem_region:2;
0248         /* Must be zero */
0249         uint64_t reserved_49_61:13;
0250         /* Must be one */
0251         uint64_t is_io:1;
0252         /* the ID of POW -- did<2:0> == 0 in this case */
0253         uint64_t did:8;
0254         /* Must be zero */
0255         uint64_t reserved_4_39:36;
0256         /*
0257          * If set, don't return load response until work is
0258          * available.
0259          */
0260         uint64_t wait:1;
0261         /* Must be zero */
0262         uint64_t reserved_0_2:3;
0263 #else
0264         uint64_t reserved_0_2:3;
0265         uint64_t wait:1;
0266         uint64_t reserved_4_39:36;
0267         uint64_t did:8;
0268         uint64_t is_io:1;
0269         uint64_t reserved_49_61:13;
0270         uint64_t mem_region:2;
0271 #endif
0272     } swork;
0273 
0274     /**
0275      * Address for loads to get POW internal status
0276      */
0277     struct {
0278 #ifdef __BIG_ENDIAN_BITFIELD
0279         /* Mips64 address region. Should be CVMX_IO_SEG */
0280         uint64_t mem_region:2;
0281         /* Must be zero */
0282         uint64_t reserved_49_61:13;
0283         /* Must be one */
0284         uint64_t is_io:1;
0285         /* the ID of POW -- did<2:0> == 1 in this case */
0286         uint64_t did:8;
0287         /* Must be zero */
0288         uint64_t reserved_10_39:30;
0289         /* The core id to get status for */
0290         uint64_t coreid:4;
0291         /*
0292          * If set and get_cur is set, return reverse tag-list
0293          * pointer rather than forward tag-list pointer.
0294          */
0295         uint64_t get_rev:1;
0296         /*
0297          * If set, return current status rather than pending
0298          * status.
0299          */
0300         uint64_t get_cur:1;
0301         /*
0302          * If set, get the work-queue pointer rather than
0303          * tag/type.
0304          */
0305         uint64_t get_wqp:1;
0306         /* Must be zero */
0307         uint64_t reserved_0_2:3;
0308 #else
0309         uint64_t reserved_0_2:3;
0310         uint64_t get_wqp:1;
0311         uint64_t get_cur:1;
0312         uint64_t get_rev:1;
0313         uint64_t coreid:4;
0314         uint64_t reserved_10_39:30;
0315         uint64_t did:8;
0316         uint64_t is_io:1;
0317         uint64_t reserved_49_61:13;
0318         uint64_t mem_region:2;
0319 #endif
0320     } sstatus;
0321 
0322     /**
0323      * Address for memory loads to get POW internal state
0324      */
0325     struct {
0326 #ifdef __BIG_ENDIAN_BITFIELD
0327         /* Mips64 address region. Should be CVMX_IO_SEG */
0328         uint64_t mem_region:2;
0329         /* Must be zero */
0330         uint64_t reserved_49_61:13;
0331         /* Must be one */
0332         uint64_t is_io:1;
0333         /* the ID of POW -- did<2:0> == 2 in this case */
0334         uint64_t did:8;
0335         /* Must be zero */
0336         uint64_t reserved_16_39:24;
0337         /* POW memory index */
0338         uint64_t index:11;
0339         /*
0340          * If set, return deschedule information rather than
0341          * the standard response for work-queue index (invalid
0342          * if the work-queue entry is not on the deschedule
0343          * list).
0344          */
0345         uint64_t get_des:1;
0346         /*
0347          * If set, get the work-queue pointer rather than
0348          * tag/type (no effect when get_des set).
0349          */
0350         uint64_t get_wqp:1;
0351         /* Must be zero */
0352         uint64_t reserved_0_2:3;
0353 #else
0354         uint64_t reserved_0_2:3;
0355         uint64_t get_wqp:1;
0356         uint64_t get_des:1;
0357         uint64_t index:11;
0358         uint64_t reserved_16_39:24;
0359         uint64_t did:8;
0360         uint64_t is_io:1;
0361         uint64_t reserved_49_61:13;
0362         uint64_t mem_region:2;
0363 #endif
0364     } smemload;
0365 
0366     /**
0367      * Address for index/pointer loads
0368      */
0369     struct {
0370 #ifdef __BIG_ENDIAN_BITFIELD
0371         /* Mips64 address region. Should be CVMX_IO_SEG */
0372         uint64_t mem_region:2;
0373         /* Must be zero */
0374         uint64_t reserved_49_61:13;
0375         /* Must be one */
0376         uint64_t is_io:1;
0377         /* the ID of POW -- did<2:0> == 3 in this case */
0378         uint64_t did:8;
0379         /* Must be zero */
0380         uint64_t reserved_9_39:31;
0381         /*
0382          * when {get_rmt ==0 AND get_des_get_tail == 0}, this
0383          * field selects one of eight POW internal-input
0384          * queues (0-7), one per QOS level; values 8-15 are
0385          * illegal in this case; when {get_rmt ==0 AND
0386          * get_des_get_tail == 1}, this field selects one of
0387          * 16 deschedule lists (per group); when get_rmt ==1,
0388          * this field selects one of 16 memory-input queue
0389          * lists.  The two memory-input queue lists associated
0390          * with each QOS level are:
0391          *
0392          * - qosgrp = 0, qosgrp = 8:      QOS0
0393          * - qosgrp = 1, qosgrp = 9:      QOS1
0394          * - qosgrp = 2, qosgrp = 10:     QOS2
0395          * - qosgrp = 3, qosgrp = 11:     QOS3
0396          * - qosgrp = 4, qosgrp = 12:     QOS4
0397          * - qosgrp = 5, qosgrp = 13:     QOS5
0398          * - qosgrp = 6, qosgrp = 14:     QOS6
0399          * - qosgrp = 7, qosgrp = 15:     QOS7
0400          */
0401         uint64_t qosgrp:4;
0402         /*
0403          * If set and get_rmt is clear, return deschedule list
0404          * indexes rather than indexes for the specified qos
0405          * level; if set and get_rmt is set, return the tail
0406          * pointer rather than the head pointer for the
0407          * specified qos level.
0408          */
0409         uint64_t get_des_get_tail:1;
0410         /*
0411          * If set, return remote pointers rather than the
0412          * local indexes for the specified qos level.
0413          */
0414         uint64_t get_rmt:1;
0415         /* Must be zero */
0416         uint64_t reserved_0_2:3;
0417 #else
0418         uint64_t reserved_0_2:3;
0419         uint64_t get_rmt:1;
0420         uint64_t get_des_get_tail:1;
0421         uint64_t qosgrp:4;
0422         uint64_t reserved_9_39:31;
0423         uint64_t did:8;
0424         uint64_t is_io:1;
0425         uint64_t reserved_49_61:13;
0426         uint64_t mem_region:2;
0427 #endif
0428     } sindexload;
0429 
0430     /**
0431      * Address for NULL_RD request (did<2:0> == 4). When this is read,
0432      * HW attempts to change the state to NULL if it is NULL_NULL (the
0433      * hardware cannot switch from NULL_NULL to NULL if a POW entry is
0434      * not available - software may need to recover by finishing
0435      * another piece of work before a POW entry can ever become
0436      * available.)
0437      */
0438     struct {
0439 #ifdef __BIG_ENDIAN_BITFIELD
0440         /* Mips64 address region. Should be CVMX_IO_SEG */
0441         uint64_t mem_region:2;
0442         /* Must be zero */
0443         uint64_t reserved_49_61:13;
0444         /* Must be one */
0445         uint64_t is_io:1;
0446         /* the ID of POW -- did<2:0> == 4 in this case */
0447         uint64_t did:8;
0448         /* Must be zero */
0449         uint64_t reserved_0_39:40;
0450 #else
0451         uint64_t reserved_0_39:40;
0452         uint64_t did:8;
0453         uint64_t is_io:1;
0454         uint64_t reserved_49_61:13;
0455         uint64_t mem_region:2;
0456 #endif
0457     } snull_rd;
0458 } cvmx_pow_load_addr_t;
0459 
0460 /**
0461  * This structure defines the response to a load/SENDSINGLE to POW
0462  * (except CSR reads)
0463  */
0464 typedef union {
0465     uint64_t u64;
0466 
0467     /**
0468      * Response to new work request loads
0469      */
0470     struct {
0471 #ifdef __BIG_ENDIAN_BITFIELD
0472         /*
0473          * Set when no new work queue entry was returned.
0474          * If there was de-scheduled work, the HW will
0475          * definitely return it. When this bit is set, it
0476          * could either mean:
0477          *
0478          * - There was no work, or
0479          *
0480          * - There was no work that the HW could find. This
0481          *   case can happen, regardless of the wait bit value
0482          *   in the original request, when there is work in
0483          *   the IQ's that is too deep down the list.
0484          */
0485         uint64_t no_work:1;
0486         /* Must be zero */
0487         uint64_t reserved_40_62:23;
0488         /* 36 in O1 -- the work queue pointer */
0489         uint64_t addr:40;
0490 #else
0491         uint64_t addr:40;
0492         uint64_t reserved_40_62:23;
0493         uint64_t no_work:1;
0494 #endif
0495     } s_work;
0496 
0497     /**
0498      * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
0499      */
0500     struct {
0501 #ifdef __BIG_ENDIAN_BITFIELD
0502         uint64_t reserved_62_63:2;
0503         /* Set when there is a pending non-NULL SWTAG or
0504          * SWTAG_FULL, and the POW entry has not left the list
0505          * for the original tag. */
0506         uint64_t pend_switch:1;
0507         /* Set when SWTAG_FULL and pend_switch is set. */
0508         uint64_t pend_switch_full:1;
0509         /*
0510          * Set when there is a pending NULL SWTAG, or an
0511          * implicit switch to NULL.
0512          */
0513         uint64_t pend_switch_null:1;
0514         /* Set when there is a pending DESCHED or SWTAG_DESCHED. */
0515         uint64_t pend_desched:1;
0516         /*
0517          * Set when there is a pending SWTAG_DESCHED and
0518          * pend_desched is set.
0519          */
0520         uint64_t pend_desched_switch:1;
0521         /* Set when nosched is desired and pend_desched is set. */
0522         uint64_t pend_nosched:1;
0523         /* Set when there is a pending GET_WORK. */
0524         uint64_t pend_new_work:1;
0525         /*
0526          * When pend_new_work is set, this bit indicates that
0527          * the wait bit was set.
0528          */
0529         uint64_t pend_new_work_wait:1;
0530         /* Set when there is a pending NULL_RD. */
0531         uint64_t pend_null_rd:1;
0532         /* Set when there is a pending CLR_NSCHED. */
0533         uint64_t pend_nosched_clr:1;
0534         uint64_t reserved_51:1;
0535         /* This is the index when pend_nosched_clr is set. */
0536         uint64_t pend_index:11;
0537         /*
0538          * This is the new_grp when (pend_desched AND
0539          * pend_desched_switch) is set.
0540          */
0541         uint64_t pend_grp:4;
0542         uint64_t reserved_34_35:2;
0543         /*
0544          * This is the tag type when pend_switch or
0545          * (pend_desched AND pend_desched_switch) are set.
0546          */
0547         uint64_t pend_type:2;
0548         /*
0549          * - this is the tag when pend_switch or (pend_desched
0550          *    AND pend_desched_switch) are set.
0551          */
0552         uint64_t pend_tag:32;
0553 #else
0554         uint64_t pend_tag:32;
0555         uint64_t pend_type:2;
0556         uint64_t reserved_34_35:2;
0557         uint64_t pend_grp:4;
0558         uint64_t pend_index:11;
0559         uint64_t reserved_51:1;
0560         uint64_t pend_nosched_clr:1;
0561         uint64_t pend_null_rd:1;
0562         uint64_t pend_new_work_wait:1;
0563         uint64_t pend_new_work:1;
0564         uint64_t pend_nosched:1;
0565         uint64_t pend_desched_switch:1;
0566         uint64_t pend_desched:1;
0567         uint64_t pend_switch_null:1;
0568         uint64_t pend_switch_full:1;
0569         uint64_t pend_switch:1;
0570         uint64_t reserved_62_63:2;
0571 #endif
0572     } s_sstatus0;
0573 
0574     /**
0575      * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
0576      */
0577     struct {
0578 #ifdef __BIG_ENDIAN_BITFIELD
0579         uint64_t reserved_62_63:2;
0580         /*
0581          * Set when there is a pending non-NULL SWTAG or
0582          * SWTAG_FULL, and the POW entry has not left the list
0583          * for the original tag.
0584          */
0585         uint64_t pend_switch:1;
0586         /* Set when SWTAG_FULL and pend_switch is set. */
0587         uint64_t pend_switch_full:1;
0588         /*
0589          * Set when there is a pending NULL SWTAG, or an
0590          * implicit switch to NULL.
0591          */
0592         uint64_t pend_switch_null:1;
0593         /*
0594          * Set when there is a pending DESCHED or
0595          * SWTAG_DESCHED.
0596          */
0597         uint64_t pend_desched:1;
0598         /*
0599          * Set when there is a pending SWTAG_DESCHED and
0600          * pend_desched is set.
0601          */
0602         uint64_t pend_desched_switch:1;
0603         /* Set when nosched is desired and pend_desched is set. */
0604         uint64_t pend_nosched:1;
0605         /* Set when there is a pending GET_WORK. */
0606         uint64_t pend_new_work:1;
0607         /*
0608          * When pend_new_work is set, this bit indicates that
0609          * the wait bit was set.
0610          */
0611         uint64_t pend_new_work_wait:1;
0612         /* Set when there is a pending NULL_RD. */
0613         uint64_t pend_null_rd:1;
0614         /* Set when there is a pending CLR_NSCHED. */
0615         uint64_t pend_nosched_clr:1;
0616         uint64_t reserved_51:1;
0617         /* This is the index when pend_nosched_clr is set. */
0618         uint64_t pend_index:11;
0619         /*
0620          * This is the new_grp when (pend_desched AND
0621          * pend_desched_switch) is set.
0622          */
0623         uint64_t pend_grp:4;
0624         /* This is the wqp when pend_nosched_clr is set. */
0625         uint64_t pend_wqp:36;
0626 #else
0627             uint64_t pend_wqp:36;
0628             uint64_t pend_grp:4;
0629             uint64_t pend_index:11;
0630             uint64_t reserved_51:1;
0631             uint64_t pend_nosched_clr:1;
0632             uint64_t pend_null_rd:1;
0633             uint64_t pend_new_work_wait:1;
0634             uint64_t pend_new_work:1;
0635             uint64_t pend_nosched:1;
0636             uint64_t pend_desched_switch:1;
0637             uint64_t pend_desched:1;
0638             uint64_t pend_switch_null:1;
0639             uint64_t pend_switch_full:1;
0640             uint64_t pend_switch:1;
0641             uint64_t reserved_62_63:2;
0642 #endif
0643     } s_sstatus1;
0644 
0645     /**
0646      * Result for a POW Status Load (when get_cur==1, get_wqp==0, and
0647      * get_rev==0)
0648      */
0649     struct {
0650 #ifdef __BIG_ENDIAN_BITFIELD
0651         uint64_t reserved_62_63:2;
0652         /*
0653          * Points to the next POW entry in the tag list when
0654          * tail == 0 (and tag_type is not NULL or NULL_NULL).
0655          */
0656         uint64_t link_index:11;
0657         /* The POW entry attached to the core. */
0658         uint64_t index:11;
0659         /*
0660          * The group attached to the core (updated when new
0661          * tag list entered on SWTAG_FULL).
0662          */
0663         uint64_t grp:4;
0664         /*
0665          * Set when this POW entry is at the head of its tag
0666          * list (also set when in the NULL or NULL_NULL
0667          * state).
0668          */
0669         uint64_t head:1;
0670         /*
0671          * Set when this POW entry is at the tail of its tag
0672          * list (also set when in the NULL or NULL_NULL
0673          * state).
0674          */
0675         uint64_t tail:1;
0676         /*
0677          * The tag type attached to the core (updated when new
0678          * tag list entered on SWTAG, SWTAG_FULL, or
0679          * SWTAG_DESCHED).
0680          */
0681         uint64_t tag_type:2;
0682         /*
0683          * The tag attached to the core (updated when new tag
0684          * list entered on SWTAG, SWTAG_FULL, or
0685          * SWTAG_DESCHED).
0686          */
0687         uint64_t tag:32;
0688 #else
0689             uint64_t tag:32;
0690             uint64_t tag_type:2;
0691             uint64_t tail:1;
0692             uint64_t head:1;
0693             uint64_t grp:4;
0694             uint64_t index:11;
0695             uint64_t link_index:11;
0696             uint64_t reserved_62_63:2;
0697 #endif
0698     } s_sstatus2;
0699 
0700     /**
0701      * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
0702      */
0703     struct {
0704 #ifdef __BIG_ENDIAN_BITFIELD
0705         uint64_t reserved_62_63:2;
0706         /*
0707          * Points to the prior POW entry in the tag list when
0708          * head == 0 (and tag_type is not NULL or
0709          * NULL_NULL). This field is unpredictable when the
0710          * core's state is NULL or NULL_NULL.
0711          */
0712         uint64_t revlink_index:11;
0713         /* The POW entry attached to the core. */
0714         uint64_t index:11;
0715         /*
0716          * The group attached to the core (updated when new
0717          * tag list entered on SWTAG_FULL).
0718          */
0719         uint64_t grp:4;
0720         /* Set when this POW entry is at the head of its tag
0721          * list (also set when in the NULL or NULL_NULL
0722          * state).
0723          */
0724         uint64_t head:1;
0725         /*
0726          * Set when this POW entry is at the tail of its tag
0727          * list (also set when in the NULL or NULL_NULL
0728          * state).
0729          */
0730         uint64_t tail:1;
0731         /*
0732          * The tag type attached to the core (updated when new
0733          * tag list entered on SWTAG, SWTAG_FULL, or
0734          * SWTAG_DESCHED).
0735          */
0736         uint64_t tag_type:2;
0737         /*
0738          * The tag attached to the core (updated when new tag
0739          * list entered on SWTAG, SWTAG_FULL, or
0740          * SWTAG_DESCHED).
0741          */
0742         uint64_t tag:32;
0743 #else
0744             uint64_t tag:32;
0745             uint64_t tag_type:2;
0746             uint64_t tail:1;
0747             uint64_t head:1;
0748             uint64_t grp:4;
0749             uint64_t index:11;
0750             uint64_t revlink_index:11;
0751             uint64_t reserved_62_63:2;
0752 #endif
0753     } s_sstatus3;
0754 
0755     /**
0756      * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
0757      * get_rev==0)
0758      */
0759     struct {
0760 #ifdef __BIG_ENDIAN_BITFIELD
0761         uint64_t reserved_62_63:2;
0762         /*
0763          * Points to the next POW entry in the tag list when
0764          * tail == 0 (and tag_type is not NULL or NULL_NULL).
0765          */
0766         uint64_t link_index:11;
0767         /* The POW entry attached to the core. */
0768         uint64_t index:11;
0769         /*
0770          * The group attached to the core (updated when new
0771          * tag list entered on SWTAG_FULL).
0772          */
0773         uint64_t grp:4;
0774         /*
0775          * The wqp attached to the core (updated when new tag
0776          * list entered on SWTAG_FULL).
0777          */
0778         uint64_t wqp:36;
0779 #else
0780             uint64_t wqp:36;
0781             uint64_t grp:4;
0782             uint64_t index:11;
0783             uint64_t link_index:11;
0784             uint64_t reserved_62_63:2;
0785 #endif
0786     } s_sstatus4;
0787 
0788     /**
0789      * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
0790      * get_rev==1)
0791      */
0792     struct {
0793 #ifdef __BIG_ENDIAN_BITFIELD
0794         uint64_t reserved_62_63:2;
0795         /*
0796          * Points to the prior POW entry in the tag list when
0797          * head == 0 (and tag_type is not NULL or
0798          * NULL_NULL). This field is unpredictable when the
0799          * core's state is NULL or NULL_NULL.
0800          */
0801         uint64_t revlink_index:11;
0802         /* The POW entry attached to the core. */
0803         uint64_t index:11;
0804         /*
0805          * The group attached to the core (updated when new
0806          * tag list entered on SWTAG_FULL).
0807          */
0808         uint64_t grp:4;
0809         /*
0810          * The wqp attached to the core (updated when new tag
0811          * list entered on SWTAG_FULL).
0812          */
0813         uint64_t wqp:36;
0814 #else
0815             uint64_t wqp:36;
0816             uint64_t grp:4;
0817             uint64_t index:11;
0818             uint64_t revlink_index:11;
0819             uint64_t reserved_62_63:2;
0820 #endif
0821     } s_sstatus5;
0822 
0823     /**
0824      * Result For POW Memory Load (get_des == 0 and get_wqp == 0)
0825      */
0826     struct {
0827 #ifdef __BIG_ENDIAN_BITFIELD
0828         uint64_t reserved_51_63:13;
0829         /*
0830          * The next entry in the input, free, descheduled_head
0831          * list (unpredictable if entry is the tail of the
0832          * list).
0833          */
0834         uint64_t next_index:11;
0835         /* The group of the POW entry. */
0836         uint64_t grp:4;
0837         uint64_t reserved_35:1;
0838         /*
0839          * Set when this POW entry is at the tail of its tag
0840          * list (also set when in the NULL or NULL_NULL
0841          * state).
0842          */
0843         uint64_t tail:1;
0844         /* The tag type of the POW entry. */
0845         uint64_t tag_type:2;
0846         /* The tag of the POW entry. */
0847         uint64_t tag:32;
0848 #else
0849             uint64_t tag:32;
0850             uint64_t tag_type:2;
0851             uint64_t tail:1;
0852             uint64_t reserved_35:1;
0853             uint64_t grp:4;
0854             uint64_t next_index:11;
0855             uint64_t reserved_51_63:13;
0856 #endif
0857     } s_smemload0;
0858 
0859     /**
0860      * Result For POW Memory Load (get_des == 0 and get_wqp == 1)
0861      */
0862     struct {
0863 #ifdef __BIG_ENDIAN_BITFIELD
0864         uint64_t reserved_51_63:13;
0865         /*
0866          * The next entry in the input, free, descheduled_head
0867          * list (unpredictable if entry is the tail of the
0868          * list).
0869          */
0870         uint64_t next_index:11;
0871         /* The group of the POW entry. */
0872         uint64_t grp:4;
0873         /* The WQP held in the POW entry. */
0874         uint64_t wqp:36;
0875 #else
0876             uint64_t wqp:36;
0877             uint64_t grp:4;
0878             uint64_t next_index:11;
0879             uint64_t reserved_51_63:13;
0880 #endif
0881     } s_smemload1;
0882 
0883     /**
0884      * Result For POW Memory Load (get_des == 1)
0885      */
0886     struct {
0887 #ifdef __BIG_ENDIAN_BITFIELD
0888         uint64_t reserved_51_63:13;
0889         /*
0890          * The next entry in the tag list connected to the
0891          * descheduled head.
0892          */
0893         uint64_t fwd_index:11;
0894         /* The group of the POW entry. */
0895         uint64_t grp:4;
0896         /* The nosched bit for the POW entry. */
0897         uint64_t nosched:1;
0898         /* There is a pending tag switch */
0899         uint64_t pend_switch:1;
0900         /*
0901          * The next tag type for the new tag list when
0902          * pend_switch is set.
0903          */
0904         uint64_t pend_type:2;
0905         /*
0906          * The next tag for the new tag list when pend_switch
0907          * is set.
0908          */
0909         uint64_t pend_tag:32;
0910 #else
0911             uint64_t pend_tag:32;
0912             uint64_t pend_type:2;
0913             uint64_t pend_switch:1;
0914             uint64_t nosched:1;
0915             uint64_t grp:4;
0916             uint64_t fwd_index:11;
0917             uint64_t reserved_51_63:13;
0918 #endif
0919     } s_smemload2;
0920 
0921     /**
0922      * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
0923      */
0924     struct {
0925 #ifdef __BIG_ENDIAN_BITFIELD
0926         uint64_t reserved_52_63:12;
0927         /*
0928          * set when there is one or more POW entries on the
0929          * free list.
0930          */
0931         uint64_t free_val:1;
0932         /*
0933          * set when there is exactly one POW entry on the free
0934          * list.
0935          */
0936         uint64_t free_one:1;
0937         uint64_t reserved_49:1;
0938         /*
0939          * when free_val is set, indicates the first entry on
0940          * the free list.
0941          */
0942         uint64_t free_head:11;
0943         uint64_t reserved_37:1;
0944         /*
0945          * when free_val is set, indicates the last entry on
0946          * the free list.
0947          */
0948         uint64_t free_tail:11;
0949         /*
0950          * set when there is one or more POW entries on the
0951          * input Q list selected by qosgrp.
0952          */
0953         uint64_t loc_val:1;
0954         /*
0955          * set when there is exactly one POW entry on the
0956          * input Q list selected by qosgrp.
0957          */
0958         uint64_t loc_one:1;
0959         uint64_t reserved_23:1;
0960         /*
0961          * when loc_val is set, indicates the first entry on
0962          * the input Q list selected by qosgrp.
0963          */
0964         uint64_t loc_head:11;
0965         uint64_t reserved_11:1;
0966         /*
0967          * when loc_val is set, indicates the last entry on
0968          * the input Q list selected by qosgrp.
0969          */
0970         uint64_t loc_tail:11;
0971 #else
0972             uint64_t loc_tail:11;
0973             uint64_t reserved_11:1;
0974             uint64_t loc_head:11;
0975             uint64_t reserved_23:1;
0976             uint64_t loc_one:1;
0977             uint64_t loc_val:1;
0978             uint64_t free_tail:11;
0979             uint64_t reserved_37:1;
0980             uint64_t free_head:11;
0981             uint64_t reserved_49:1;
0982             uint64_t free_one:1;
0983             uint64_t free_val:1;
0984             uint64_t reserved_52_63:12;
0985 #endif
0986     } sindexload0;
0987 
0988     /**
0989      * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
0990      */
0991     struct {
0992 #ifdef __BIG_ENDIAN_BITFIELD
0993         uint64_t reserved_52_63:12;
0994         /*
0995          * set when there is one or more POW entries on the
0996          * nosched list.
0997          */
0998         uint64_t nosched_val:1;
0999         /*
1000          * set when there is exactly one POW entry on the
1001          * nosched list.
1002          */
1003         uint64_t nosched_one:1;
1004         uint64_t reserved_49:1;
1005         /*
1006          * when nosched_val is set, indicates the first entry
1007          * on the nosched list.
1008          */
1009         uint64_t nosched_head:11;
1010         uint64_t reserved_37:1;
1011         /*
1012          * when nosched_val is set, indicates the last entry
1013          * on the nosched list.
1014          */
1015         uint64_t nosched_tail:11;
1016         /*
1017          * set when there is one or more descheduled heads on
1018          * the descheduled list selected by qosgrp.
1019          */
1020         uint64_t des_val:1;
1021         /*
1022          * set when there is exactly one descheduled head on
1023          * the descheduled list selected by qosgrp.
1024          */
1025         uint64_t des_one:1;
1026         uint64_t reserved_23:1;
1027         /*
1028          * when des_val is set, indicates the first
1029          * descheduled head on the descheduled list selected
1030          * by qosgrp.
1031          */
1032         uint64_t des_head:11;
1033         uint64_t reserved_11:1;
1034         /*
1035          * when des_val is set, indicates the last descheduled
1036          * head on the descheduled list selected by qosgrp.
1037          */
1038         uint64_t des_tail:11;
1039 #else
1040             uint64_t des_tail:11;
1041             uint64_t reserved_11:1;
1042             uint64_t des_head:11;
1043             uint64_t reserved_23:1;
1044             uint64_t des_one:1;
1045             uint64_t des_val:1;
1046             uint64_t nosched_tail:11;
1047             uint64_t reserved_37:1;
1048             uint64_t nosched_head:11;
1049             uint64_t reserved_49:1;
1050             uint64_t nosched_one:1;
1051             uint64_t nosched_val:1;
1052             uint64_t reserved_52_63:12;
1053 #endif
1054     } sindexload1;
1055 
1056     /**
1057      * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
1058      */
1059     struct {
1060 #ifdef __BIG_ENDIAN_BITFIELD
1061         uint64_t reserved_39_63:25;
1062         /*
1063          * Set when this DRAM list is the current head
1064          * (i.e. is the next to be reloaded when the POW
1065          * hardware reloads a POW entry from DRAM). The POW
1066          * hardware alternates between the two DRAM lists
1067          * associated with a QOS level when it reloads work
1068          * from DRAM into the POW unit.
1069          */
1070         uint64_t rmt_is_head:1;
1071         /*
1072          * Set when the DRAM portion of the input Q list
1073          * selected by qosgrp contains one or more pieces of
1074          * work.
1075          */
1076         uint64_t rmt_val:1;
1077         /*
1078          * Set when the DRAM portion of the input Q list
1079          * selected by qosgrp contains exactly one piece of
1080          * work.
1081          */
1082         uint64_t rmt_one:1;
1083         /*
1084          * When rmt_val is set, indicates the first piece of
1085          * work on the DRAM input Q list selected by
1086          * qosgrp.
1087          */
1088         uint64_t rmt_head:36;
1089 #else
1090             uint64_t rmt_head:36;
1091             uint64_t rmt_one:1;
1092             uint64_t rmt_val:1;
1093             uint64_t rmt_is_head:1;
1094             uint64_t reserved_39_63:25;
1095 #endif
1096     } sindexload2;
1097 
1098     /**
1099      * Result For POW Index/Pointer Load (get_rmt ==
1100      * 1/get_des_get_tail == 1)
1101      */
1102     struct {
1103 #ifdef __BIG_ENDIAN_BITFIELD
1104         uint64_t reserved_39_63:25;
1105         /*
1106          * set when this DRAM list is the current head
1107          * (i.e. is the next to be reloaded when the POW
1108          * hardware reloads a POW entry from DRAM). The POW
1109          * hardware alternates between the two DRAM lists
1110          * associated with a QOS level when it reloads work
1111          * from DRAM into the POW unit.
1112          */
1113         uint64_t rmt_is_head:1;
1114         /*
1115          * set when the DRAM portion of the input Q list
1116          * selected by qosgrp contains one or more pieces of
1117          * work.
1118          */
1119         uint64_t rmt_val:1;
1120         /*
1121          * set when the DRAM portion of the input Q list
1122          * selected by qosgrp contains exactly one piece of
1123          * work.
1124          */
1125         uint64_t rmt_one:1;
1126         /*
1127          * when rmt_val is set, indicates the last piece of
1128          * work on the DRAM input Q list selected by
1129          * qosgrp.
1130          */
1131         uint64_t rmt_tail:36;
1132 #else
1133             uint64_t rmt_tail:36;
1134             uint64_t rmt_one:1;
1135             uint64_t rmt_val:1;
1136             uint64_t rmt_is_head:1;
1137             uint64_t reserved_39_63:25;
1138 #endif
1139     } sindexload3;
1140 
1141     /**
1142      * Response to NULL_RD request loads
1143      */
1144     struct {
1145 #ifdef __BIG_ENDIAN_BITFIELD
1146         uint64_t unused:62;
1147         /* of type enum cvmx_pow_tag_type. state is one of the
1148          * following:
1149          *
1150          * - CVMX_POW_TAG_TYPE_ORDERED
1151          * - CVMX_POW_TAG_TYPE_ATOMIC
1152          * - CVMX_POW_TAG_TYPE_NULL
1153          * - CVMX_POW_TAG_TYPE_NULL_NULL
1154          */
1155         uint64_t state:2;
1156 #else
1157             uint64_t state:2;
1158             uint64_t unused:62;
1159 #endif
1160     } s_null_rd;
1161 
1162 } cvmx_pow_tag_load_resp_t;
1163 
1164 /**
1165  * This structure describes the address used for stores to the POW.
1166  *  The store address is meaningful on stores to the POW.  The
1167  *  hardware assumes that an aligned 64-bit store was used for all
1168  *  these stores.  Note the assumption that the work queue entry is
1169  *  aligned on an 8-byte boundary (since the low-order 3 address bits
1170  *  must be zero).  Note that not all fields are used by all
1171  *  operations.
1172  *
1173  *  NOTE: The following is the behavior of the pending switch bit at the PP
1174  *   for POW stores (i.e. when did<7:3> == 0xc)
1175  *     - did<2:0> == 0      => pending switch bit is set
1176  *     - did<2:0> == 1      => no effect on the pending switch bit
1177  *     - did<2:0> == 3      => pending switch bit is cleared
1178  *     - did<2:0> == 7      => no effect on the pending switch bit
1179  *     - did<2:0> == others => must not be used
1180  *     - No other loads/stores have an effect on the pending switch bit
1181  *     - The switch bus from POW can clear the pending switch bit
1182  *
1183  *  NOTE: did<2:0> == 2 is used by the HW for a special single-cycle
1184  *  ADDWQ command that only contains the pointer. SW must never use
1185  *  did<2:0> == 2.
1186  */
1187 typedef union {
1188     /**
1189      * Unsigned 64 bit integer representation of store address
1190      */
1191     uint64_t u64;
1192 
1193     struct {
1194 #ifdef __BIG_ENDIAN_BITFIELD
1195         /* Memory region.  Should be CVMX_IO_SEG in most cases */
1196         uint64_t mem_reg:2;
1197         uint64_t reserved_49_61:13; /* Must be zero */
1198         uint64_t is_io:1;   /* Must be one */
1199         /* Device ID of POW.  Note that different sub-dids are used. */
1200         uint64_t did:8;
1201         uint64_t reserved_36_39:4;  /* Must be zero */
1202         /* Address field. addr<2:0> must be zero */
1203         uint64_t addr:36;
1204 #else
1205             uint64_t addr:36;
1206             uint64_t reserved_36_39:4;
1207             uint64_t did:8;
1208             uint64_t is_io:1;
1209             uint64_t reserved_49_61:13;
1210             uint64_t mem_reg:2;
1211 #endif
1212     } stag;
1213 } cvmx_pow_tag_store_addr_t;
1214 
1215 /**
1216  * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
1217  */
1218 typedef union {
1219     uint64_t u64;
1220 
1221     struct {
1222 #ifdef __BIG_ENDIAN_BITFIELD
1223         /*
1224          * the (64-bit word) location in scratchpad to write
1225          * to (if len != 0)
1226          */
1227         uint64_t scraddr:8;
1228         /* the number of words in the response (0 => no response) */
1229         uint64_t len:8;
1230         /* the ID of the device on the non-coherent bus */
1231         uint64_t did:8;
1232         uint64_t unused:36;
1233         /* if set, don't return load response until work is available */
1234         uint64_t wait:1;
1235         uint64_t unused2:3;
1236 #else
1237             uint64_t unused2:3;
1238             uint64_t wait:1;
1239             uint64_t unused:36;
1240             uint64_t did:8;
1241             uint64_t len:8;
1242             uint64_t scraddr:8;
1243 #endif
1244     } s;
1245 
1246 } cvmx_pow_iobdma_store_t;
1247 
1248 /* CSR typedefs have been moved to cvmx-csr-*.h */
1249 
1250 /**
1251  * Get the POW tag for this core. This returns the current
1252  * tag type, tag, group, and POW entry index associated with
1253  * this core. Index is only valid if the tag type isn't NULL_NULL.
1254  * If a tag switch is pending this routine returns the tag before
1255  * the tag switch, not after.
1256  *
1257  * Returns: Current tag
1258  */
1259 static inline cvmx_pow_tag_req_t cvmx_pow_get_current_tag(void)
1260 {
1261     cvmx_pow_load_addr_t load_addr;
1262     cvmx_pow_tag_load_resp_t load_resp;
1263     cvmx_pow_tag_req_t result;
1264 
1265     load_addr.u64 = 0;
1266     load_addr.sstatus.mem_region = CVMX_IO_SEG;
1267     load_addr.sstatus.is_io = 1;
1268     load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
1269     load_addr.sstatus.coreid = cvmx_get_core_num();
1270     load_addr.sstatus.get_cur = 1;
1271     load_resp.u64 = cvmx_read_csr(load_addr.u64);
1272     result.u64 = 0;
1273     result.s.grp = load_resp.s_sstatus2.grp;
1274     result.s.index = load_resp.s_sstatus2.index;
1275     result.s.type = load_resp.s_sstatus2.tag_type;
1276     result.s.tag = load_resp.s_sstatus2.tag;
1277     return result;
1278 }
1279 
1280 /**
1281  * Get the POW WQE for this core. This returns the work queue
1282  * entry currently associated with this core.
1283  *
1284  * Returns: WQE pointer
1285  */
1286 static inline struct cvmx_wqe *cvmx_pow_get_current_wqp(void)
1287 {
1288     cvmx_pow_load_addr_t load_addr;
1289     cvmx_pow_tag_load_resp_t load_resp;
1290 
1291     load_addr.u64 = 0;
1292     load_addr.sstatus.mem_region = CVMX_IO_SEG;
1293     load_addr.sstatus.is_io = 1;
1294     load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
1295     load_addr.sstatus.coreid = cvmx_get_core_num();
1296     load_addr.sstatus.get_cur = 1;
1297     load_addr.sstatus.get_wqp = 1;
1298     load_resp.u64 = cvmx_read_csr(load_addr.u64);
1299     return (struct cvmx_wqe *) cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
1300 }
1301 
1302 #ifndef CVMX_MF_CHORD
1303 #define CVMX_MF_CHORD(dest)     CVMX_RDHWR(dest, 30)
1304 #endif
1305 
1306 /**
1307  * Print a warning if a tag switch is pending for this core
1308  *
1309  * @function: Function name checking for a pending tag switch
1310  */
1311 static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
1312 {
1313     uint64_t switch_complete;
1314     CVMX_MF_CHORD(switch_complete);
1315     if (!switch_complete)
1316         pr_warn("%s called with tag switch in progress\n", function);
1317 }
1318 
1319 /**
1320  * Waits for a tag switch to complete by polling the completion bit.
1321  * Note that switches to NULL complete immediately and do not need
1322  * to be waited for.
1323  */
1324 static inline void cvmx_pow_tag_sw_wait(void)
1325 {
1326     const uint64_t MAX_CYCLES = 1ull << 31;
1327     uint64_t switch_complete;
1328     uint64_t start_cycle = cvmx_get_cycle();
1329     while (1) {
1330         CVMX_MF_CHORD(switch_complete);
1331         if (unlikely(switch_complete))
1332             break;
1333         if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) {
1334             pr_warn("Tag switch is taking a long time, possible deadlock\n");
1335             start_cycle = -MAX_CYCLES - 1;
1336         }
1337     }
1338 }
1339 
1340 /**
1341  * Synchronous work request.  Requests work from the POW.
1342  * This function does NOT wait for previous tag switches to complete,
1343  * so the caller must ensure that there is not a pending tag switch.
1344  *
1345  * @wait:   When set, call stalls until work becomes available, or times out.
1346  *       If not set, returns immediately.
1347  *
1348  * Returns: the WQE pointer from POW. Returns NULL if no work
1349  * was available.
1350  */
1351 static inline struct cvmx_wqe *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t
1352                                  wait)
1353 {
1354     cvmx_pow_load_addr_t ptr;
1355     cvmx_pow_tag_load_resp_t result;
1356 
1357     if (CVMX_ENABLE_POW_CHECKS)
1358         __cvmx_pow_warn_if_pending_switch(__func__);
1359 
1360     ptr.u64 = 0;
1361     ptr.swork.mem_region = CVMX_IO_SEG;
1362     ptr.swork.is_io = 1;
1363     ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
1364     ptr.swork.wait = wait;
1365 
1366     result.u64 = cvmx_read_csr(ptr.u64);
1367 
1368     if (result.s_work.no_work)
1369         return NULL;
1370     else
1371         return (struct cvmx_wqe *) cvmx_phys_to_ptr(result.s_work.addr);
1372 }
1373 
1374 /**
1375  * Synchronous work request.  Requests work from the POW.
1376  * This function waits for any previous tag switch to complete before
1377  * requesting the new work.
1378  *
1379  * @wait:   When set, call stalls until work becomes available, or times out.
1380  *       If not set, returns immediately.
1381  *
1382  * Returns: the WQE pointer from POW. Returns NULL if no work
1383  * was available.
1384  */
1385 static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
1386 {
1387     if (CVMX_ENABLE_POW_CHECKS)
1388         __cvmx_pow_warn_if_pending_switch(__func__);
1389 
1390     /* Must not have a switch pending when requesting work */
1391     cvmx_pow_tag_sw_wait();
1392     return cvmx_pow_work_request_sync_nocheck(wait);
1393 
1394 }
1395 
1396 /**
1397  * Synchronous null_rd request.  Requests a switch out of NULL_NULL POW state.
1398  * This function waits for any previous tag switch to complete before
1399  * requesting the null_rd.
1400  *
1401  * Returns: the POW state of type enum cvmx_pow_tag_type.
1402  */
1403 static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void)
1404 {
1405     cvmx_pow_load_addr_t ptr;
1406     cvmx_pow_tag_load_resp_t result;
1407 
1408     if (CVMX_ENABLE_POW_CHECKS)
1409         __cvmx_pow_warn_if_pending_switch(__func__);
1410 
1411     /* Must not have a switch pending when requesting work */
1412     cvmx_pow_tag_sw_wait();
1413 
1414     ptr.u64 = 0;
1415     ptr.snull_rd.mem_region = CVMX_IO_SEG;
1416     ptr.snull_rd.is_io = 1;
1417     ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
1418 
1419     result.u64 = cvmx_read_csr(ptr.u64);
1420 
1421     return (enum cvmx_pow_tag_type) result.s_null_rd.state;
1422 }
1423 
1424 /**
1425  * Asynchronous work request.  Work is requested from the POW unit,
1426  * and should later be checked with function
1427  * cvmx_pow_work_response_async.  This function does NOT wait for
1428  * previous tag switches to complete, so the caller must ensure that
1429  * there is not a pending tag switch.
1430  *
1431  * @scr_addr: Scratch memory address that response will be returned
1432  *        to, which is either a valid WQE, or a response with the
1433  *        invalid bit set.  Byte address, must be 8 byte aligned.
1434  *
1435  * @wait: 1 to cause response to wait for work to become available (or
1436  *    timeout), 0 to cause response to return immediately
1437  */
1438 static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
1439                                cvmx_pow_wait_t wait)
1440 {
1441     cvmx_pow_iobdma_store_t data;
1442 
1443     if (CVMX_ENABLE_POW_CHECKS)
1444         __cvmx_pow_warn_if_pending_switch(__func__);
1445 
1446     /* scr_addr must be 8 byte aligned */
1447     data.s.scraddr = scr_addr >> 3;
1448     data.s.len = 1;
1449     data.s.did = CVMX_OCT_DID_TAG_SWTAG;
1450     data.s.wait = wait;
1451     cvmx_send_single(data.u64);
1452 }
1453 
1454 /**
1455  * Asynchronous work request.  Work is requested from the POW unit,
1456  * and should later be checked with function
1457  * cvmx_pow_work_response_async.  This function waits for any previous
1458  * tag switch to complete before requesting the new work.
1459  *
1460  * @scr_addr: Scratch memory address that response will be returned
1461  *        to, which is either a valid WQE, or a response with the
1462  *        invalid bit set.  Byte address, must be 8 byte aligned.
1463  *
1464  * @wait: 1 to cause response to wait for work to become available (or
1465  *          timeout), 0 to cause response to return immediately
1466  */
1467 static inline void cvmx_pow_work_request_async(int scr_addr,
1468                            cvmx_pow_wait_t wait)
1469 {
1470     if (CVMX_ENABLE_POW_CHECKS)
1471         __cvmx_pow_warn_if_pending_switch(__func__);
1472 
1473     /* Must not have a switch pending when requesting work */
1474     cvmx_pow_tag_sw_wait();
1475     cvmx_pow_work_request_async_nocheck(scr_addr, wait);
1476 }
1477 
1478 /**
1479  * Gets result of asynchronous work request.  Performs a IOBDMA sync
1480  * to wait for the response.
1481  *
1482  * @scr_addr: Scratch memory address to get the result from. Byte
1483  *        address, must be 8 byte aligned.
1484  *
1485  * Returns: the WQE from the scratch register, or NULL if no
1486  * work was available.
1487  */
1488 static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr)
1489 {
1490     cvmx_pow_tag_load_resp_t result;
1491 
1492     CVMX_SYNCIOBDMA;
1493     result.u64 = cvmx_scratch_read64(scr_addr);
1494 
1495     if (result.s_work.no_work)
1496         return NULL;
1497     else
1498         return (struct cvmx_wqe *) cvmx_phys_to_ptr(result.s_work.addr);
1499 }
1500 
1501 /**
1502  * Checks if a work queue entry pointer returned by a work
1503  * request is valid.  It may be invalid due to no work
1504  * being available or due to a timeout.
1505  *
1506  * @wqe_ptr: pointer to a work queue entry returned by the POW
1507  *
1508  * Returns 0 if pointer is valid
1509  *     1 if invalid (no work was returned)
1510  */
1511 static inline uint64_t cvmx_pow_work_invalid(struct cvmx_wqe *wqe_ptr)
1512 {
1513     return wqe_ptr == NULL;
1514 }
1515 
1516 /**
1517  * Starts a tag switch to the provided tag value and tag type.
1518  * Completion for the tag switch must be checked for separately.  This
1519  * function does NOT update the work queue entry in dram to match tag
1520  * value and type, so the application must keep track of these if they
1521  * are important to the application.  This tag switch command must not
1522  * be used for switches to NULL, as the tag switch pending bit will be
1523  * set by the switch request, but never cleared by the hardware.
1524  *
1525  * NOTE: This should not be used when switching from a NULL tag.  Use
1526  * cvmx_pow_tag_sw_full() instead.
1527  *
1528  * This function does no checks, so the caller must ensure that any
1529  * previous tag switch has completed.
1530  *
1531  * @tag:      new tag value
1532  * @tag_type: new tag type (ordered or atomic)
1533  */
1534 static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
1535                        enum cvmx_pow_tag_type tag_type)
1536 {
1537     cvmx_addr_t ptr;
1538     cvmx_pow_tag_req_t tag_req;
1539 
1540     if (CVMX_ENABLE_POW_CHECKS) {
1541         cvmx_pow_tag_req_t current_tag;
1542         __cvmx_pow_warn_if_pending_switch(__func__);
1543         current_tag = cvmx_pow_get_current_tag();
1544         if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
1545             pr_warn("%s called with NULL_NULL tag\n", __func__);
1546         if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
1547             pr_warn("%s called with NULL tag\n", __func__);
1548         if ((current_tag.s.type == tag_type)
1549            && (current_tag.s.tag == tag))
1550             pr_warn("%s called to perform a tag switch to the same tag\n",
1551                 __func__);
1552         if (tag_type == CVMX_POW_TAG_TYPE_NULL)
1553             pr_warn("%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
1554                 __func__);
1555     }
1556 
1557     /*
1558      * Note that WQE in DRAM is not updated here, as the POW does
1559      * not read from DRAM once the WQE is in flight.  See hardware
1560      * manual for complete details.  It is the application's
1561      * responsibility to keep track of the current tag value if
1562      * that is important.
1563      */
1564 
1565     tag_req.u64 = 0;
1566     tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
1567     tag_req.s.tag = tag;
1568     tag_req.s.type = tag_type;
1569 
1570     ptr.u64 = 0;
1571     ptr.sio.mem_region = CVMX_IO_SEG;
1572     ptr.sio.is_io = 1;
1573     ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
1574 
1575     /* once this store arrives at POW, it will attempt the switch;
1576        software must wait for the switch to complete separately */
1577     cvmx_write_io(ptr.u64, tag_req.u64);
1578 }
1579 
1580 /**
1581  * Starts a tag switch to the provided tag value and tag type.
1582  * Completion for the tag switch must be checked for separately.  This
1583  * function does NOT update the work queue entry in DRAM to match tag
1584  * value and type, so the application must keep track of these if they
1585  * are important to the application.  This tag switch command must not
1586  * be used for switches to NULL, as the tag switch pending bit will be
1587  * set by the switch request, but never cleared by the hardware.
1588  *
1589  * NOTE: This should not be used when switching from a NULL tag.  Use
1590  * cvmx_pow_tag_sw_full() instead.
1591  *
1592  * This function waits for any previous tag switch to complete, and also
1593  * displays an error on tag switches to NULL.
1594  *
1595  * @tag:      new tag value
1596  * @tag_type: new tag type (ordered or atomic)
1597  */
1598 static inline void cvmx_pow_tag_sw(uint32_t tag,
1599                    enum cvmx_pow_tag_type tag_type)
1600 {
1601     if (CVMX_ENABLE_POW_CHECKS)
1602         __cvmx_pow_warn_if_pending_switch(__func__);
1603 
1604     /*
1605      * Note that WQE in DRAM is not updated here, as the POW does
1606      * not read from DRAM once the WQE is in flight.  See hardware
1607      * manual for complete details.  It is the application's
1608      * responsibility to keep track of the current tag value if
1609      * that is important.
1610      */
1611 
1612     /*
1613      * Ensure that there is not a pending tag switch, as a tag
1614      * switch cannot be started if a previous switch is still
1615      * pending.
1616      */
1617     cvmx_pow_tag_sw_wait();
1618     cvmx_pow_tag_sw_nocheck(tag, tag_type);
1619 }
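
/*
 * Illustrative usage (not part of the original SDK header): a sketch of a
 * per-flow critical section entered by switching to an ATOMIC tag.  The
 * flow_tag value and the update_flow_state() helper are application-side
 * assumptions.
 *
 *  cvmx_pow_tag_sw(flow_tag, CVMX_POW_TAG_TYPE_ATOMIC);
 *  cvmx_pow_tag_sw_wait();                // atomicity only holds once the switch completes
 *  update_flow_state();                   // exclusive per-flow update
 *  cvmx_pow_tag_sw(flow_tag, CVMX_POW_TAG_TYPE_ORDERED);
 */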
1620 
1621 /**
1622  * Starts a tag switch to the provided tag value and tag type.
1623  * Completion for the tag switch must be checked for separately.  This
1624  * function does NOT update the work queue entry in DRAM to match tag
1625  * value and type, so the application must keep track of these if they
1626  * are important to the application.  This tag switch command must not
1627  * be used for switches to NULL, as the tag switch pending bit will be
1628  * set by the switch request, but never cleared by the hardware.
1629  *
1630  * This function must be used for tag switches from NULL.
1631  *
1632  * This function does no checks, so the caller must ensure that any
1633  * previous tag switch has completed.
1634  *
1635  * @wqp:      pointer to work queue entry to submit.  This entry is
1636  *        updated to match the other parameters
1637  * @tag:      tag value to be assigned to work queue entry
1638  * @tag_type: type of tag
1639  * @group:    group value for the work queue entry.
1640  */
1641 static inline void cvmx_pow_tag_sw_full_nocheck(struct cvmx_wqe *wqp, uint32_t tag,
1642                         enum cvmx_pow_tag_type tag_type,
1643                         uint64_t group)
1644 {
1645     cvmx_addr_t ptr;
1646     cvmx_pow_tag_req_t tag_req;
1647 
1648     if (CVMX_ENABLE_POW_CHECKS) {
1649         cvmx_pow_tag_req_t current_tag;
1650         __cvmx_pow_warn_if_pending_switch(__func__);
1651         current_tag = cvmx_pow_get_current_tag();
1652         if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
1653             pr_warn("%s called with NULL_NULL tag\n", __func__);
1654         if ((current_tag.s.type == tag_type)
1655            && (current_tag.s.tag == tag))
1656             pr_warn("%s called to perform a tag switch to the same tag\n",
1657                 __func__);
1658         if (tag_type == CVMX_POW_TAG_TYPE_NULL)
1659             pr_warn("%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
1660                 __func__);
1661         if (wqp != cvmx_phys_to_ptr(0x80))
1662             if (wqp != cvmx_pow_get_current_wqp())
1663                 pr_warn("%s passed WQE(%p) doesn't match the address in the POW(%p)\n",
1664                     __func__, wqp,
1665                     cvmx_pow_get_current_wqp());
1666     }
1667 
1668     /*
1669      * Note that WQE in DRAM is not updated here, as the POW does
1670      * not read from DRAM once the WQE is in flight.  See hardware
1671      * manual for complete details.  It is the application's
1672      * responsibility to keep track of the current tag value if
1673      * that is important.
1674      */
1675 
1676     tag_req.u64 = 0;
1677     tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_FULL;
1678     tag_req.s.tag = tag;
1679     tag_req.s.type = tag_type;
1680     tag_req.s.grp = group;
1681 
1682     ptr.u64 = 0;
1683     ptr.sio.mem_region = CVMX_IO_SEG;
1684     ptr.sio.is_io = 1;
1685     ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
1686     ptr.sio.offset = CAST64(wqp);
1687 
1688     /*
1689      * once this store arrives at POW, it will attempt the switch;
1690      * software must wait for the switch to complete separately.
1691      */
1692     cvmx_write_io(ptr.u64, tag_req.u64);
1693 }
1694 
1695 /**
1696  * Starts a tag switch to the provided tag value and tag type.
1697  * Completion for the tag switch must be checked for separately.  This
1698  * function does NOT update the work queue entry in DRAM to match tag
1699  * value and type, so the application must keep track of these if they
1700  * are important to the application.  This tag switch command must not
1701  * be used for switches to NULL, as the tag switch pending bit will be
1702  * set by the switch request, but never cleared by the hardware.
1703  *
1704  * This function must be used for tag switches from NULL.
1705  *
1706  * This function waits for any pending tag switches to complete
1707  * before requesting the tag switch.
1708  *
1709  * @wqp:      pointer to work queue entry to submit.  This entry is updated
1710  *        to match the other parameters
1711  * @tag:      tag value to be assigned to work queue entry
1712  * @tag_type: type of tag
1713  * @group:    group value for the work queue entry.
1714  */
1715 static inline void cvmx_pow_tag_sw_full(struct cvmx_wqe *wqp, uint32_t tag,
1716                     enum cvmx_pow_tag_type tag_type,
1717                     uint64_t group)
1718 {
1719     if (CVMX_ENABLE_POW_CHECKS)
1720         __cvmx_pow_warn_if_pending_switch(__func__);
1721 
1722     /*
1723      * Ensure that there is not a pending tag switch, as a tag
1724      * switch cannot be started if a previous switch is still
1725      * pending.
1726      */
1727     cvmx_pow_tag_sw_wait();
1728     cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
1729 }
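
/*
 * Illustrative usage (not part of the original SDK header): after picking
 * up work while holding a NULL tag, the "full" variant must be used for
 * the first switch.  The wqe, tag and group values are application-side
 * assumptions.
 *
 *  cvmx_pow_tag_sw_full(wqe, tag, CVMX_POW_TAG_TYPE_ORDERED, group);
 *  cvmx_pow_tag_sw_wait();                // wait before relying on the new tag
 */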
1730 
1731 /**
1732  * Switch to a NULL tag, which ends any ordering or
1733  * synchronization provided by the POW for the current
1734  * work queue entry.  This operation completes immediately,
1735  * so completion should not be waited for.
1736  * This function does NOT wait for previous tag switches to complete,
1737  * so the caller must ensure that any previous tag switches have completed.
1738  */
1739 static inline void cvmx_pow_tag_sw_null_nocheck(void)
1740 {
1741     cvmx_addr_t ptr;
1742     cvmx_pow_tag_req_t tag_req;
1743 
1744     if (CVMX_ENABLE_POW_CHECKS) {
1745         cvmx_pow_tag_req_t current_tag;
1746         __cvmx_pow_warn_if_pending_switch(__func__);
1747         current_tag = cvmx_pow_get_current_tag();
1748         if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
1749             pr_warn("%s called with NULL_NULL tag\n", __func__);
1750         if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
1751             pr_warn("%s called when we already have a NULL tag\n",
1752                 __func__);
1753     }
1754 
1755     tag_req.u64 = 0;
1756     tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
1757     tag_req.s.type = CVMX_POW_TAG_TYPE_NULL;
1758 
1759     ptr.u64 = 0;
1760     ptr.sio.mem_region = CVMX_IO_SEG;
1761     ptr.sio.is_io = 1;
1762     ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
1763 
1764     cvmx_write_io(ptr.u64, tag_req.u64);
1765 
1766     /* switch to NULL completes immediately */
1767 }
1768 
1769 /**
1770  * Switch to a NULL tag, which ends any ordering or
1771  * synchronization provided by the POW for the current
1772  * work queue entry.  This operation completes immediately,
1773  * so completion should not be waited for.
1774  * This function waits for any pending tag switches to complete
1775  * before requesting the switch to NULL.
1776  */
1777 static inline void cvmx_pow_tag_sw_null(void)
1778 {
1779     if (CVMX_ENABLE_POW_CHECKS)
1780         __cvmx_pow_warn_if_pending_switch(__func__);
1781 
1782     /*
1783      * Ensure that there is not a pending tag switch, as a tag
1784      * switch cannot be started if a previous switch is still
1785      * pending.
1786      */
1787     cvmx_pow_tag_sw_wait();
1788     cvmx_pow_tag_sw_null_nocheck();
1789 
1790     /* switch to NULL completes immediately */
1791 }
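
/*
 * Illustrative usage (not part of the original SDK header): release POW
 * ordering for the current work before a long computation that needs no
 * synchronization.  long_computation() is an application-side assumption.
 *
 *  cvmx_pow_tag_sw_null();                // completes immediately, no wait needed
 *  long_computation();
 */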
1792 
1793 /**
1794  * Submits work to an input queue.  This function updates the work
1795  * queue entry in DRAM to match the arguments given.  Note that the
1796  * tag provided is for the work queue entry submitted, and is
1797  * unrelated to the tag that the core currently holds.
1798  *
1799  * @wqp:      pointer to work queue entry to submit.  This entry is
1800  *        updated to match the other parameters
1801  * @tag:      tag value to be assigned to work queue entry
1802  * @tag_type: type of tag
1803  * @qos:      Input queue to add to.
1804  * @grp:      group value for the work queue entry.
1805  */
1806 static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag,
1807                     enum cvmx_pow_tag_type tag_type,
1808                     uint64_t qos, uint64_t grp)
1809 {
1810     cvmx_addr_t ptr;
1811     cvmx_pow_tag_req_t tag_req;
1812 
1813     wqp->word1.tag = tag;
1814     wqp->word1.tag_type = tag_type;
1815 
1816     cvmx_wqe_set_qos(wqp, qos);
1817     cvmx_wqe_set_grp(wqp, grp);
1818 
1819     tag_req.u64 = 0;
1820     tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ;
1821     tag_req.s.type = tag_type;
1822     tag_req.s.tag = tag;
1823     tag_req.s.qos = qos;
1824     tag_req.s.grp = grp;
1825 
1826     ptr.u64 = 0;
1827     ptr.sio.mem_region = CVMX_IO_SEG;
1828     ptr.sio.is_io = 1;
1829     ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
1830     ptr.sio.offset = cvmx_ptr_to_phys(wqp);
1831 
1832     /*
1833      * SYNC write to memory before the work submit.  This is
1834      * necessary as POW may read values from DRAM at this time.
1835      */
1836     CVMX_SYNCWS;
1837     cvmx_write_io(ptr.u64, tag_req.u64);
1838 }
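
/*
 * Illustrative usage (not part of the original SDK header): hand a
 * software-built work queue entry to the POW.  The wqe allocation, the tag
 * value 0x1234 and the qos/group choice of 0 are application-side
 * assumptions.
 *
 *  cvmx_pow_work_submit(wqe, 0x1234, CVMX_POW_TAG_TYPE_ORDERED,
 *                       0, 0);            // qos 0, group 0
 */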
1839 
1840 /**
1841  * This function sets the group mask for a core.  The group mask
1842  * indicates which groups each core will accept work from. There are
1843  * 16 groups.
1844  *
1845  * @core_num:   core to apply mask to
1846  * @mask:   Group mask. There are 16 groups, so only bits 0-15 are valid,
1847  *       representing groups 0-15.
1848  *       Each 1 bit in the mask enables the core to accept work from
1849  *       the corresponding group.
1850  */
1851 static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
1852 {
1853     union cvmx_pow_pp_grp_mskx grp_msk;
1854 
1855     grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
1856     grp_msk.s.grp_msk = mask;
1857     cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
1858 }
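
/*
 * Illustrative usage (not part of the original SDK header): restrict the
 * current core to work from groups 0 and 1 only.  This assumes
 * cvmx_get_core_num() from cvmx.h is available to the caller.
 *
 *  cvmx_pow_set_group_mask(cvmx_get_core_num(), 0x3);   // bits 0 and 1 set
 */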
1859 
1860 /**
1861  * This function sets POW static priorities for a core. Each input queue has
1862  * an associated priority value.
1863  *
1864  * @core_num:   core to apply priorities to
1865  * @priority:   Vector of 8 priorities, one per POW Input Queue (0-7).
1866  *           Highest priority is 0 and lowest is 7. A priority value
1867  *           of 0xF instructs POW to skip the Input Queue when
1868  *           scheduling to this specific core.
1869  *           NOTE: priorities should not have gaps in values, meaning
1870  *             {0,1,1,1,1,1,1,1} is a valid configuration while
1871  *             {0,2,2,2,2,2,2,2} is not.
1872  */
1873 static inline void cvmx_pow_set_priority(uint64_t core_num,
1874                      const uint8_t priority[])
1875 {
1876     /* POW priorities are supported on CN5xxx and later */
1877     if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
1878         union cvmx_pow_pp_grp_mskx grp_msk;
1879 
1880         grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
1881         grp_msk.s.qos0_pri = priority[0];
1882         grp_msk.s.qos1_pri = priority[1];
1883         grp_msk.s.qos2_pri = priority[2];
1884         grp_msk.s.qos3_pri = priority[3];
1885         grp_msk.s.qos4_pri = priority[4];
1886         grp_msk.s.qos5_pri = priority[5];
1887         grp_msk.s.qos6_pri = priority[6];
1888         grp_msk.s.qos7_pri = priority[7];
1889 
1890         /* Detect gaps between priorities and flag error */
1891         {
1892             int i;
1893             uint32_t prio_mask = 0;
1894 
1895             for (i = 0; i < 8; i++)
1896                 if (priority[i] != 0xF)
1897                     prio_mask |= 1 << priority[i];
1898 
1899             if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {
1900                 pr_err("POW static priorities should be "
1901                        "contiguous (0x%llx)\n",
1902                      (unsigned long long)prio_mask);
1903                 return;
1904             }
1905         }
1906 
1907         cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
1908     }
1909 }
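
/*
 * Illustrative usage (not part of the original SDK header): give input
 * queue 0 strictly higher priority than queues 1-7, which share the next
 * level; the values are contiguous as required by the NOTE above.  This
 * assumes cvmx_get_core_num() from cvmx.h is available to the caller.
 *
 *  const uint8_t prio[8] = { 0, 1, 1, 1, 1, 1, 1, 1 };
 *
 *  cvmx_pow_set_priority(cvmx_get_core_num(), prio);
 */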
1910 
1911 /**
1912  * Performs a tag switch and then an immediate deschedule. This completes
1913  * immediately, so completion must not be waited for.  This function does NOT
1914  * update the wqe in DRAM to match arguments.
1915  *
1916  * This function does NOT wait for any prior tag switches to complete, so the
1917  * calling code must do this.
1918  *
1919  * Note the following CAVEAT of the Octeon HW behavior when
1920  * re-scheduling DE-SCHEDULEd items whose (next) state is
1921  * ORDERED:
1922  *   - If there are no switches pending at the time that the
1923  *     HW executes the de-schedule, the HW will only re-schedule
1924  *     the head of the FIFO associated with the given tag. This
1925  *     means that in many respects, the HW treats this ORDERED
1926  *     tag as an ATOMIC tag. Note that in the SWTAG_DESCH
1927  *     case (to an ORDERED tag), the HW will do the switch
1928  *     before the deschedule whenever it is possible to do
1929  *     the switch immediately, so it may often look like
1930  *     this case.
1931  *   - If there is a pending switch to ORDERED at the time
1932  *     the HW executes the de-schedule, the HW will perform
1933  *     the switch at the time it re-schedules, and will be
1934  *     able to reschedule any/all of the entries with the
1935  *     same tag.
1936  * Due to this behavior, the RECOMMENDATION to software is
1937  * that it use a (next) state of ATOMIC when it
1938  * DE-SCHEDULES. If an ORDERED tag is what was really desired,
1939  * SW can choose to immediately switch to an ORDERED tag
1940  * after the work (that has an ATOMIC tag) is re-scheduled.
1941  * Note that since there are never any tag switches pending
1942  * when the HW re-schedules, this switch can be IMMEDIATE upon
1943  * the reception of the pointer during the re-schedule.
1944  *
1945  * @tag:      New tag value
1946  * @tag_type: New tag type
1947  * @group:    New group value
1948  * @no_sched: Control whether this work queue entry will be rescheduled.
1949  *         - 1 : don't schedule this work
1950  *         - 0 : allow this work to be scheduled.
1951  */
1952 static inline void cvmx_pow_tag_sw_desched_nocheck(
1953     uint32_t tag,
1954     enum cvmx_pow_tag_type tag_type,
1955     uint64_t group,
1956     uint64_t no_sched)
1957 {
1958     cvmx_addr_t ptr;
1959     cvmx_pow_tag_req_t tag_req;
1960 
1961     if (CVMX_ENABLE_POW_CHECKS) {
1962         cvmx_pow_tag_req_t current_tag;
1963         __cvmx_pow_warn_if_pending_switch(__func__);
1964         current_tag = cvmx_pow_get_current_tag();
1965         if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
1966             pr_warn("%s called with NULL_NULL tag\n", __func__);
1967         if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
1968             pr_warn("%s called with NULL tag. Deschedule not allowed from NULL state\n",
1969                 __func__);
1970         if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC)
1971             && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC))
1972             pr_warn("%s called where neither the before nor after tag is ATOMIC\n",
1973                 __func__);
1974     }
1975 
1976     tag_req.u64 = 0;
1977     tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
1978     tag_req.s.tag = tag;
1979     tag_req.s.type = tag_type;
1980     tag_req.s.grp = group;
1981     tag_req.s.no_sched = no_sched;
1982 
1983     ptr.u64 = 0;
1984     ptr.sio.mem_region = CVMX_IO_SEG;
1985     ptr.sio.is_io = 1;
1986     ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
1987     /*
1988      * since TAG3 is used, this store will clear the local pending
1989      * switch bit.
1990      */
1991     cvmx_write_io(ptr.u64, tag_req.u64);
1992 }
1993 
1994 /**
1995  * Performs a tag switch and then an immediate deschedule. This completes
1996  * immediately, so completion must not be waited for.  This function does NOT
1997  * update the wqe in DRAM to match arguments.
1998  *
1999  * This function waits for any prior tag switches to complete, so the
2000  * calling code may call this function with a pending tag switch.
2001  *
2002  * Note the following CAVEAT of the Octeon HW behavior when
2003  * re-scheduling DE-SCHEDULEd items whose (next) state is
2004  * ORDERED:
2005  *   - If there are no switches pending at the time that the
2006  *     HW executes the de-schedule, the HW will only re-schedule
2007  *     the head of the FIFO associated with the given tag. This
2008  *     means that in many respects, the HW treats this ORDERED
2009  *     tag as an ATOMIC tag. Note that in the SWTAG_DESCH
2010  *     case (to an ORDERED tag), the HW will do the switch
2011  *     before the deschedule whenever it is possible to do
2012  *     the switch immediately, so it may often look like
2013  *     this case.
2014  *   - If there is a pending switch to ORDERED at the time
2015  *     the HW executes the de-schedule, the HW will perform
2016  *     the switch at the time it re-schedules, and will be
2017  *     able to reschedule any/all of the entries with the
2018  *     same tag.
2019  * Due to this behavior, the RECOMMENDATION to software is
2020  * that it use a (next) state of ATOMIC when it
2021  * DE-SCHEDULES. If an ORDERED tag is what was really desired,
2022  * SW can choose to immediately switch to an ORDERED tag
2023  * after the work (that has an ATOMIC tag) is re-scheduled.
2024  * Note that since there are never any tag switches pending
2025  * when the HW re-schedules, this switch can be IMMEDIATE upon
2026  * the reception of the pointer during the re-schedule.
2027  *
2028  * @tag:      New tag value
2029  * @tag_type: New tag type
2030  * @group:    New group value
2031  * @no_sched: Control whether this work queue entry will be rescheduled.
2032  *         - 1 : don't schedule this work
2033  *         - 0 : allow this work to be scheduled.
2034  */
2035 static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
2036                        enum cvmx_pow_tag_type tag_type,
2037                        uint64_t group, uint64_t no_sched)
2038 {
2039     if (CVMX_ENABLE_POW_CHECKS)
2040         __cvmx_pow_warn_if_pending_switch(__func__);
2041 
2042     /* Need to make sure any writes to the work queue entry are complete */
2043     CVMX_SYNCWS;
2044     /*
2045      * Ensure that there is not a pending tag switch, as a tag
2046      * switch cannot be started if a previous switch is still
2047      * pending.
2048      */
2049     cvmx_pow_tag_sw_wait();
2050     cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
2051 }
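
/*
 * Illustrative usage (not part of the original SDK header): deschedule the
 * current work following the recommendation above, i.e. with an ATOMIC
 * (next) state, and allow it to be rescheduled later.  The tag and group
 * values are application-side assumptions.
 *
 *  cvmx_pow_tag_sw_desched(tag, CVMX_POW_TAG_TYPE_ATOMIC, group, 0);
 */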
2052 
2053 /**
2054  * Deschedules the current work queue entry.
2055  *
2056  * @no_sched: no schedule flag value to be set on the work queue
2057  *        entry.  If this is set the entry will not be
2058  *        rescheduled.
2059  */
2060 static inline void cvmx_pow_desched(uint64_t no_sched)
2061 {
2062     cvmx_addr_t ptr;
2063     cvmx_pow_tag_req_t tag_req;
2064 
2065     if (CVMX_ENABLE_POW_CHECKS) {
2066         cvmx_pow_tag_req_t current_tag;
2067         __cvmx_pow_warn_if_pending_switch(__func__);
2068         current_tag = cvmx_pow_get_current_tag();
2069         if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
2070             pr_warn("%s called with NULL_NULL tag\n", __func__);
2071         if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
2072             pr_warn("%s called with NULL tag. Deschedule not expected from NULL state\n",
2073                 __func__);
2074     }
2075 
2076     /* Need to make sure any writes to the work queue entry are complete */
2077     CVMX_SYNCWS;
2078 
2079     tag_req.u64 = 0;
2080     tag_req.s.op = CVMX_POW_TAG_OP_DESCH;
2081     tag_req.s.no_sched = no_sched;
2082 
2083     ptr.u64 = 0;
2084     ptr.sio.mem_region = CVMX_IO_SEG;
2085     ptr.sio.is_io = 1;
2086     ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
2087     /*
2088      * since TAG3 is used, this store will clear the local pending
2089      * switch bit.
2090      */
2091     cvmx_write_io(ptr.u64, tag_req.u64);
2092 }
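
/*
 * Illustrative usage (not part of the original SDK header): deschedule the
 * current work without changing its tag and allow it to be rescheduled.
 *
 *  cvmx_pow_desched(0);
 */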
2093 
2094 /****************************************************
2095 * Define usage of bits within the 32 bit tag values.
2096 *****************************************************/
2097 
2098 /*
2099  * Number of bits of the tag used by software.  The SW bits are always
2100  * a contiguous block of the high bits starting at bit 31.  The hardware
2101  * bits are always the low bits.  By default, the top 8 bits of the
2102  * tag are reserved for software, and the low 24 are set by the IPD
2103  * unit.
2104  */
2105 #define CVMX_TAG_SW_BITS    (8)
2106 #define CVMX_TAG_SW_SHIFT   (32 - CVMX_TAG_SW_BITS)
2107 
2108 /* Below is the list of values for the top 8 bits of the tag. */
2109 /*
2110  * Tag values with top byte of this value are reserved for internal
2111  * executive uses.
2112  */
2113 #define CVMX_TAG_SW_BITS_INTERNAL  0x1
2114 /* The executive divides the remaining 24 bits as follows:
2115  *  - the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
2116  *
2117  *  - the lower 16 bits (bits 15 - 0 of the tag) are the value
2118  *    within the subgroup
2119  *
2120  * Note that this section describes the format of tags generated by
2121  * software - refer to the hardware documentation for a description of
2122  * the tags values generated by the packet input hardware.  Subgroups
2123  * are defined here.
2124  */
2125 /* Mask for the value portion of the tag */
2126 #define CVMX_TAG_SUBGROUP_MASK  0xFFFF
2127 #define CVMX_TAG_SUBGROUP_SHIFT 16
2128 #define CVMX_TAG_SUBGROUP_PKO  0x1
2129 
2130 /* End of executive tag subgroup definitions */
2131 
2132 /*
2133  * The remaining software bit values 0x2 - 0xff are available
2134  * for application use.
2135  */
2136 
2137 /**
2138  * This function creates a 32 bit tag value from the two values provided.
2139  *
2140  * @sw_bits: The upper bits (number depends on configuration) are set
2141  *       to this value.  The remainder of bits are set by the
2142  *       hw_bits parameter.
2143  *
2144  * @hw_bits: The lower bits (number depends on configuration) are set
2145  *       to this value.  The remainder of bits are set by the
2146  *       sw_bits parameter.
2147  *
2148  * Returns 32 bit value of the combined hw and sw bits.
2149  */
2150 static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
2151 {
2152     return ((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) <<
2153             CVMX_TAG_SW_SHIFT) |
2154         (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
2155 }
2156 
2157 /**
2158  * Extracts the bits allocated for software use from the tag
2159  *
2160  * @tag:    32 bit tag value
2161  *
2162  * Returns N bit software tag value, where N is configurable with the
2163  * CVMX_TAG_SW_BITS define
2164  */
2165 static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
2166 {
2167     return (tag >> (32 - CVMX_TAG_SW_BITS)) &
2168         cvmx_build_mask(CVMX_TAG_SW_BITS);
2169 }
2170 
2171 /**
2172  *
2173  * Extracts the bits allocated for hardware use from the tag
2174  *
2175  * @tag:    32 bit tag value
2176  *
2177  * Returns (32 - N) bit hardware tag value, where N is configurable
2178  * with the CVMX_TAG_SW_BITS define
2179  */
2180 static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
2181 {
2182     return tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS);
2183 }
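
/*
 * Illustrative usage (not part of the original SDK header): with the
 * default CVMX_TAG_SW_BITS of 8, the composed tag below is 0x02001234 and
 * the two getters recover the original fields.
 *
 *  uint32_t tag = cvmx_pow_tag_compose(0x2, 0x1234);    // 0x02001234
 *  uint32_t sw  = cvmx_pow_tag_get_sw_bits(tag);        // 0x02
 *  uint32_t hw  = cvmx_pow_tag_get_hw_bits(tag);        // 0x001234
 */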
2184 
2185 /**
2186  * Store the current POW internal state into the supplied
2187  * buffer. It is recommended that you pass a buffer of at least
2188  * 128KB. The format of the capture may change based on SDK
2189  * version and Octeon chip.
2190  *
2191  * @buffer: Buffer to store capture into
2192  * @buffer_size:
2193  *       The size of the supplied buffer
2194  *
2195  * Returns Zero on success, negative on failure
2196  */
2197 extern int cvmx_pow_capture(void *buffer, int buffer_size);
2198 
2199 /**
2200  * Dump a POW capture to the console in a human readable format.
2201  *
2202  * @buffer: POW capture from cvmx_pow_capture()
2203  * @buffer_size:
2204  *       Size of the buffer
2205  */
2206 extern void cvmx_pow_display(void *buffer, int buffer_size);
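
/*
 * Illustrative usage (not part of the original SDK header): capture and
 * dump POW state for debugging.  The buffer size follows the 128KB
 * recommendation above; a static buffer is shown only for brevity.
 *
 *  static uint8_t pow_buf[128 * 1024];
 *
 *  if (cvmx_pow_capture(pow_buf, sizeof(pow_buf)) == 0)
 *      cvmx_pow_display(pow_buf, sizeof(pow_buf));
 */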
2207 
2208 /**
2209  * Return the number of POW entries supported by this chip
2210  *
2211  * Returns Number of POW entries
2212  */
2213 extern int cvmx_pow_get_num_entries(void);
2214 
2215 #endif /* __CVMX_POW_H__ */