// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications.  All rights reserved. */

#include "cxgb4.h"

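/* MPS TCAM (MAC filter) entries may be shared by several users within the
 * driver.  Each hardware entry in use is tracked on adap->mps_ref, under
 * adap->mps_ref_lock, with a reference count, so a hardware filter is only
 * freed once its last user has dropped its reference.
 *
 * Rough usage sketch (callers and values are illustrative only):
 *
 *	ret = cxgb4_alloc_raw_mac_filt(adap, viid, addr, mask, idx,
 *				       lookup_type, port_id, true);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	cxgb4_free_raw_mac_filt(adap, viid, addr, mask, idx,
 *				lookup_type, port_id, true);
 */

/* cxgb4_mps_ref_dec_by_mac - drop a reference on the tracked entry matching
 * @addr/@mask; a NULL @mask matches against an all-ones mask.  Return 0 if
 * the last reference was dropped (the caller should then free the hardware
 * filter), -EBUSY if other users remain, or -EINVAL if no entry matches.
 */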
static int cxgb4_mps_ref_dec_by_mac(struct adapter *adap,
				    const u8 *addr, const u8 *mask)
{
	u8 bitmask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mps_entries_ref *mps_entry, *tmp;
	int ret = -EINVAL;

	spin_lock_bh(&adap->mps_ref_lock);
	list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
		if (ether_addr_equal(mps_entry->addr, addr) &&
		    ether_addr_equal(mps_entry->mask, mask ? mask : bitmask)) {
			if (!refcount_dec_and_test(&mps_entry->refcnt)) {
				spin_unlock_bh(&adap->mps_ref_lock);
				return -EBUSY;
			}
			list_del(&mps_entry->list);
			kfree(mps_entry);
			ret = 0;
			break;
		}
	}
	spin_unlock_bh(&adap->mps_ref_lock);
	return ret;
}

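/* cxgb4_mps_ref_dec - drop a reference on the tracked entry for TCAM index
 * @idx.  Return 0 if the last reference was dropped (the hardware filter
 * can then be freed), -EBUSY if other users remain, or -EINVAL if @idx is
 * not tracked.
 */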
static int cxgb4_mps_ref_dec(struct adapter *adap, u16 idx)
{
	struct mps_entries_ref *mps_entry, *tmp;
	int ret = -EINVAL;

	spin_lock_bh(&adap->mps_ref_lock);
	list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
		if (mps_entry->idx == idx) {
			if (!refcount_dec_and_test(&mps_entry->refcnt)) {
				spin_unlock_bh(&adap->mps_ref_lock);
				return -EBUSY;
			}
			list_del(&mps_entry->list);
			kfree(mps_entry);
			ret = 0;
			break;
		}
	}
	spin_unlock_bh(&adap->mps_ref_lock);
	return ret;
}

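/* cxgb4_mps_ref_inc - take a reference on the tracked entry for TCAM index
 * @idx, creating a new entry (at refcount 1) if none exists yet.  The
 * allocation uses GFP_ATOMIC because it happens under mps_ref_lock with
 * bottom halves disabled.  Return 0 on success or -ENOMEM.
 */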
static int cxgb4_mps_ref_inc(struct adapter *adap, const u8 *mac_addr,
			     u16 idx, const u8 *mask)
{
	u8 bitmask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mps_entries_ref *mps_entry;
	int ret = 0;

	spin_lock_bh(&adap->mps_ref_lock);
	list_for_each_entry(mps_entry, &adap->mps_ref, list) {
		if (mps_entry->idx == idx) {
			refcount_inc(&mps_entry->refcnt);
			goto unlock;
		}
	}
	mps_entry = kzalloc(sizeof(*mps_entry), GFP_ATOMIC);
	if (!mps_entry) {
		ret = -ENOMEM;
		goto unlock;
	}
	ether_addr_copy(mps_entry->mask, mask ? mask : bitmask);
	ether_addr_copy(mps_entry->addr, mac_addr);
	mps_entry->idx = idx;
	refcount_set(&mps_entry->refcnt, 1);
	list_add_tail(&mps_entry->list, &adap->mps_ref);
unlock:
	spin_unlock_bh(&adap->mps_ref_lock);
	return ret;
}

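/* cxgb4_free_mac_filt - release references on the MAC addresses in @addr[]
 * and free the corresponding hardware filters for those whose reference
 * count dropped to zero; entries still referenced elsewhere stay in
 * hardware.  Return @naddr on success or a negative error from
 * t4_free_mac_filt().
 */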
int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
			unsigned int naddr, const u8 **addr, bool sleep_ok)
{
	int ret, i;

	for (i = 0; i < naddr; i++) {
		if (!cxgb4_mps_ref_dec_by_mac(adap, addr[i], NULL)) {
			ret = t4_free_mac_filt(adap, adap->mbox, viid,
					       1, &addr[i], sleep_ok);
			if (ret < 0)
				return ret;
		}
	}

	return naddr;
}

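/* cxgb4_alloc_mac_filt - wrapper around t4_alloc_mac_filt() that takes an
 * MPS reference for every address that was given an exact-match TCAM slot
 * (an index of 0xffff marks addresses that did not get a slot).  On a
 * refcounting failure the addresses are released via cxgb4_free_mac_filt().
 */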
int cxgb4_alloc_mac_filt(struct adapter *adap, unsigned int viid,
			 bool free, unsigned int naddr, const u8 **addr,
			 u16 *idx, u64 *hash, bool sleep_ok)
{
	int ret, i;

	ret = t4_alloc_mac_filt(adap, adap->mbox, viid, free,
				naddr, addr, idx, hash, sleep_ok);
	if (ret < 0)
		return ret;

	for (i = 0; i < naddr; i++) {
		if (idx[i] != 0xffff) {
			if (cxgb4_mps_ref_inc(adap, addr[i], idx[i], NULL)) {
				ret = -ENOMEM;
				goto error;
			}
		}
	}

	return ret;

error:
	cxgb4_free_mac_filt(adap, viid, naddr, addr, sleep_ok);
	return ret;
}

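/* cxgb4_update_mac_filt - change the MAC filter via cxgb4_change_mac() and
 * take an MPS reference on the resulting *@tcam_idx.  Note that a failure
 * of the reference bump is not propagated to the caller.
 */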
int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
			  int *tcam_idx, const u8 *addr,
			  bool persistent, u8 *smt_idx)
{
	int ret;

	ret = cxgb4_change_mac(pi, viid, tcam_idx,
			       addr, persistent, smt_idx);
	if (ret < 0)
		return ret;

	cxgb4_mps_ref_inc(pi->adapter, addr, *tcam_idx, NULL);
	return ret;
}

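/* cxgb4_free_raw_mac_filt - drop the reference on raw filter @idx and free
 * the hardware entry if this was its last user.  A remaining reference or
 * an untracked @idx leaves the hardware untouched and returns 0.
 */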
int cxgb4_free_raw_mac_filt(struct adapter *adap,
			    unsigned int viid,
			    const u8 *addr,
			    const u8 *mask,
			    unsigned int idx,
			    u8 lookup_type,
			    u8 port_id,
			    bool sleep_ok)
{
	int ret = 0;

	if (!cxgb4_mps_ref_dec(adap, idx))
		ret = t4_free_raw_mac_filt(adap, viid, addr,
					   mask, idx, lookup_type,
					   port_id, sleep_ok);

	return ret;
}

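/* cxgb4_alloc_raw_mac_filt - program a raw MAC filter (explicit @addr and
 * @mask at slot @idx) and take an MPS reference on the index returned by
 * t4_alloc_raw_mac_filt().  If the reference cannot be taken, the hardware
 * entry is freed again and -ENOMEM is returned.
 */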
int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
			     unsigned int viid,
			     const u8 *addr,
			     const u8 *mask,
			     unsigned int idx,
			     u8 lookup_type,
			     u8 port_id,
			     bool sleep_ok)
{
	int ret;

	ret = t4_alloc_raw_mac_filt(adap, viid, addr,
				    mask, idx, lookup_type,
				    port_id, sleep_ok);
	if (ret < 0)
		return ret;

	if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
		ret = -ENOMEM;
		t4_free_raw_mac_filt(adap, viid, addr,
				     mask, idx, lookup_type,
				     port_id, sleep_ok);
	}

	return ret;
}

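/* cxgb4_free_encap_mac_filt - encapsulation-filter counterpart of
 * cxgb4_free_raw_mac_filt(): the hardware entry at @idx is only freed once
 * its reference count drops to zero.
 */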
int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
			      int idx, bool sleep_ok)
{
	int ret = 0;

	if (!cxgb4_mps_ref_dec(adap, idx))
		ret = t4_free_encap_mac_filt(adap, viid, idx, sleep_ok);

	return ret;
}

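/* cxgb4_alloc_encap_mac_filt - program an outer (encapsulated) MAC filter,
 * matching on @addr/@mask and the tunnel VNI (@vni/@vni_mask, e.g. for
 * VXLAN), and take an MPS reference on the returned index.  On a
 * refcounting failure the hardware entry is freed and -ENOMEM returned.
 */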
int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
			       const u8 *addr, const u8 *mask,
			       unsigned int vni, unsigned int vni_mask,
			       u8 dip_hit, u8 lookup_type, bool sleep_ok)
{
	int ret;

	ret = t4_alloc_encap_mac_filt(adap, viid, addr, mask, vni, vni_mask,
				      dip_hit, lookup_type, sleep_ok);
	if (ret < 0)
		return ret;

	if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
		ret = -ENOMEM;
		t4_free_encap_mac_filt(adap, viid, ret, sleep_ok);
	}
	return ret;
}

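/* cxgb4_init_mps_ref_entries - initialize the MPS reference tracking lock
 * and list; expected to be called once during adapter initialization.
 */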
int cxgb4_init_mps_ref_entries(struct adapter *adap)
{
	spin_lock_init(&adap->mps_ref_lock);
	INIT_LIST_HEAD(&adap->mps_ref);

	return 0;
}

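/* cxgb4_free_mps_ref_entries - release any tracking entries still on the
 * list at teardown.  The hardware filters themselves are not touched here.
 */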
void cxgb4_free_mps_ref_entries(struct adapter *adap)
{
	struct mps_entries_ref *mps_entry, *tmp;

	if (list_empty(&adap->mps_ref))
		return;

	spin_lock_bh(&adap->mps_ref_lock);
	list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
		list_del(&mps_entry->list);
		kfree(mps_entry);
	}
	spin_unlock_bh(&adap->mps_ref_lock);
}