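/*
 * X.25 call and data forwarding.
 *
 * Relays X.25 call requests and data packets between two neighbour
 * interfaces.  Active forwardings are kept on x25_forward_list,
 * protected by x25_forward_list_lock.
 */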
#define pr_fmt(fmt) "X25: " fmt

#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/x25.h>

LIST_HEAD(x25_forward_list);
DEFINE_RWLOCK(x25_forward_list_lock);

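/*
 * x25_forward_call - forward an incoming X.25 call request.
 * @dest_addr: called address taken from the request
 * @from: neighbour the call request arrived on
 * @skb: the call request frame
 * @lci: logical channel identifier of the call
 *
 * Looks up a route to @dest_addr, records the (lci, dev1, dev2)
 * forwarding pair on x25_forward_list and transmits a clone of @skb on
 * the outgoing neighbour.  Returns 1 if the call was forwarded, 0 if it
 * was not (no route, no neighbour, or the route points back at the
 * originating device), or -ENOMEM if the forwarding entry could not be
 * allocated.
 */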
int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
                     struct sk_buff *skb, int lci)
{
        struct x25_route *rt;
        struct x25_neigh *neigh_new = NULL;
        struct x25_forward *x25_frwd, *new_frwd;
        struct sk_buff *skbn;
        short same_lci = 0;
        int rc = 0;

        if ((rt = x25_get_route(dest_addr)) == NULL)
                goto out_no_route;

        if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
                /* A route exists but no neighbour is attached to the
                 * outgoing device: nothing sensible to do but give up.
                 */
                goto out_put_route;
        }

        /* Avoid a loop: the route points back out of the device the
         * call arrived on (e.g. a system with a single X.25 interface
         * and a default route).
         */
        if (rt->dev == from->dev) {
                goto out_put_nb;
        }

        /* Is the remote end sending a call request on an LCI that is
         * already being forwarded?  It shouldn't happen, but check.
         */
        read_lock_bh(&x25_forward_list_lock);
        list_for_each_entry(x25_frwd, &x25_forward_list, node) {
                if (x25_frwd->lci == lci) {
                        pr_warn("call request for lci which is already registered!, transmitting but not registering new pair\n");
                        same_lci = 1;
                }
        }
        read_unlock_bh(&x25_forward_list_lock);

        /* New LCI: remember which pair of devices forwards this call. */
        if (!same_lci) {
                if ((new_frwd = kmalloc(sizeof(struct x25_forward),
                                        GFP_ATOMIC)) == NULL) {
                        rc = -ENOMEM;
                        goto out_put_nb;
                }
                new_frwd->lci = lci;
                new_frwd->dev1 = rt->dev;
                new_frwd->dev2 = from->dev;
                write_lock_bh(&x25_forward_list_lock);
                list_add(&new_frwd->node, &x25_forward_list);
                write_unlock_bh(&x25_forward_list_lock);
        }

        /* Forward the call request itself on the outgoing neighbour. */
        if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
                goto out_put_nb;
        }
        x25_transmit_link(skbn, neigh_new);
        rc = 1;

out_put_nb:
        x25_neigh_put(neigh_new);

out_put_route:
        x25_route_put(rt);

out_no_route:
        return rc;
}

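/*
 * x25_forward_data - relay a data packet on a forwarded call.
 * @lci: logical channel identifier of the call
 * @from: neighbour the packet arrived on
 * @skb: the packet to relay
 *
 * Looks up the forwarding pair registered for @lci and retransmits a
 * copy of @skb on the opposite device of the pair.  Returns 1 if the
 * packet was forwarded, 0 otherwise.
 */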
int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb)
{
        struct x25_forward *frwd;
        struct net_device *peer = NULL;
        struct x25_neigh *nb;
        struct sk_buff *skbn;
        int rc = 0;

        read_lock_bh(&x25_forward_list_lock);
        list_for_each_entry(frwd, &x25_forward_list, node) {
                if (frwd->lci == lci) {
                        /* Relay out of the other device of the pair. */
                        if (from->dev == frwd->dev1) {
                                peer = frwd->dev2;
                        } else {
                                peer = frwd->dev1;
                        }
                        break;
                }
        }
        read_unlock_bh(&x25_forward_list_lock);

        if ((nb = x25_get_neigh(peer)) == NULL)
                goto out;

        if ((skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL) {
                goto output;
        }
        x25_transmit_link(skbn, nb);

        rc = 1;
output:
        x25_neigh_put(nb);
out:
        return rc;
}

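/*
 * x25_clear_forward_by_lci - drop the forwarding entry for @lci, e.g.
 * once the forwarded call has been cleared.
 */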
void x25_clear_forward_by_lci(unsigned int lci)
{
        struct x25_forward *fwd, *tmp;

        write_lock_bh(&x25_forward_list_lock);

        list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
                if (fwd->lci == lci) {
                        list_del(&fwd->node);
                        kfree(fwd);
                }
        }
        write_unlock_bh(&x25_forward_list_lock);
}

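/*
 * x25_clear_forward_by_dev - drop every forwarding entry that uses
 * @dev as either end of the pair, e.g. when the device goes down.
 */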
void x25_clear_forward_by_dev(struct net_device *dev)
{
        struct x25_forward *fwd, *tmp;

        write_lock_bh(&x25_forward_list_lock);

        list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
                if ((fwd->dev1 == dev) || (fwd->dev2 == dev)) {
                        list_del(&fwd->node);
                        kfree(fwd);
                }
        }
        write_unlock_bh(&x25_forward_list_lock);
}