/* VLAN EVENTS */
VLAN_HEADER_TOO_SMALL, /**< vlan header smaller than minimum size */
VLAN_UNKNOWN_TYPE, /**< vlan unknown type */
+ VLAN_HEADER_TOO_MANY_LAYERS,
/* RAW EVENTS */
IPRAW_INVALID_IPV, /**< invalid ip version in ip raw */
*/
/**
 * \brief Decode an 802.1Q VLAN header and hand the payload to the
 *        next-layer decoder.
 *
 * Supports up to two stacked VLAN layers (QinQ). Each decoded layer's
 * header pointer and VLAN id are stored in the packet at index
 * p->vlan_idx, which is then incremented; the ids feed into flow
 * hashing/compare elsewhere.
 *
 * \param tv  thread vars (perf counter context)
 * \param dtv decode thread vars (holds counter_vlan)
 * \param p   packet being decoded; vlanh[], vlan_id[], vlan_idx updated
 * \param pkt pointer to the start of the VLAN header
 * \param len number of bytes available from pkt onward
 * \param pq  packet queue passed through to the next decoder
 *
 * Events set: VLAN_HEADER_TOO_SMALL, VLAN_HEADER_TOO_MANY_LAYERS,
 * VLAN_UNKNOWN_TYPE.
 */
void DecodeVLAN(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, uint8_t *pkt, uint16_t len, PacketQueue *pq)
{
    uint32_t proto;

    SCPerfCounterIncr(dtv->counter_vlan, tv->sc_perf_pca);

    if(len < VLAN_HEADER_LEN) {
        ENGINE_SET_EVENT(p,VLAN_HEADER_TOO_SMALL);
        return;
    }
    /* only two stacked VLAN layers (QinQ) are supported */
    if (p->vlan_idx >= 2) {
        ENGINE_SET_EVENT(p,VLAN_HEADER_TOO_MANY_LAYERS);
        return;
    }

    p->vlanh[p->vlan_idx] = (VLANHdr *)pkt;
    if(p->vlanh[p->vlan_idx] == NULL)
        return;

    /* read the encapsulated protocol once; reused in the log, switch
     * and default-case below */
    proto = GET_VLAN_PROTO(p->vlanh[p->vlan_idx]);

    SCLogDebug("p %p pkt %p VLAN protocol %04x VLAN PRI %d VLAN CFI %d VLAN ID %d Len: %" PRId32 "",
            p, pkt, proto, GET_VLAN_PRIORITY(p->vlanh[p->vlan_idx]),
            GET_VLAN_CFI(p->vlanh[p->vlan_idx]), GET_VLAN_ID(p->vlanh[p->vlan_idx]), len);

    /* record this layer's id and advance to the next slot */
    p->vlan_id[p->vlan_idx] = (uint16_t)GET_VLAN_ID(p->vlanh[p->vlan_idx]);
    p->vlan_idx++;

    switch (proto) {
        case ETHERNET_TYPE_IP:
            /* NOTE(review): the patched source repeated the
             * "len - VLAN_HEADER_LEN, pq);" argument line here, which
             * would not compile; the duplicate is removed. */
            DecodeIPV4(tv, dtv, p, pkt + VLAN_HEADER_LEN,
                       len - VLAN_HEADER_LEN, pq);
            break;
        case ETHERNET_TYPE_VLAN:
            /* guard before recursing so the event is set without
             * touching a third header */
            if (p->vlan_idx >= 2) {
                ENGINE_SET_EVENT(p,VLAN_HEADER_TOO_MANY_LAYERS);
                return;
            } else {
                DecodeVLAN(tv, dtv, p, pkt + VLAN_HEADER_LEN,
                           len - VLAN_HEADER_LEN, pq);
            }
            break;
        default:
            SCLogDebug("unknown VLAN type: %" PRIx32 "", proto);
            ENGINE_SET_EVENT(p,VLAN_UNKNOWN_TYPE);
            return;
    }
}
* has the exact same tuple as the lower levels */
uint8_t recursion_level;
+ uint16_t vlan_id[2];
+ uint8_t vlan_idx;
+
/* Pkt Flags */
uint32_t flags;
GREHdr *greh;
- VLANHdr *vlanh;
+ VLANHdr *vlanh[2];
/* ptr to the payload of the packet
* with it's length. */
(p)->flags = (p)->flags & PKT_ALLOC; \
(p)->flowflags = 0; \
(p)->pkt_src = 0; \
+ (p)->vlan_id[0] = 0; \
+ (p)->vlan_id[1] = 0; \
+ (p)->vlan_idx = 0; \
FlowDeReference(&((p)->flow)); \
(p)->ts.tv_sec = 0; \
(p)->ts.tv_usec = 0; \
(p)->pppoesh = NULL; \
(p)->pppoedh = NULL; \
(p)->greh = NULL; \
- (p)->vlanh = NULL; \
+ (p)->vlanh[0] = NULL; \
+ (p)->vlanh[1] = NULL; \
(p)->payload = NULL; \
(p)->payload_len = 0; \
(p)->pktlen = 0; \
{ "ipraw.invalid_ip_version",IPRAW_INVALID_IPV, },
{ "vlan.header_too_small",VLAN_HEADER_TOO_SMALL, },
{ "vlan.unknown_type",VLAN_UNKNOWN_TYPE, },
+ { "vlan.too_many_layers", VLAN_HEADER_TOO_MANY_LAYERS, },
{ "ipv4.frag_too_large", IPV4_FRAG_PKT_TOO_LARGE, },
{ "ipv4.frag_overlap", IPV4_FRAG_OVERLAP, },
{ "ipv6.frag_too_large", IPV6_FRAG_PKT_TOO_LARGE, },
uint16_t sp, dp;
uint16_t proto; /**< u16 so proto and recur add up to u32 */
uint16_t recur; /**< u16 so proto and recur add up to u32 */
+ uint16_t vlan_id[2];
};
- uint32_t u32[4];
+ const uint32_t u32[5];
};
} FlowHashKey4;
uint16_t sp, dp;
uint16_t proto; /**< u16 so proto and recur add up to u32 */
uint16_t recur; /**< u16 so proto and recur add up to u32 */
+ uint16_t vlan_id[2];
};
- uint32_t u32[10];
+ const uint32_t u32[11];
};
} FlowHashKey6;
}
fhk.proto = (uint16_t)p->proto;
fhk.recur = (uint16_t)p->recursion_level;
+ fhk.vlan_id[0] = p->vlan_id[0];
+ fhk.vlan_id[1] = p->vlan_id[1];
- uint32_t hash = hashword(fhk.u32, 4, flow_config.hash_rand);
+ uint32_t hash = hashword(fhk.u32, 5, flow_config.hash_rand);
key = hash % flow_config.hash_size;
} else if (ICMPV4_DEST_UNREACH_IS_VALID(p)) {
}
fhk.proto = (uint16_t)ICMPV4_GET_EMB_PROTO(p);
fhk.recur = (uint16_t)p->recursion_level;
+ fhk.vlan_id[0] = p->vlan_id[0];
+ fhk.vlan_id[1] = p->vlan_id[1];
- uint32_t hash = hashword(fhk.u32, 4, flow_config.hash_rand);
+ uint32_t hash = hashword(fhk.u32, 5, flow_config.hash_rand);
key = hash % flow_config.hash_size;
} else {
fhk.dp = 0xbeef;
fhk.proto = (uint16_t)p->proto;
fhk.recur = (uint16_t)p->recursion_level;
+ fhk.vlan_id[0] = p->vlan_id[0];
+ fhk.vlan_id[1] = p->vlan_id[1];
- uint32_t hash = hashword(fhk.u32, 4, flow_config.hash_rand);
+ uint32_t hash = hashword(fhk.u32, 5, flow_config.hash_rand);
key = hash % flow_config.hash_size;
}
} else if (p->ip6h != NULL) {
}
fhk.proto = (uint16_t)p->proto;
fhk.recur = (uint16_t)p->recursion_level;
+ fhk.vlan_id[0] = p->vlan_id[0];
+ fhk.vlan_id[1] = p->vlan_id[1];
- uint32_t hash = hashword(fhk.u32, 10, flow_config.hash_rand);
+ uint32_t hash = hashword(fhk.u32, 11, flow_config.hash_rand);
key = hash % flow_config.hash_size;
} else
key = 0;
CMP_ADDR(&(f1)->dst, &(f2)->src) && \
CMP_PORT((f1)->sp, (f2)->dp) && CMP_PORT((f1)->dp, (f2)->sp))) && \
(f1)->proto == (f2)->proto && \
- (f1)->recursion_level == (f2)->recursion_level)
+ (f1)->recursion_level == (f2)->recursion_level && \
+ (f1)->vlan_id[0] == (f2)->vlan_id[0] && \
+ (f1)->vlan_id[1] == (f2)->vlan_id[1])
/**
* \brief See if a ICMP packet belongs to a flow by comparing the embedded
f->sp == p->icmpv4vars.emb_sport &&
f->dp == p->icmpv4vars.emb_dport &&
f->proto == ICMPV4_GET_EMB_PROTO(p) &&
- f->recursion_level == p->recursion_level)
+ f->recursion_level == p->recursion_level &&
+ f->vlan_id[0] == p->vlan_id[0] &&
+ f->vlan_id[1] == p->vlan_id[1])
{
return 1;
f->dp == p->icmpv4vars.emb_sport &&
f->sp == p->icmpv4vars.emb_dport &&
f->proto == ICMPV4_GET_EMB_PROTO(p) &&
- f->recursion_level == p->recursion_level)
+ f->recursion_level == p->recursion_level &&
+ f->vlan_id[0] == p->vlan_id[0] &&
+ f->vlan_id[1] == p->vlan_id[1])
{
return 1;
}
f->proto = p->proto;
f->recursion_level = p->recursion_level;
+ f->vlan_id[0] = p->vlan_id[0];
+ f->vlan_id[1] = p->vlan_id[1];
if (PKT_IS_IPV4(p)) {
FLOW_SET_IPV4_SRC_ADDR_FROM_PACKET(p, &f->src);
};
uint8_t proto;
uint8_t recursion_level;
+ uint16_t vlan_id[2];
/* end of flow "header" */