summaryrefslogtreecommitdiff
path: root/meta-security/recipes-ids/suricata
diff options
context:
space:
mode:
Diffstat (limited to 'meta-security/recipes-ids/suricata')
-rw-r--r--meta-security/recipes-ids/suricata/files/CVE-2024-37151.patch53
-rw-r--r--meta-security/recipes-ids/suricata/files/CVE-2024-38534.patch44
-rw-r--r--meta-security/recipes-ids/suricata/files/CVE-2024-38535.patch57
-rw-r--r--meta-security/recipes-ids/suricata/files/CVE-2024-38535_pre.patch292
-rw-r--r--meta-security/recipes-ids/suricata/files/CVE-2024-38536.patch40
-rw-r--r--meta-security/recipes-ids/suricata/files/CVE-2024-45797.patch148
-rw-r--r--meta-security/recipes-ids/suricata/libhtp_0.5.45.bb8
-rw-r--r--meta-security/recipes-ids/suricata/suricata_7.0.0.bb20
8 files changed, 655 insertions, 7 deletions
diff --git a/meta-security/recipes-ids/suricata/files/CVE-2024-37151.patch b/meta-security/recipes-ids/suricata/files/CVE-2024-37151.patch
new file mode 100644
index 0000000000..7e5d8e2708
--- /dev/null
+++ b/meta-security/recipes-ids/suricata/files/CVE-2024-37151.patch
@@ -0,0 +1,53 @@
+From a6052dca1e27f3c8f96ec7be0fe7514c56a0d56f Mon Sep 17 00:00:00 2001
+From: Victor Julien <vjulien@oisf.net>
+Date: Tue, 4 Jun 2024 14:43:22 +0200
+Subject: [PATCH 1/4] defrag: don't use completed tracker
+
+When a Tracker is set up for an IPID, frags come in for it and it's
+reassembled and complete, the `DefragTracker::remove` flag is set. This
+is meant to tell the hash cleanup code to recycle the tracker and to let
+the lookup code skip the tracker during lookup.
+
+A logic error lead to the following scenario:
+
+1. there are sufficient frag trackers to make sure the hash table is
+ filled with trackers
+2. frags for a Packet with IPID X are processed correctly (X1)
+3. frags for a new Packet that also has IPID X come in quickly after the
+ first (X2).
+4. during the lookup, the frag for X2 hashes to a hash row that holds
+ more than one tracker
+5. as the trackers in hash row are evaluated, it finds the tracker for
+ X1, but since the `remove` bit is not checked, it is returned as the
+ tracker for X2.
+6. reassembly fails, as the tracker is already complete
+
+The logic error is that only for the first tracker in a row the `remove`
+bit was checked, leading to reuse of a closed tracker if there were more
+trackers in the hash row.
+
+Ticket: #7042.
+
+Upstream-Status: Backport from [https://github.com/OISF/suricata/commit/aab7f35c76721df19403a7c0c0025feae12f3b6b]
+CVE: CVE-2024-37151
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/defrag-hash.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/defrag-hash.c b/src/defrag-hash.c
+index 2f19ce2..87d40f9 100644
+--- a/src/defrag-hash.c
++++ b/src/defrag-hash.c
+@@ -591,7 +591,7 @@ DefragTracker *DefragGetTrackerFromHash (Packet *p)
+ return dt;
+ }
+
+- if (DefragTrackerCompare(dt, p) != 0) {
++ if (!dt->remove && DefragTrackerCompare(dt, p) != 0) {
+ /* we found our tracker, lets put it on top of the
+ * hash list -- this rewards active trackers */
+ if (dt->hnext) {
+--
+2.44.0
+
diff --git a/meta-security/recipes-ids/suricata/files/CVE-2024-38534.patch b/meta-security/recipes-ids/suricata/files/CVE-2024-38534.patch
new file mode 100644
index 0000000000..14a958cb11
--- /dev/null
+++ b/meta-security/recipes-ids/suricata/files/CVE-2024-38534.patch
@@ -0,0 +1,44 @@
+From f1645ea911d4e90b1be8ee5863e8e1a665079cce Mon Sep 17 00:00:00 2001
+From: Philippe Antoine <pantoine@oisf.net>
+Date: Thu, 25 Apr 2024 21:24:33 +0200
+Subject: [PATCH 2/4] modbus: abort flow parsing on flood
+
+Ticket: 6987
+
+Let's not spend more resources for a flow which is trying to
+make us do it...
+
+(cherry picked from commit 37509e8e0ed097f8e0174df754835ac60584fc72)
+
+Upstream-Status: Backport from [https://github.com/OISF/suricata/commit/a753cdbe84caee3b66d0bf49b2712d29a50d67ae]
+CVE: CVE-2024-38534
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ rust/src/modbus/modbus.rs | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/rust/src/modbus/modbus.rs b/rust/src/modbus/modbus.rs
+index 246e9ca..d2f7c6b 100644
+--- a/rust/src/modbus/modbus.rs
++++ b/rust/src/modbus/modbus.rs
+@@ -189,7 +189,7 @@ impl ModbusState {
+ None => {
+ let mut tx = match self.new_tx() {
+ Some(tx) => tx,
+- None => return AppLayerResult::ok(),
++ None => return AppLayerResult::err(),
+ };
+ tx.set_events_from_flags(&msg.error_flags);
+ tx.request = Some(msg);
+@@ -215,7 +215,7 @@ impl ModbusState {
+ None => {
+ let mut tx = match self.new_tx() {
+ Some(tx) => tx,
+- None => return AppLayerResult::ok(),
++ None => return AppLayerResult::err(),
+ };
+ if msg
+ .access_type
+--
+2.44.0
+
diff --git a/meta-security/recipes-ids/suricata/files/CVE-2024-38535.patch b/meta-security/recipes-ids/suricata/files/CVE-2024-38535.patch
new file mode 100644
index 0000000000..7ac72c8b19
--- /dev/null
+++ b/meta-security/recipes-ids/suricata/files/CVE-2024-38535.patch
@@ -0,0 +1,57 @@
+From 6b00dc36d7527f051c2346f03d20f8d9e5a60138 Mon Sep 17 00:00:00 2001
+From: Philippe Antoine <pantoine@oisf.net>
+Date: Mon, 17 Jun 2024 16:30:49 +0200
+Subject: [PATCH 3/4] http2: do not expand duplicate headers
+
+Ticket: 7104
+
+As this can cause a big memory allocation due to the quadratic
+nature of the HPACK compression.
+
+(cherry picked from commit 5bd17934df321b88f502d48afdd6cc8bad4787a7)
+
+Upstream-Status: Backport from [https://github.com/OISF/suricata/commit/c82fa5ca0d1ce0bd8f936e0b860707a6571373b2]
+CVE: CVE-2024-38535
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ rust/src/http2/detect.rs | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/rust/src/http2/detect.rs b/rust/src/http2/detect.rs
+index 99261ad..9c2f8ab 100644
+--- a/rust/src/http2/detect.rs
++++ b/rust/src/http2/detect.rs
+@@ -432,11 +432,11 @@ pub fn http2_frames_get_header_value_vec(
+ if found == 0 {
+ vec.extend_from_slice(&block.value);
+ found = 1;
+- } else if found == 1 {
++ } else if found == 1 && Rc::strong_count(&block.name) <= 2 {
+ vec.extend_from_slice(&[b',', b' ']);
+ vec.extend_from_slice(&block.value);
+ found = 2;
+- } else {
++ } else if Rc::strong_count(&block.name) <= 2 {
+ vec.extend_from_slice(&[b',', b' ']);
+ vec.extend_from_slice(&block.value);
+ }
+@@ -469,14 +469,14 @@ fn http2_frames_get_header_value<'a>(
+ if found == 0 {
+ single = Ok(&block.value);
+ found = 1;
+- } else if found == 1 {
++ } else if found == 1 && Rc::strong_count(&block.name) <= 2 {
+ if let Ok(s) = single {
+ vec.extend_from_slice(s);
+ }
+ vec.extend_from_slice(&[b',', b' ']);
+ vec.extend_from_slice(&block.value);
+ found = 2;
+- } else {
++ } else if Rc::strong_count(&block.name) <= 2 {
+ vec.extend_from_slice(&[b',', b' ']);
+ vec.extend_from_slice(&block.value);
+ }
+--
+2.44.0
+
diff --git a/meta-security/recipes-ids/suricata/files/CVE-2024-38535_pre.patch b/meta-security/recipes-ids/suricata/files/CVE-2024-38535_pre.patch
new file mode 100644
index 0000000000..2aa42c465a
--- /dev/null
+++ b/meta-security/recipes-ids/suricata/files/CVE-2024-38535_pre.patch
@@ -0,0 +1,292 @@
+From 390f09692eb99809c679d3f350c7cc185d163e1a Mon Sep 17 00:00:00 2001
+From: Philippe Antoine <pantoine@oisf.net>
+Date: Wed, 27 Mar 2024 14:33:54 +0100
+Subject: [PATCH] http2: use a reference counter for headers
+
+Ticket: 6892
+
+As HTTP hpack header compression allows one single byte to
+express a previously seen arbitrary-size header block (name+value)
+we should avoid copying the vectors' data, but just point
+to the same data, while remaining memory safe, even in the case
+of later headers eviction from the dynamic table.
+
+Rust std solution is Rc, and the use of clone, so long as the
+data is accessed by only one thread.
+
+Note: This patch is needed to patch CVE-2024-38535 as it defines Rc.
+Upstream-Status: Backport from [https://github.com/OISF/suricata/commit/390f09692eb99809c679d3f350c7cc185d163e1a]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ rust/src/http2/detect.rs | 19 +++++++------
+ rust/src/http2/http2.rs | 2 +-
+ rust/src/http2/parser.rs | 61 +++++++++++++++++++++-------------------
+ 3 files changed, 43 insertions(+), 39 deletions(-)
+
+diff --git a/rust/src/http2/detect.rs b/rust/src/http2/detect.rs
+index 9c2f8ab..e068a17 100644
+--- a/rust/src/http2/detect.rs
++++ b/rust/src/http2/detect.rs
+@@ -23,6 +23,7 @@ use crate::core::Direction;
+ use crate::detect::uint::{detect_match_uint, DetectUintData};
+ use std::ffi::CStr;
+ use std::str::FromStr;
++use std::rc::Rc;
+
+ fn http2_tx_has_frametype(
+ tx: &mut HTTP2Transaction, direction: Direction, value: u8,
+@@ -404,7 +405,7 @@ fn http2_frames_get_header_firstvalue<'a>(
+ for frame in frames {
+ if let Some(blocks) = http2_header_blocks(frame) {
+ for block in blocks.iter() {
+- if block.name == name.as_bytes() {
++ if block.name.as_ref() == name.as_bytes() {
+ return Ok(&block.value);
+ }
+ }
+@@ -428,7 +429,7 @@ pub fn http2_frames_get_header_value_vec(
+ for frame in frames {
+ if let Some(blocks) = http2_header_blocks(frame) {
+ for block in blocks.iter() {
+- if block.name == name.as_bytes() {
++ if block.name.as_ref() == name.as_bytes() {
+ if found == 0 {
+ vec.extend_from_slice(&block.value);
+ found = 1;
+@@ -465,7 +466,7 @@ fn http2_frames_get_header_value<'a>(
+ for frame in frames {
+ if let Some(blocks) = http2_header_blocks(frame) {
+ for block in blocks.iter() {
+- if block.name == name.as_bytes() {
++ if block.name.as_ref() == name.as_bytes() {
+ if found == 0 {
+ single = Ok(&block.value);
+ found = 1;
+@@ -905,8 +906,8 @@ fn http2_tx_set_header(state: &mut HTTP2State, name: &[u8], input: &[u8]) {
+ };
+ let mut blocks = Vec::new();
+ let b = parser::HTTP2FrameHeaderBlock {
+- name: name.to_vec(),
+- value: input.to_vec(),
++ name: Rc::new(name.to_vec()),
++ value: Rc::new(input.to_vec()),
+ error: parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
+ sizeupdate: 0,
+ };
+@@ -1061,15 +1062,15 @@ mod tests {
+ };
+ let mut blocks = Vec::new();
+ let b = parser::HTTP2FrameHeaderBlock {
+- name: "Host".as_bytes().to_vec(),
+- value: "abc.com".as_bytes().to_vec(),
++ name: "Host".as_bytes().to_vec().into(),
++ value: "abc.com".as_bytes().to_vec().into(),
+ error: parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
+ sizeupdate: 0,
+ };
+ blocks.push(b);
+ let b2 = parser::HTTP2FrameHeaderBlock {
+- name: "Host".as_bytes().to_vec(),
+- value: "efg.net".as_bytes().to_vec(),
++ name: "Host".as_bytes().to_vec().into(),
++ value: "efg.net".as_bytes().to_vec().into(),
+ error: parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
+ sizeupdate: 0,
+ };
+diff --git a/rust/src/http2/http2.rs b/rust/src/http2/http2.rs
+index 326030f..d14ca06 100644
+--- a/rust/src/http2/http2.rs
++++ b/rust/src/http2/http2.rs
+@@ -204,7 +204,7 @@ impl HTTP2Transaction {
+
+ fn handle_headers(&mut self, blocks: &[parser::HTTP2FrameHeaderBlock], dir: Direction) {
+ for block in blocks {
+- if block.name == b"content-encoding" {
++ if block.name.as_ref() == b"content-encoding" {
+ self.decoder.http2_encoding_fromvec(&block.value, dir);
+ }
+ }
+diff --git a/rust/src/http2/parser.rs b/rust/src/http2/parser.rs
+index adabeb2..1a46437 100644
+--- a/rust/src/http2/parser.rs
++++ b/rust/src/http2/parser.rs
+@@ -30,6 +30,7 @@ use nom7::sequence::tuple;
+ use nom7::{Err, IResult};
+ use std::fmt;
+ use std::str::FromStr;
++use std::rc::Rc;
+
+ #[repr(u8)]
+ #[derive(Clone, Copy, PartialEq, Eq, FromPrimitive, Debug)]
+@@ -295,8 +296,8 @@ fn http2_frame_header_static(n: u64, dyn_headers: &HTTP2DynTable) -> Option<HTTP
+ };
+ if !name.is_empty() {
+ return Some(HTTP2FrameHeaderBlock {
+- name: name.as_bytes().to_vec(),
+- value: value.as_bytes().to_vec(),
++ name: Rc::new(name.as_bytes().to_vec()),
++ value: Rc::new(value.as_bytes().to_vec()),
+ error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
+ sizeupdate: 0,
+ });
+@@ -304,23 +305,23 @@ fn http2_frame_header_static(n: u64, dyn_headers: &HTTP2DynTable) -> Option<HTTP
+ //use dynamic table
+ if n == 0 {
+ return Some(HTTP2FrameHeaderBlock {
+- name: Vec::new(),
+- value: Vec::new(),
++ name: Rc::new(Vec::new()),
++ value: Rc::new(Vec::new()),
+ error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeIndex0,
+ sizeupdate: 0,
+ });
+ } else if dyn_headers.table.len() + HTTP2_STATIC_HEADERS_NUMBER < n as usize {
+ return Some(HTTP2FrameHeaderBlock {
+- name: Vec::new(),
+- value: Vec::new(),
++ name: Rc::new(Vec::new()),
++ value: Rc::new(Vec::new()),
+ error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeNotIndexed,
+ sizeupdate: 0,
+ });
+ } else {
+ let indyn = dyn_headers.table.len() - (n as usize - HTTP2_STATIC_HEADERS_NUMBER);
+ let headcopy = HTTP2FrameHeaderBlock {
+- name: dyn_headers.table[indyn].name.to_vec(),
+- value: dyn_headers.table[indyn].value.to_vec(),
++ name: dyn_headers.table[indyn].name.clone(),
++ value: dyn_headers.table[indyn].value.clone(),
+ error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
+ sizeupdate: 0,
+ };
+@@ -348,8 +349,10 @@ impl fmt::Display for HTTP2HeaderDecodeStatus {
+
+ #[derive(Clone, Debug)]
+ pub struct HTTP2FrameHeaderBlock {
+- pub name: Vec<u8>,
+- pub value: Vec<u8>,
++ // Use Rc reference counted so that indexed headers do not get copied.
++ // Otherwise, this leads to quadratic complexity in memory occupation.
++ pub name: Rc<Vec<u8>>,
++ pub value: Rc<Vec<u8>>,
+ pub error: HTTP2HeaderDecodeStatus,
+ pub sizeupdate: u64,
+ }
+@@ -391,7 +394,7 @@ fn http2_parse_headers_block_literal_common<'a>(
+ ) -> IResult<&'a [u8], HTTP2FrameHeaderBlock> {
+ let (i3, name, error) = if index == 0 {
+ match http2_parse_headers_block_string(input) {
+- Ok((r, n)) => Ok((r, n, HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess)),
++ Ok((r, n)) => Ok((r, Rc::new(n), HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess)),
+ Err(e) => Err(e),
+ }
+ } else {
+@@ -403,7 +406,7 @@ fn http2_parse_headers_block_literal_common<'a>(
+ )),
+ None => Ok((
+ input,
+- Vec::new(),
++ Rc::new(Vec::new()),
+ HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeNotIndexed,
+ )),
+ }
+@@ -413,7 +416,7 @@ fn http2_parse_headers_block_literal_common<'a>(
+ i4,
+ HTTP2FrameHeaderBlock {
+ name,
+- value,
++ value: Rc::new(value),
+ error,
+ sizeupdate: 0,
+ },
+@@ -435,8 +438,8 @@ fn http2_parse_headers_block_literal_incindex<'a>(
+ match r {
+ Ok((r, head)) => {
+ let headcopy = HTTP2FrameHeaderBlock {
+- name: head.name.to_vec(),
+- value: head.value.to_vec(),
++ name: head.name.clone(),
++ value: head.value.clone(),
+ error: head.error,
+ sizeupdate: 0,
+ };
+@@ -556,8 +559,8 @@ fn http2_parse_headers_block_dynamic_size<'a>(
+ return Ok((
+ i3,
+ HTTP2FrameHeaderBlock {
+- name: Vec::new(),
+- value: Vec::new(),
++ name: Rc::new(Vec::new()),
++ value: Rc::new(Vec::new()),
+ error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSizeUpdate,
+ sizeupdate: maxsize2,
+ },
+@@ -614,8 +617,8 @@ fn http2_parse_headers_blocks<'a>(
+ // if we error from http2_parse_var_uint, we keep the first parsed headers
+ if err.code == ErrorKind::LengthValue {
+ blocks.push(HTTP2FrameHeaderBlock {
+- name: Vec::new(),
+- value: Vec::new(),
++ name: Rc::new(Vec::new()),
++ value: Rc::new(Vec::new()),
+ error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeIntegerOverflow,
+ sizeupdate: 0,
+ });
+@@ -765,8 +768,8 @@ mod tests {
+ match r0 {
+ Ok((remainder, hd)) => {
+ // Check the first message.
+- assert_eq!(hd.name, ":method".as_bytes().to_vec());
+- assert_eq!(hd.value, "GET".as_bytes().to_vec());
++ assert_eq!(hd.name, ":method".as_bytes().to_vec().into());
++ assert_eq!(hd.value, "GET".as_bytes().to_vec().into());
+ // And we should have no bytes left.
+ assert_eq!(remainder.len(), 0);
+ }
+@@ -782,8 +785,8 @@ mod tests {
+ match r1 {
+ Ok((remainder, hd)) => {
+ // Check the first message.
+- assert_eq!(hd.name, "accept".as_bytes().to_vec());
+- assert_eq!(hd.value, "*/*".as_bytes().to_vec());
++ assert_eq!(hd.name, "accept".as_bytes().to_vec().into());
++ assert_eq!(hd.value, "*/*".as_bytes().to_vec().into());
+ // And we should have no bytes left.
+ assert_eq!(remainder.len(), 0);
+ assert_eq!(dynh.table.len(), 1);
+@@ -802,8 +805,8 @@ mod tests {
+ match result {
+ Ok((remainder, hd)) => {
+ // Check the first message.
+- assert_eq!(hd.name, ":authority".as_bytes().to_vec());
+- assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec());
++ assert_eq!(hd.name, ":authority".as_bytes().to_vec().into());
++ assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec().into());
+ // And we should have no bytes left.
+ assert_eq!(remainder.len(), 0);
+ assert_eq!(dynh.table.len(), 2);
+@@ -820,8 +823,8 @@ mod tests {
+ match r3 {
+ Ok((remainder, hd)) => {
+ // same as before
+- assert_eq!(hd.name, ":authority".as_bytes().to_vec());
+- assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec());
++ assert_eq!(hd.name, ":authority".as_bytes().to_vec().into());
++ assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec().into());
+ // And we should have no bytes left.
+ assert_eq!(remainder.len(), 0);
+ assert_eq!(dynh.table.len(), 2);
+@@ -856,8 +859,8 @@ mod tests {
+ match r2 {
+ Ok((remainder, hd)) => {
+ // Check the first message.
+- assert_eq!(hd.name, ":path".as_bytes().to_vec());
+- assert_eq!(hd.value, "/doc/manual/html/index.html".as_bytes().to_vec());
++ assert_eq!(hd.name, ":path".as_bytes().to_vec().into());
++ assert_eq!(hd.value, "/doc/manual/html/index.html".as_bytes().to_vec().into());
+ // And we should have no bytes left.
+ assert_eq!(remainder.len(), 0);
+ assert_eq!(dynh.table.len(), 2);
+--
+2.44.0
+
diff --git a/meta-security/recipes-ids/suricata/files/CVE-2024-38536.patch b/meta-security/recipes-ids/suricata/files/CVE-2024-38536.patch
new file mode 100644
index 0000000000..2d4b3d78cf
--- /dev/null
+++ b/meta-security/recipes-ids/suricata/files/CVE-2024-38536.patch
@@ -0,0 +1,40 @@
+From 4026bca7f04c419dd3f3ba17a1af17bbcbcf18bc Mon Sep 17 00:00:00 2001
+From: Philippe Antoine <pantoine@oisf.net>
+Date: Fri, 17 May 2024 09:39:52 +0200
+Subject: [PATCH 4/4] http: fix nul deref on memcap reached
+
+HttpRangeOpenFileAux may return NULL in different cases, including
+when memcap is reached.
+But its only caller did not check it before calling HttpRangeAppendData
+which would dereference the NULL value.
+
+Ticket: 7029
+(cherry picked from commit fd262df457f67f2174752dd6505ba2ed5911fd96)
+
+Upstream-Status: Backport from [https://github.com/OISF/suricata/commit/2bd3bd0e318f19008e9fe068ab17277c530ffb92]
+CVE: CVE-2024-38536
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/app-layer-htp-range.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/src/app-layer-htp-range.c b/src/app-layer-htp-range.c
+index 3cdde35..f0d75a9 100644
+--- a/src/app-layer-htp-range.c
++++ b/src/app-layer-htp-range.c
+@@ -351,8 +351,10 @@ static HttpRangeContainerBlock *HttpRangeOpenFile(HttpRangeContainerFile *c, uin
+ {
+ HttpRangeContainerBlock *r =
+ HttpRangeOpenFileAux(c, start, end, total, sbcfg, name, name_len, flags);
+- if (HttpRangeAppendData(sbcfg, r, data, len) < 0) {
+- SCLogDebug("Failed to append data while opening");
++ if (r) {
++ if (HttpRangeAppendData(sbcfg, r, data, len) < 0) {
++ SCLogDebug("Failed to append data while opening");
++ }
+ }
+ return r;
+ }
+--
+2.44.0
+
diff --git a/meta-security/recipes-ids/suricata/files/CVE-2024-45797.patch b/meta-security/recipes-ids/suricata/files/CVE-2024-45797.patch
new file mode 100644
index 0000000000..3db4625224
--- /dev/null
+++ b/meta-security/recipes-ids/suricata/files/CVE-2024-45797.patch
@@ -0,0 +1,148 @@
+From 0d550de551b91d5e57ba23e2b1e2c6430fad6818 Mon Sep 17 00:00:00 2001
+From: Philippe Antoine <contact@catenacyber.fr>
+Date: Mon, 12 Aug 2024 14:06:40 +0200
+Subject: [PATCH] headers: put a configurable limit on their numbers
+
+So as to avoid quadratic complexity
+
+Ticket: 7191
+
+Upstream-Status: Backport [https://github.com/OISF/libhtp/commit/0d550de551b91d5e57ba23e2b1e2c6430fad6818]
+CVE: CVE-2024-45797
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ htp/htp_config.c | 8 ++++++++
+ htp/htp_config.h | 8 ++++++++
+ htp/htp_config_private.h | 6 ++++++
+ htp/htp_core.h | 1 +
+ htp/htp_request_generic.c | 11 +++++++++++
+ htp/htp_response_generic.c | 10 ++++++++++
+ 6 files changed, 44 insertions(+)
+
+diff --git a/htp/htp_config.c b/htp/htp_config.c
+index 767458f..9e0eee3 100644
+--- a/htp/htp_config.c
++++ b/htp/htp_config.c
+@@ -145,6 +145,8 @@ static unsigned char bestfit_1252[] = {
+ 0xff, 0x5d, 0x7d, 0xff, 0x5e, 0x7e, 0x00, 0x00, 0x00
+ };
+
++#define HTP_HEADERS_LIMIT 1024
++
+ htp_cfg_t *htp_config_create(void) {
+ htp_cfg_t *cfg = calloc(1, sizeof (htp_cfg_t));
+ if (cfg == NULL) return NULL;
+@@ -163,6 +165,7 @@ htp_cfg_t *htp_config_create(void) {
+ cfg->response_lzma_layer_limit = 1; // default is only one layer
+ cfg->compression_bomb_limit = HTP_COMPRESSION_BOMB_LIMIT;
+ cfg->compression_time_limit = HTP_COMPRESSION_TIME_LIMIT_USEC;
++ cfg->number_headers_limit = HTP_HEADERS_LIMIT;
+ cfg->allow_space_uri = 0;
+
+ // Default settings for URL-encoded data.
+@@ -542,6 +545,11 @@ void htp_config_set_compression_time_limit(htp_cfg_t *cfg, size_t useclimit) {
+ }
+ }
+
++void htp_config_set_number_headers_limit(htp_cfg_t *cfg, uint32_t limit) {
++ if (cfg == NULL) return;
++ cfg->number_headers_limit = limit;
++}
++
+ void htp_config_set_log_level(htp_cfg_t *cfg, enum htp_log_level_t log_level) {
+ if (cfg == NULL) return;
+ cfg->log_level = log_level;
+diff --git a/htp/htp_config.h b/htp/htp_config.h
+index d1365dc..ed0eaeb 100644
+--- a/htp/htp_config.h
++++ b/htp/htp_config.h
+@@ -466,6 +466,14 @@ void htp_config_set_compression_time_limit(htp_cfg_t *cfg, size_t useclimit);
+ */
+ void htp_config_set_log_level(htp_cfg_t *cfg, enum htp_log_level_t log_level);
+
++/**
++ * Configures the maximum number of headers LibHTP will accept per request or response.
++ *
++ * @param[in] cfg
++ * @param[in] limit
++ */
++void htp_config_set_number_headers_limit(htp_cfg_t *cfg, uint32_t limit);
++
+ /**
+ * Configures how the server reacts to encoded NUL bytes. Some servers will stop at
+ * at NUL, while some will respond with 400 or 404. When the termination option is not
+diff --git a/htp/htp_config_private.h b/htp/htp_config_private.h
+index 5f1d60d..ecc8717 100644
+--- a/htp/htp_config_private.h
++++ b/htp/htp_config_private.h
+@@ -360,6 +360,12 @@ struct htp_cfg_t {
+
+ /** Whether to decompress compressed request bodies. */
+ int request_decompression_enabled;
++
++ /** Maximum number of transactions. */
++ uint32_t max_tx;
++
++ /** Maximum number of headers. */
++ uint32_t number_headers_limit;
+ };
+
+ #ifdef __cplusplus
+diff --git a/htp/htp_core.h b/htp/htp_core.h
+index e4c933e..7c23212 100644
+--- a/htp/htp_core.h
++++ b/htp/htp_core.h
+@@ -235,6 +235,7 @@ enum htp_file_source_t {
+ #define HTP_REQUEST_INVALID 0x100000000ULL
+ #define HTP_REQUEST_INVALID_C_L 0x200000000ULL
+ #define HTP_AUTH_INVALID 0x400000000ULL
++#define HTP_HEADERS_TOO_MANY 0x800000000ULL
+
+ #define HTP_MAX_HEADERS_REPETITIONS 64
+
+diff --git a/htp/htp_request_generic.c b/htp/htp_request_generic.c
+index 435cf0a..1350e57 100644
+--- a/htp/htp_request_generic.c
++++ b/htp/htp_request_generic.c
+@@ -120,6 +120,17 @@ htp_status_t htp_process_request_header_generic(htp_connp_t *connp, unsigned cha
+ bstr_free(h->value);
+ free(h);
+ } else {
++ if (htp_table_size(connp->in_tx->request_headers) > connp->cfg->number_headers_limit) {
++ if (!(connp->in_tx->flags & HTP_HEADERS_TOO_MANY)) {
++ connp->in_tx->flags |= HTP_HEADERS_TOO_MANY;
++ htp_log(connp, HTP_LOG_MARK, HTP_LOG_WARNING, 0, "Too many request headers");
++ }
++ bstr_free(h->name);
++ bstr_free(h->value);
++ free(h);
++ // give up on what comes next
++ return HTP_ERROR;
++ }
+ // Add as a new header.
+ if (htp_table_add(connp->in_tx->request_headers, h->name, h) != HTP_OK) {
+ bstr_free(h->name);
+diff --git a/htp/htp_response_generic.c b/htp/htp_response_generic.c
+index f5fa59e..69da625 100644
+--- a/htp/htp_response_generic.c
++++ b/htp/htp_response_generic.c
+@@ -321,6 +321,16 @@ htp_status_t htp_process_response_header_generic(htp_connp_t *connp, unsigned ch
+ bstr_free(h->value);
+ free(h);
+ } else {
++ if (htp_table_size(connp->out_tx->response_headers) > connp->cfg->number_headers_limit) {
++ if (!(connp->out_tx->flags & HTP_HEADERS_TOO_MANY)) {
++ connp->out_tx->flags |= HTP_HEADERS_TOO_MANY;
++ htp_log(connp, HTP_LOG_MARK, HTP_LOG_WARNING, 0, "Too many response headers");
++ }
++ bstr_free(h->name);
++ bstr_free(h->value);
++ free(h);
++ return HTP_ERROR;
++ }
+ // Add as a new header.
+ if (htp_table_add(connp->out_tx->response_headers, h->name, h) != HTP_OK) {
+ bstr_free(h->name);
+--
+2.25.1
+
diff --git a/meta-security/recipes-ids/suricata/libhtp_0.5.45.bb b/meta-security/recipes-ids/suricata/libhtp_0.5.45.bb
index cc8285ccbe..74a53df471 100644
--- a/meta-security/recipes-ids/suricata/libhtp_0.5.45.bb
+++ b/meta-security/recipes-ids/suricata/libhtp_0.5.45.bb
@@ -4,7 +4,9 @@ require suricata.inc
LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=2;md5=596ab7963a1a0e5198e5a1c4aa621843"
-SRC_URI = "git://github.com/OISF/libhtp.git;protocol=https;branch=0.5.x"
+SRC_URI = "git://github.com/OISF/libhtp.git;protocol=https;branch=0.5.x \
+ file://CVE-2024-45797.patch \
+ "
SRCREV = "8bdfe7b9d04e5e948c8fbaa7472e14d884cc00af"
DEPENDS = "zlib"
@@ -13,9 +15,9 @@ inherit autotools-brokensep pkgconfig
CFLAGS += "-D_DEFAULT_SOURCE"
-#S = "${WORKDIR}/suricata-${VER}/${BPN}"
+#S = "${UNPACKDIR}/suricata-${VER}/${BPN}"
-S = "${WORKDIR}/git"
+S = "${UNPACKDIR}/git"
do_configure () {
cd ${S}
diff --git a/meta-security/recipes-ids/suricata/suricata_7.0.0.bb b/meta-security/recipes-ids/suricata/suricata_7.0.0.bb
index a01b3d937e..6e6c426041 100644
--- a/meta-security/recipes-ids/suricata/suricata_7.0.0.bb
+++ b/meta-security/recipes-ids/suricata/suricata_7.0.0.bb
@@ -16,6 +16,11 @@ SRC_URI += " \
file://suricata.service \
file://run-ptest \
file://fixup.patch \
+ file://CVE-2024-37151.patch \
+ file://CVE-2024-38534.patch \
+ file://CVE-2024-38535_pre.patch \
+ file://CVE-2024-38535.patch \
+ file://CVE-2024-38536.patch \
"
inherit autotools pkgconfig python3native systemd ptest cargo cargo-update-recipe-crates
@@ -63,9 +68,11 @@ do_configure:prepend () {
# use host for RUST_SURICATA_LIB_XC_DIR
sed -i -e 's,\${host_alias},${RUST_HOST_SYS},' ${S}/configure.ac
sed -i -e 's,libsuricata_rust.a,libsuricata.a,' ${S}/configure.ac
- oe_runconf
+ autotools_do_configure
}
+CFLAGS += "-Wno-error=incompatible-pointer-types"
+
do_compile () {
# we do this to bypass the make provided by this pkg
# patches Makefile to skip the subdir
@@ -82,14 +89,14 @@ do_install () {
oe_runmake install DESTDIR=${D}
install -d ${D}${sysconfdir}/suricata ${D}${sysconfdir}/default/volatiles
- install -m 0644 ${WORKDIR}/volatiles.03_suricata ${D}${sysconfdir}/default/volatiles/03_suricata
+ install -m 0644 ${UNPACKDIR}/volatiles.03_suricata ${D}${sysconfdir}/default/volatiles/03_suricata
install -m 0644 ${S}/threshold.config ${D}${sysconfdir}/suricata
install -m 0644 ${S}/suricata.yaml ${D}${sysconfdir}/suricata
if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then
install -d ${D}${sysconfdir}/tmpfiles.d
- install -m 0644 ${WORKDIR}/tmpfiles.suricata ${D}${sysconfdir}/tmpfiles.d/suricata.conf
+ install -m 0644 ${UNPACKDIR}/tmpfiles.suricata ${D}${sysconfdir}/tmpfiles.d/suricata.conf
install -d ${D}${systemd_unitdir}/system
sed -e s:/etc:${sysconfdir}:g \
@@ -98,7 +105,7 @@ do_install () {
-e s:/usr/bin:${bindir}:g \
-e s:/bin/kill:${base_bindir}/kill:g \
-e s:/usr/lib:${libdir}:g \
- ${WORKDIR}/suricata.service > ${D}${systemd_unitdir}/system/suricata.service
+ ${UNPACKDIR}/suricata.service > ${D}${systemd_unitdir}/system/suricata.service
fi
# Remove /var/run as it is created on startup
@@ -107,6 +114,10 @@ do_install () {
sed -i -e "s:#!.*$:#!${USRBINPATH}/env python3:g" ${D}${bindir}/suricatasc
sed -i -e "s:#!.*$:#!${USRBINPATH}/env python3:g" ${D}${bindir}/suricatactl
sed -i -e "s:#!.*$:#!${USRBINPATH}/env python3:g" ${D}${libdir}/suricata/python/suricata/sc/suricatasc.py
+ # The build process dumps config logs into the binary, remove them.
+ sed -i -e 's#${RECIPE_SYSROOT}##g' ${D}${bindir}/suricata
+ sed -i -e 's#${RECIPE_SYSROOT_NATIVE}##g' ${D}${bindir}/suricata
+ sed -i -e 's#CFLAGS.*##g' ${D}${bindir}/suricata
}
pkg_postinst_ontarget:${PN} () {
@@ -124,3 +135,4 @@ FILES:${PN} += "${systemd_unitdir} ${sysconfdir}/tmpfiles.d"
FILES:${PN}-python = "${bindir}/suricatasc ${PYTHON_SITEPACKAGES_DIR}"
CONFFILES:${PN} = "${sysconfdir}/suricata/suricata.yaml"
+INSANE_SKIP:${PN} = "already-stripped"