Diffstat (limited to 'fs/smb/client/compress.c')
-rw-r--r--  fs/smb/client/compress.c | 94
1 file changed, 40 insertions(+), 54 deletions(-)
diff --git a/fs/smb/client/compress.c b/fs/smb/client/compress.c
index 766b4de13da7..e0c44b46080e 100644
--- a/fs/smb/client/compress.c
+++ b/fs/smb/client/compress.c
@@ -44,7 +44,7 @@ struct bucket {
 	unsigned int count;
 };
 
-/**
+/*
  * has_low_entropy() - Compute Shannon entropy of the sampled data.
  * @bkt: Bytes counts of the sample.
  * @slen: Size of the sample.
@@ -82,7 +82,7 @@ static bool has_low_entropy(struct bucket *bkt, size_t slen)
 #define BYTE_DIST_BAD 0
 #define BYTE_DIST_GOOD 1
 #define BYTE_DIST_MAYBE 2
-/**
+/*
  * calc_byte_distribution() - Compute byte distribution on the sampled data.
  * @bkt: Byte counts of the sample.
  * @slen: Size of the sample.
@@ -155,63 +155,34 @@ static int cmp_bkt(const void *_a, const void *_b)
 }
 
 /*
- * TODO:
- * Support other iter types, if required.
- * Only ITER_XARRAY is supported for now.
+ * Collect some 2K samples with 2K gaps between.
  */
-static int collect_sample(const struct iov_iter *iter, ssize_t max, u8 *sample)
+static int collect_sample(const struct iov_iter *source, ssize_t max, u8 *sample)
 {
-	struct folio *folios[16], *folio;
-	unsigned int nr, i, j, npages;
-	loff_t start = iter->xarray_start + iter->iov_offset;
-	pgoff_t last, index = start / PAGE_SIZE;
-	size_t len, off, foff;
-	void *p;
-	int s = 0;
-
-	last = (start + max - 1) / PAGE_SIZE;
-	do {
-		nr = xa_extract(iter->xarray, (void **)folios, index, last, ARRAY_SIZE(folios),
-				XA_PRESENT);
-		if (nr == 0)
-			return -EIO;
-
-		for (i = 0; i < nr; i++) {
-			folio = folios[i];
-			npages = folio_nr_pages(folio);
-			foff = start - folio_pos(folio);
-			off = foff % PAGE_SIZE;
-
-			for (j = foff / PAGE_SIZE; j < npages; j++) {
-				size_t len2;
-
-				len = min_t(size_t, max, PAGE_SIZE - off);
-				len2 = min_t(size_t, len, SZ_2K);
-
-				p = kmap_local_page(folio_page(folio, j));
-				memcpy(&sample[s], p, len2);
-				kunmap_local(p);
-
-				s += len2;
-
-				if (len2 < SZ_2K || s >= max - SZ_2K)
-					return s;
-
-				max -= len;
-				if (max <= 0)
-					return s;
-
-				start += len;
-				off = 0;
-				index++;
-			}
-		}
-	} while (nr == ARRAY_SIZE(folios));
+	struct iov_iter iter = *source;
+	size_t s = 0;
+
+	while (iov_iter_count(&iter) >= SZ_2K) {
+		size_t part = umin(umin(iov_iter_count(&iter), SZ_2K), max);
+		size_t n;
+
+		n = copy_from_iter(sample + s, part, &iter);
+		if (n != part)
+			return -EFAULT;
+
+		s += n;
+		max -= n;
+
+		if (iov_iter_count(&iter) < PAGE_SIZE - SZ_2K)
+			break;
+
+		iov_iter_advance(&iter, SZ_2K);
+	}
 
 	return s;
 }
 
-/**
+/*
  * is_compressible() - Determines if a chunk of data is compressible.
  * @data: Iterator containing uncompressed data.
  *
@@ -290,6 +261,21 @@ out:
 	return ret;
 }
 
+/*
+ * should_compress() - Determines if a request (write) or the response to a
+ * request (read) should be compressed.
+ * @tcon: tcon of the request is being sent to
+ * @rqst: request to evaluate
+ *
+ * Return: true iff:
+ * - compression was successfully negotiated with server
+ * - server has enabled compression for the share
+ * - it's a read or write request
+ * - (write only) request length is >= SMB_COMPRESS_MIN_LEN
+ * - (write only) is_compressible() returns 1
+ *
+ * Return false otherwise.
+ */
 bool should_compress(const struct cifs_tcon *tcon, const struct smb_rqst *rq)
 {
 	const struct smb2_hdr *shdr = rq->rq_iov->iov_base;
@@ -339,7 +325,7 @@ int smb_compress(struct TCP_Server_Info *server, struct smb_rqst *rq, compress_s
 	iter = rq->rq_iter;
 	if (!copy_from_iter_full(src, slen, &iter)) {
-		ret = -EIO;
+		ret = smb_EIO(smb_eio_trace_compress_copy);
 		goto err_free;
 	}
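
For reference, a minimal userspace sketch of the sampling pattern the reworked collect_sample() implements: copy a 2K chunk, skip the next 2K, repeat until the data or the sample buffer runs out. It is not part of the patch; it works on a flat buffer instead of an iov_iter, and the names (collect_sample_flat, SAMPLE_CHUNK, SAMPLE_GAP) and the main() driver are illustrative only.

/* Illustrative flat-buffer stand-in for the iov_iter-based kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SAMPLE_CHUNK 2048	/* stands in for SZ_2K */
#define SAMPLE_GAP   2048	/* gap skipped between sampled chunks */

static size_t collect_sample_flat(const uint8_t *src, size_t srclen,
				  uint8_t *sample, size_t max)
{
	size_t pos = 0, s = 0;

	/* Stop when less than a full chunk remains or the sample buffer is full. */
	while (pos + SAMPLE_CHUNK <= srclen && max > 0) {
		size_t part = SAMPLE_CHUNK;

		if (part > max)
			part = max;

		memcpy(sample + s, src + pos, part);	/* take a 2K sample */
		s += part;
		max -= part;

		pos += SAMPLE_CHUNK + SAMPLE_GAP;	/* skip a 2K gap */
	}

	return s;
}

int main(void)
{
	static uint8_t src[64 * 1024], sample[16 * 1024];
	size_t n;

	memset(src, 'A', sizeof(src));
	n = collect_sample_flat(src, sizeof(src), sample, sizeof(sample));
	printf("sampled %zu of %zu bytes\n", n, sizeof(src));
	return 0;
}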