Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 306127 Details for
Bug 446085
RFE: GFS: Optimise loop in gfs_bitfit
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
Upstream version of the proposed patch
446085.upstream.patch (text/plain), 4.34 KB, created by
Robert Peterson
on 2008-05-20 14:27:55 UTC
(
hide
)
Description:
Upstream version of the proposed patch
Filename:
MIME Type:
Creator:
Robert Peterson
Created:
2008-05-20 14:27:55 UTC
Size:
4.34 KB
patch
obsolete
> gfs-kernel/src/gfs/bits.c | 85 +++++++++++++++++++++++++++++++------------- > gfs-kernel/src/gfs/bits.h | 3 +- > gfs-kernel/src/gfs/rgrp.c | 3 +- > 3 files changed, 62 insertions(+), 29 deletions(-) > >diff --git a/gfs-kernel/src/gfs/bits.c b/gfs-kernel/src/gfs/bits.c >index da08a23..55427bb 100644 >--- a/gfs-kernel/src/gfs/bits.c >+++ b/gfs-kernel/src/gfs/bits.c >@@ -31,6 +31,16 @@ > #include "gfs.h" > #include "bits.h" > >+#if BITS_PER_LONG == 32 >+#define LBITMASK (0x55555555UL) >+#define LBITSKIP55 (0x55555555UL) >+#define LBITSKIP00 (0x00000000UL) >+#else >+#define LBITMASK (0x5555555555555555UL) >+#define LBITSKIP55 (0x5555555555555555UL) >+#define LBITSKIP00 (0x0000000000000000UL) >+#endif >+ > static const char valid_change[16] = { > /* current */ > /* n */ 0, 1, 1, 1, >@@ -115,41 +125,66 @@ gfs_testbit(struct gfs_rgrpd *rgd, > */ > > uint32_t >-gfs_bitfit(struct gfs_rgrpd *rgd, >- unsigned char *buffer, unsigned int buflen, >+gfs_bitfit(unsigned char *buffer, unsigned int buflen, > uint32_t goal, unsigned char old_state) > { >- unsigned char *byte, *end, alloc; >- uint32_t blk = goal; >- unsigned int bit; >- >- byte = buffer + (goal / GFS_NBBY); >- bit = (goal % GFS_NBBY) * GFS_BIT_SIZE; >- end = buffer + buflen; >- alloc = (old_state & 1) ? 0 : 0x55; >- >+ const u8 *byte, *start, *end; >+ int bit, startbit; >+ u32 g1, g2, misaligned; >+ unsigned long *plong; >+ unsigned long lskipval; >+ >+ lskipval = (old_state & GFS_BLKST_USED) ? 
LBITSKIP00 : LBITSKIP55; >+ g1 = (goal / GFS_NBBY); >+ start = buffer + g1; >+ byte = start; >+ end = buffer + buflen; >+ g2 = ALIGN(g1, sizeof(unsigned long)); >+ plong = (unsigned long *)(buffer + g2); >+ startbit = bit = (goal % GFS_NBBY) * GFS_BIT_SIZE; >+ misaligned = g2 - g1; >+ if (!misaligned) >+ goto ulong_aligned; >+/* parse the bitmap a byte at a time */ >+misaligned: > while (byte < end) { >- if ((*byte & 0x55) == alloc) { >- blk += (8 - bit) >> 1; >- >- bit = 0; >- byte++; >- >- continue; >+ if (((*byte >> bit) & GFS_BIT_MASK) == old_state) { >+ return goal + >+ (((byte - start) * GFS_NBBY) + >+ ((bit - startbit) >> 1)); > } >- >- if (((*byte >> bit) & GFS_BIT_MASK) == old_state) >- return blk; >- > bit += GFS_BIT_SIZE; >- if (bit >= 8) { >+ if (bit >= GFS_NBBY * GFS_BIT_SIZE) { > bit = 0; > byte++; >+ misaligned--; >+ if (!misaligned) { >+ plong = (unsigned long *)byte; >+ goto ulong_aligned; >+ } > } >- >- blk++; > } >+ return BFITNOENT; > >+/* parse the bitmap a unsigned long at a time */ >+ulong_aligned: >+ /* Stop at "end - 1" or else prefetch can go past the end and segfault. >+ We could "if" it but we'd lose some of the performance gained. >+ This way will only slow down searching the very last 4/8 bytes >+ depending on architecture. I've experimented with several ways >+ of writing this section such as using an else before the goto >+ but this one seems to be the fastest. 
*/ >+ while ((unsigned char *)plong < end - 1) { >+ prefetch(plong + 1); >+ if (((*plong) & LBITMASK) != lskipval) >+ break; >+ plong++; >+ } >+ if ((unsigned char *)plong < end) { >+ byte = (const u8 *)plong; >+ misaligned += sizeof(unsigned long) - 1; >+ goto misaligned; >+ } > return BFITNOENT; > } > >diff --git a/gfs-kernel/src/gfs/bits.h b/gfs-kernel/src/gfs/bits.h >index ed38102..9990bae 100644 >--- a/gfs-kernel/src/gfs/bits.h >+++ b/gfs-kernel/src/gfs/bits.h >@@ -22,8 +22,7 @@ void gfs_setbit(struct gfs_rgrpd *rgd, > unsigned char gfs_testbit(struct gfs_rgrpd *rgd, > unsigned char *buffer, unsigned int buflen, > uint32_t block); >-uint32_t gfs_bitfit(struct gfs_rgrpd *rgd, >- unsigned char *buffer, unsigned int buflen, >+uint32_t gfs_bitfit(unsigned char *buffer, unsigned int buflen, > uint32_t goal, unsigned char old_state); > uint32_t gfs_bitcount(struct gfs_rgrpd *rgd, > unsigned char *buffer, unsigned int buflen, >diff --git a/gfs-kernel/src/gfs/rgrp.c b/gfs-kernel/src/gfs/rgrp.c >index dd7fdcb..f0a4e3f 100644 >--- a/gfs-kernel/src/gfs/rgrp.c >+++ b/gfs-kernel/src/gfs/rgrp.c >@@ -1444,8 +1444,7 @@ blkalloc_internal(struct gfs_rgrpd *rgd, > allocatable block anywhere else, we want to be able wrap around and > search in the first part of our first-searched bit block. */ > for (x = 0; x <= length; x++) { >- blk = gfs_bitfit(rgd, >- rgd->rd_bh[buf]->b_data + bits->bi_offset, >+ blk = gfs_bitfit(rgd->rd_bh[buf]->b_data + bits->bi_offset, > bits->bi_len, goal, old_state); > if (blk != BFITNOENT) > break;
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 446085
:
306122
|
306123
| 306127