[prev in list] [next in list] [prev in thread] [next in thread] 

List:       haiku-commits
Subject:    [haiku-commits] haiku: hrev57057 - src/system/kernel/vm headers/private/kernel/util src/system/kerne
From:       waddlesplash <waddlesplash () gmail ! com>
Date:       2023-05-31 18:47:55
Message-ID: 20230531184755.760323F7A5 () turing ! freelists ! org
[Download RAW message or body]

hrev57057 adds 2 changesets to branch 'master'
old head: 338fedd65a5bc57f3ec35b9c0d48ab5d8b013bd3
new head: fbcc7b2711dcb3ed611911826e5317da3dd510d5
overview: https://git.haiku-os.org/haiku/log/?qt=range&q=fbcc7b2711dc+%5E338fedd65a5b

----------------------------------------------------------------------------

de07bc3fa586: kernel/vm: handle page protections in cut_area
  
  - Resize the `page_protections` array in `cut_area` and also shift
  the bits if necessary.
  - Set the correct protection array as well as the real page
  protections for the second area produced by `cut_area`.
  
  Change-Id: I62293480487e828970ebe5a3bc729cec2a14c687

                                  [ Trung Nguyen <trungnt282910@gmail.com> ]

fbcc7b2711dc: BScrollView: Adjust minimum dimensions as little as necessary.
  
  If the dimensions are already larger than needed, don't add to them.
  Fixes Terminal and other non-layout applications' display
  following the prior change.

                              [ Augustin Cavalier <waddlesplash@gmail.com> ]

----------------------------------------------------------------------------

5 files changed, 195 insertions(+), 47 deletions(-)
headers/private/kernel/util/Bitmap.h       |  57 +++++++++++++
src/kits/interface/ScrollView.cpp          |  10 ++-
src/system/kernel/util/Bitmap.cpp          |  44 +---------
src/system/kernel/vm/vm.cpp                | 110 ++++++++++++++++++++++++-
src/tests/system/kernel/mmap_cut_tests.cpp |  21 ++++-

############################################################################

Commit:      de07bc3fa58600e923d75cb732c60dacf160c098
URL:         https://git.haiku-os.org/haiku/commit/?id=de07bc3fa586
Author:      Trung Nguyen <trungnt282910@gmail.com>
Date:        Wed May 31 00:55:47 2023 UTC
Committer:   Augustin Cavalier <waddlesplash@gmail.com>
Commit-Date: Wed May 31 18:47:40 2023 UTC

kernel/vm: handle page protections in cut_area

- Resize the `page_protections` array in `cut_area` and also shift
the bits if necessary.
- Set the correct protection array as well as the real page
protections for the second area produced by `cut_area`.

Change-Id: I62293480487e828970ebe5a3bc729cec2a14c687

----------------------------------------------------------------------------

diff --git a/headers/private/kernel/util/Bitmap.h b/headers/private/kernel/util/Bitmap.h
index e6f6d69627..a103586c8a 100644
--- a/headers/private/kernel/util/Bitmap.h
+++ b/headers/private/kernel/util/Bitmap.h
@@ -16,6 +16,8 @@
 #	include <Debug.h>
 #endif
 
+#include <string.h>
+
 #include <SupportDefs.h>
 
 namespace BKernel {
@@ -36,6 +38,8 @@ public:
 
 			ssize_t		GetHighestSet() const;
 
+	template<typename T>
+	static	void		Shift(T* bits, size_t bitCount, ssize_t shift);
 private:
 			size_t		fElementsCount;
 			size_t		fSize;
@@ -77,6 +81,59 @@ Bitmap::Clear(size_t index)
 	fBits[kArrayElement] &= ~addr_t(kBitMask);
 }
 
+
+template<typename T>
+void
+Bitmap::Shift(T* bits, size_t bitCount, ssize_t shift)
+{
+	if (shift == 0)
+		return;
+
+	const size_t bitsPerElement = sizeof(T) * 8;
+	const size_t elementsCount = (bitCount + bitsPerElement - 1) / bitsPerElement;
+	const size_t absoluteShift = (shift > 0) ? shift : -shift;
+	const size_t nElements = absoluteShift / bitsPerElement;
+	const size_t nBits = absoluteShift % bitsPerElement;
+	if (nElements != 0) {
+		if (shift > 0) {
+			// "Left" shift.
+			memmove(&bits[nElements], bits, sizeof(T) * (elementsCount - nElements));
+			memset(bits, 0, sizeof(T) * nElements);
+		} else if (shift < 0) {
+			// "Right" shift.
+			memmove(bits, &bits[nElements], sizeof(T) * (elementsCount - nElements));
+			memset(&bits[elementsCount - nElements], 0, sizeof(T) * nElements);
+		}
+	}
+
+	// If the shift was by a multiple of the element size, nothing more to do.
+	if (nBits == 0)
+		return;
+
+	// One set of bits comes from the "current" element and are shifted in the
+	// direction of the shift; the other set comes from the next-processed
+	// element and are shifted in the opposite direction.
+	if (shift > 0) {
+		// "Left" shift.
+		for (ssize_t i = elementsCount - 1; i >= 0; i--) {
+			T low = 0;
+			if (i != 0)
+				low = bits[i - 1] >> (bitsPerElement - nBits);
+			const T high = bits[i] << nBits;
+			bits[i] = low | high;
+		}
+	} else if (shift < 0) {
+		// "Right" shift.
+		for (size_t i = 0; i < elementsCount; i++) {
+			const T low = bits[i] >> nBits;
+			T high = 0;
+			if (i != (elementsCount - 1))
+				high = bits[i + 1] << (bitsPerElement - nBits);
+			bits[i] = low | high;
+		}
+	}
+}
+
 } // namespace BKernel
 
 
diff --git a/src/system/kernel/util/Bitmap.cpp b/src/system/kernel/util/Bitmap.cpp
index af42ec4e2b..a62b4bc01a 100644
--- a/src/system/kernel/util/Bitmap.cpp
+++ b/src/system/kernel/util/Bitmap.cpp
@@ -66,49 +66,7 @@ Bitmap::Resize(size_t bitCount)
 void
 Bitmap::Shift(ssize_t bitCount)
 {
-	if (bitCount == 0)
-		return;
-
-	const size_t shift = (bitCount > 0) ? bitCount : -bitCount;
-	const size_t nElements = shift / kBitsPerElement, nBits = shift % kBitsPerElement;
-	if (nElements != 0) {
-		if (bitCount > 0) {
-			// "Left" shift.
-			memmove(&fBits[nElements], fBits, sizeof(addr_t) * (fElementsCount - nElements));
-			memset(fBits, 0, sizeof(addr_t) * nElements);
-		} else if (bitCount < 0) {
-			// "Right" shift.
-			memmove(fBits, &fBits[nElements], sizeof(addr_t) * (fElementsCount - nElements));
-			memset(&fBits[fElementsCount - nElements], 0, sizeof(addr_t) * nElements);
-		}
-	}
-
-	// If the shift was by a multiple of the element size, nothing more to do.
-	if (nBits == 0)
-		return;
-
-	// One set of bits comes from the "current" element and are shifted in the
-	// direction of the shift; the other set comes from the next-processed
-	// element and are shifted in the opposite direction.
-	if (bitCount > 0) {
-		// "Left" shift.
-		for (ssize_t i = fElementsCount - 1; i >= 0; i--) {
-			addr_t low = 0;
-			if (i != 0)
-				low = fBits[i - 1] >> (kBitsPerElement - nBits);
-			const addr_t high = fBits[i] << nBits;
-			fBits[i] = low | high;
-		}
-	} else if (bitCount < 0) {
-		// "Right" shift.
-		for (size_t i = 0; i < fElementsCount; i++) {
-			const addr_t low = fBits[i] >> nBits;
-			addr_t high = 0;
-			if (i != (fElementsCount - 1))
-				high = fBits[i + 1] << (kBitsPerElement - nBits);
-			fBits[i] = low | high;
-		}
-	}
+	return Bitmap::Shift<addr_t>(fBits, fSize, bitCount);
 }
 
 
diff --git a/src/system/kernel/vm/vm.cpp b/src/system/kernel/vm/vm.cpp
index 292b3826ac..4d476e6f04 100644
--- a/src/system/kernel/vm/vm.cpp
+++ b/src/system/kernel/vm/vm.cpp
@@ -47,6 +47,7 @@
 #include <team.h>
 #include <tracing.h>
 #include <util/AutoLock.h>
+#include <util/Bitmap.h>
 #include <util/ThreadAutoLock.h>
 #include <vm/vm_page.h>
 #include <vm/vm_priv.h>
@@ -676,6 +677,8 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
 	bool onlyCacheUser = cache->areas == area && area->cache_next == NULL
 		&& cache->consumers.IsEmpty() && area->cache_type == CACHE_TYPE_RAM;
 
+	const addr_t oldSize = area->Size();
+
 	// Cut the end only?
 	if (offset > 0 && size == area->Size() - offset) {
 		status_t error = addressSpace->ShrinkAreaTail(area, offset,
@@ -683,6 +686,19 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
 		if (error != B_OK)
 			return error;
 
+		if (area->page_protections != NULL) {
+			size_t bytes = (area->Size() / B_PAGE_SIZE + 1) / 2;
+			uint8* newProtections
+				= (uint8*)realloc(area->page_protections, bytes);
+
+			if (newProtections == NULL) {
+				addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
+				return B_NO_MEMORY;
+			}
+
+			area->page_protections = newProtections;
+		}
+
 		// unmap pages
 		unmap_pages(area, address, size);
 
@@ -705,6 +721,28 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
 		if (error != B_OK)
 			return error;
 
+		if (area->page_protections != NULL) {
+			// Allocate all memory before shifting as the shift might lose some
+			// bits.
+			size_t bytes = (area->Size() / B_PAGE_SIZE + 1) / 2;
+			uint8* newProtections
+				= (uint8*)malloc_etc(bytes, allocationFlags);
+
+			if (newProtections == NULL) {
+				addressSpace->ShrinkAreaHead(area, oldSize, allocationFlags);
+				return B_NO_MEMORY;
+			}
+
+			size_t oldBytes = (oldSize / B_PAGE_SIZE + 1) / 2;
+			ssize_t pagesShifted = (oldSize - area->Size()) / B_PAGE_SIZE;
+			Bitmap::Shift<uint8>(area->page_protections, oldBytes * 8,
+				-(pagesShifted * 4));
+
+			memcpy(newProtections, area->page_protections, bytes);
+			free_etc(area->page_protections, allocationFlags);
+			area->page_protections = newProtections;
+		}
+
 		// unmap pages
 		unmap_pages(area, address, size);
 
@@ -731,12 +769,32 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
 	unmap_pages(area, address, area->Size() - firstNewSize);
 
 	// resize the area
-	addr_t oldSize = area->Size();
 	status_t error = addressSpace->ShrinkAreaTail(area, firstNewSize,
 		allocationFlags);
 	if (error != B_OK)
 		return error;
 
+	uint8* areaNewProtections = NULL;
+	uint8* secondAreaNewProtections = NULL;
+
+	// Try to allocate the new memory before making some hard to reverse
+	// changes.
+	if (area->page_protections != NULL) {
+		size_t areaBytes = (area->Size() / B_PAGE_SIZE + 1) / 2;
+		size_t secondAreaBytes = (secondSize / B_PAGE_SIZE + 1) / 2;
+
+		areaNewProtections = (uint8*)malloc_etc(areaBytes, allocationFlags);
+		secondAreaNewProtections = (uint8*)malloc_etc(secondAreaBytes,
+			allocationFlags);
+
+		if (areaNewProtections == NULL || secondAreaNewProtections == NULL) {
+			addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
+			free_etc(areaNewProtections, allocationFlags);
+			free_etc(secondAreaNewProtections, allocationFlags);
+			return B_NO_MEMORY;
+		}
+	}
+
 	virtual_address_restrictions addressRestrictions = {};
 	addressRestrictions.address = (void*)secondBase;
 	addressRestrictions.address_specification = B_EXACT_ADDRESS;
@@ -750,6 +808,8 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
 			dynamic_cast<VMAnonymousNoSwapCache*>(cache) == NULL, priority);
 		if (error != B_OK) {
 			addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
+			free_etc(areaNewProtections, allocationFlags);
+			free_etc(secondAreaNewProtections, allocationFlags);
 			return error;
 		}
 
@@ -798,6 +858,8 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
 			cache->ReleaseRefAndUnlock();
 			secondCache->ReleaseRefAndUnlock();
 			addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
+			free_etc(areaNewProtections, allocationFlags);
+			free_etc(secondAreaNewProtections, allocationFlags);
 			return error;
 		}
 
@@ -812,12 +874,58 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
 			&addressRestrictions, kernel, &secondArea, NULL);
 		if (error != B_OK) {
 			addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
+			free_etc(areaNewProtections, allocationFlags);
+			free_etc(secondAreaNewProtections, allocationFlags);
 			return error;
 		}
 		// We need a cache reference for the new area.
 		cache->AcquireRefLocked();
 	}
 
+	if (area->page_protections != NULL) {
+		// Copy the protection bits of the first area.
+		size_t areaBytes = (area->Size() / B_PAGE_SIZE + 1) / 2;
+		memcpy(areaNewProtections, area->page_protections, areaBytes);
+		uint8* areaOldProtections = area->page_protections;
+		area->page_protections = areaNewProtections;
+
+		// Shift the protection bits of the second area to the start of
+		// the old array.
+		size_t oldBytes = (oldSize / B_PAGE_SIZE + 1) / 2;
+		addr_t secondAreaOffset = secondBase - area->Base();
+		ssize_t secondAreaPagesShifted = secondAreaOffset / B_PAGE_SIZE;
+		Bitmap::Shift<uint8>(areaOldProtections, oldBytes * 8,
+			-(secondAreaPagesShifted * 4));
+
+		// Copy the protection bits of the second area.
+		size_t secondAreaBytes = (secondSize / B_PAGE_SIZE + 1) / 2;
+		memcpy(secondAreaNewProtections, areaOldProtections, secondAreaBytes);
+		secondArea->page_protections = secondAreaNewProtections;
+
+		// We don't need this anymore.
+		free_etc(areaOldProtections, allocationFlags);
+
+		// Set the correct page protections for the second area.
+		VMTranslationMap* map = addressSpace->TranslationMap();
+		map->Lock();
+		page_num_t firstPageOffset
+			= secondArea->cache_offset / B_PAGE_SIZE;
+		page_num_t lastPageOffset
+			= firstPageOffset + secondArea->Size() / B_PAGE_SIZE;
+		for (VMCachePagesTree::Iterator it
+				= secondArea->cache->pages.GetIterator();
+				vm_page* page = it.Next();) {
+			if (page->cache_offset >= firstPageOffset
+				&& page->cache_offset <= lastPageOffset) {
+				addr_t address = virtual_page_address(secondArea, page);
+				uint32 pageProtection
+					= get_area_page_protection(secondArea, address);
+				map->ProtectPage(secondArea, address, pageProtection);
+			}
+		}
+		map->Unlock();
+	}
+
 	if (_secondArea != NULL)
 		*_secondArea = secondArea;
 
diff --git a/src/tests/system/kernel/mmap_cut_tests.cpp b/src/tests/system/kernel/mmap_cut_tests.cpp
index ce4da2fd4a..66a4490287 100644
--- a/src/tests/system/kernel/mmap_cut_tests.cpp
+++ b/src/tests/system/kernel/mmap_cut_tests.cpp
@@ -19,7 +19,7 @@ main()
 
 	// should fail (negative offset)
 	void* ptr0 = mmap(NULL, B_PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, -4096);
-	if (ptr0 != NULL) {
+	if (ptr0 != MAP_FAILED) {
 		printf("map-negative-offset unexpectedly succeeded!\n");
 		return -1;
 	}
@@ -39,5 +39,24 @@ main()
 		return status;
 	}
 
+	uint8* ptr3 = (uint8*)mmap(NULL, B_PAGE_SIZE * 4, PROT_NONE,
+		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+	// make the tail accessible
+	mprotect(ptr3 + B_PAGE_SIZE * 3, B_PAGE_SIZE, PROT_READ | PROT_WRITE);
+
+	// store any value
+	ptr3[B_PAGE_SIZE * 3] = 'a';
+
+	// cut the area in the middle, before the accessible tail
+	mmap(ptr3 + B_PAGE_SIZE, B_PAGE_SIZE, PROT_READ | PROT_WRITE,
+		MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+
+	// validate that this does not crash
+	if (ptr3[B_PAGE_SIZE * 3] != 'a') {
+		printf("map-protect-cut test failed!\n");
+		return -1;
+	}
+
 	return 0;
 }

############################################################################

Revision:    hrev57057
Commit:      fbcc7b2711dcb3ed611911826e5317da3dd510d5
URL:         https://git.haiku-os.org/haiku/commit/?id=fbcc7b2711dc
Author:      Augustin Cavalier <waddlesplash@gmail.com>
Date:        Wed May 31 18:47:19 2023 UTC

BScrollView: Adjust minimum dimensions as little as necessary.

If the dimensions are already larger than needed, don't add to them.
Fixes Terminal and other non-layout applications' display
following the prior change.

----------------------------------------------------------------------------

diff --git a/src/kits/interface/ScrollView.cpp b/src/kits/interface/ScrollView.cpp
index eda3f860e2..aefe77a021 100644
--- a/src/kits/interface/ScrollView.cpp
+++ b/src/kits/interface/ScrollView.cpp
@@ -918,11 +918,17 @@ BScrollView::_ComputeFrame(BRect frame, BScrollBar* horizontal,
 {
 	if (vertical != NULL) {
 		frame.right += vertical->PreferredSize().Width();
-		frame.bottom += vertical->MinSize().Height();
+
+		const float minHeight = vertical->MinSize().Height();
+		if (frame.Height() < minHeight)
+			frame.bottom += minHeight - frame.Height();
 	}
 	if (horizontal != NULL) {
-		frame.right += horizontal->MinSize().Width();
 		frame.bottom += horizontal->PreferredSize().Height();
+
+		const float minWidth = horizontal->MinSize().Width();
+		if (frame.Width() < minWidth)
+			frame.right += minWidth - frame.Width();
 	}
 
 	_InsetBorders(frame, border, borders, true);


[prev in list] [next in list] [prev in thread] [next in thread] 

Configure | About | News | Add a list | Sponsored by KoreLogic