[prev in list] [next in list] [prev in thread] [next in thread]
List: openbsd-bugs
Subject: Re: amd64 machine no longer suspends
From: Jonathan Gray <jsg () jsg ! id ! au>
Date: 2021-07-28 13:16:13
Message-ID: YQFYnaH2PSjYzkFJ () largo ! jsg ! id ! au
[Download RAW message or body]
On Tue, Jul 27, 2021 at 03:17:35PM +0200, Mark Kettenis wrote:
> > Date: Tue, 27 Jul 2021 11:38:20 +1000
> > From: Jonathan Gray <jsg@jsg.id.au>
> >
> > On Mon, Jul 26, 2021 at 08:45:24AM +0100, Edd Barrett wrote:
> > > Hi,
> > >
> > > I upgraded my snapshot last week and the machine no longer suspends. I
> > > don't know exactly which snap caused this, as I hadn't been upgrading
> > > snaps very frequently.
> > >
> > > Upon running `zzz` from a terminal emulator, the screen goes black, but
> > > the machine never enters sleep state.
> > >
> > > Upon hard-resetting the machine, fsck runs, so the disks can't have been
> > > synced.
> > >
> > > I don't have a serial port on this machine, and I don't think I can
> > > force console output out of a usb-serial adapter (can I?).
> > >
> > > dmesg follows, but please let me know what other info might be useful.
> > >
> > > Thanks.
> >
> > on a t500 running amd64 with
> > radeondrm0: RV635
> >
> > on suspend I see
> > ttm_copy_io_ttm_page: stub
> > [TTM] Buffer eviction failed
> > uhub2 detached
> > ugen0 detached
> > ugen1 detached
> > uhub3 detached
> > uhub4 detached
> > uhub8 detached
> > video0 detached
> > uvideo0 detached
> > uhub0 detached
> > uhub5 detached
> > uhub6 detached
> > uhub7 detached
> > uhub1 detached
> >
> > but it does suspend and resume
> >
> > a t42 running i386 with
> > radeondrm0: RV200
> > does not hit the ttm_copy_io_ttm_page / Buffer eviction failed path
> >
> > with the following diff t500 suspends quicker here does it change
> > anything on your machine?
> >
> > Index: sys/dev/pci/drm/include/linux/highmem.h
> > ===================================================================
> > RCS file: /cvs/src/sys/dev/pci/drm/include/linux/highmem.h,v
> > retrieving revision 1.3
> > diff -u -p -r1.3 highmem.h
> > --- sys/dev/pci/drm/include/linux/highmem.h 14 Jun 2020 15:20:07 -0000 1.3
> > +++ sys/dev/pci/drm/include/linux/highmem.h 27 Jul 2021 01:26:38 -0000
> > @@ -54,6 +54,21 @@ kunmap_atomic(void *addr)
> > #endif
> > }
> >
> > -#endif /* defined(__i386__) || defined(__amd64__) */
> > +#else /* !defined(__i386__) && !defined(__amd64__) */
> > +
> > +static inline void *
> > +kmap_atomic(struct vm_page *pg)
> > +{
> > + return NULL;
> > +}
> > +
> > +static inline void
> > +kunmap_atomic(void *addr)
> > +{
> > +}
> > +
> > +#endif
> > +
> > +#define kmap_atomic_prot(a, p) kmap_atomic(a)
> >
> > #endif
> > Index: sys/dev/pci/drm/ttm/ttm_bo_util.c
> > ===================================================================
> > RCS file: /cvs/src/sys/dev/pci/drm/ttm/ttm_bo_util.c,v
> > retrieving revision 1.28
> > diff -u -p -r1.28 ttm_bo_util.c
> > --- sys/dev/pci/drm/ttm/ttm_bo_util.c 7 Jul 2021 02:38:37 -0000 1.28
> > +++ sys/dev/pci/drm/ttm/ttm_bo_util.c 27 Jul 2021 00:40:32 -0000
> > @@ -186,9 +186,6 @@ static int ttm_copy_io_ttm_page(struct t
> > unsigned long page,
> > pgprot_t prot)
> > {
> > - STUB();
> > - return -ENOSYS;
> > -#ifdef notyet
> > struct vm_page *d = ttm->pages[page];
> > void *dst;
> >
> > @@ -205,16 +202,12 @@ static int ttm_copy_io_ttm_page(struct t
> > kunmap_atomic(dst);
> >
> > return 0;
> > -#endif
> > }
> >
> > static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
> > unsigned long page,
> > pgprot_t prot)
> > {
> > - STUB();
> > - return -ENOSYS;
> > -#ifdef notyet
> > struct vm_page *s = ttm->pages[page];
> > void *src;
> >
> > @@ -231,7 +224,6 @@ static int ttm_copy_ttm_io_page(struct t
> > kunmap_atomic(src);
> >
> > return 0;
> > -#endif
> > }
> >
> > int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
>
> So here is a better diff that does respect the page protections in
> kmap_atomic_prot(). That might be relevant since pages may need to be
> mapped uncached.
>
> I think we can get away with having only a single reserved virtual
> address for kmap_atomic_prot(), at least as long as we run the drm
> code under the kernel lock. But I added some KASSERTs to make sure
> there isn't any concurrent use of the reserved virtual address.
>
> Edd, Alf, can you verify that this version fixes the issue as well?
>
> Jonathan, this probably needs a bit of testing on inteldrm(4) as well.
>
I can still start Xorg on i386 (x40 with i855), and amd64
(x230 ivy bridge, x250 broadwell) with this.
ok jsg@
>
> Index: dev/pci/drm/drm_linux.c
> ===================================================================
> RCS file: /cvs/src/sys/dev/pci/drm/drm_linux.c,v
> retrieving revision 1.81
> diff -u -p -r1.81 drm_linux.c
> --- dev/pci/drm/drm_linux.c 26 Jul 2021 06:24:22 -0000 1.81
> +++ dev/pci/drm/drm_linux.c 27 Jul 2021 13:10:59 -0000
> @@ -571,6 +571,29 @@ kunmap_va(void *addr)
> #endif
> }
>
> +vaddr_t kmap_atomic_va;
> +int kmap_atomic_inuse;
> +
> +void *
> +kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
> +{
> + KASSERT(!kmap_atomic_inuse);
> +
> + kmap_atomic_inuse = 1;
> + pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
> + PROT_READ | PROT_WRITE);
> + return (void *)kmap_atomic_va;
> +}
> +
> +void
> +kunmap_atomic(void *addr)
> +{
> + KASSERT(kmap_atomic_inuse);
> +
> + pmap_kremove(kmap_atomic_va, PAGE_SIZE);
> + kmap_atomic_inuse = 0;
> +}
> +
> void *
> vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
> pgprot_t prot)
> @@ -2348,6 +2371,9 @@ drm_linux_init(void)
>
> pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
> "idrpl", NULL);
> +
> + kmap_atomic_va =
> + (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
> }
>
> void
> Index: dev/pci/drm/include/linux/highmem.h
> ===================================================================
> RCS file: /cvs/src/sys/dev/pci/drm/include/linux/highmem.h,v
> retrieving revision 1.3
> diff -u -p -r1.3 highmem.h
> --- dev/pci/drm/include/linux/highmem.h 14 Jun 2020 15:20:07 -0000 1.3
> +++ dev/pci/drm/include/linux/highmem.h 27 Jul 2021 13:11:00 -0000
> @@ -21,39 +21,20 @@
> #include <sys/param.h>
> #include <uvm/uvm_extern.h>
> #include <linux/uaccess.h>
> +#include <asm/pgtable.h>
>
> void *kmap(struct vm_page *);
> -void kunmap_va(void *addr);
> +void kunmap_va(void *addr);
>
> #define kmap_to_page(ptr) (ptr)
>
> -#if defined(__i386__) || defined(__amd64__)
> +void *kmap_atomic_prot(struct vm_page *, pgprot_t);
> +void kunmap_atomic(void *);
>
> static inline void *
> kmap_atomic(struct vm_page *pg)
> {
> - vaddr_t va;
> -
> -#if defined (__HAVE_PMAP_DIRECT)
> - va = pmap_map_direct(pg);
> -#else
> - extern vaddr_t pmap_tmpmap_pa(paddr_t);
> - va = pmap_tmpmap_pa(VM_PAGE_TO_PHYS(pg));
> -#endif
> - return (void *)va;
> + return kmap_atomic_prot(pg, PAGE_KERNEL);
> }
> -
> -static inline void
> -kunmap_atomic(void *addr)
> -{
> -#if defined (__HAVE_PMAP_DIRECT)
> - pmap_unmap_direct((vaddr_t)addr);
> -#else
> - extern void pmap_tmpunmap_pa(void);
> - pmap_tmpunmap_pa();
> -#endif
> -}
> -
> -#endif /* defined(__i386__) || defined(__amd64__) */
>
> #endif
> Index: dev/pci/drm/ttm/ttm_bo_util.c
> ===================================================================
> RCS file: /cvs/src/sys/dev/pci/drm/ttm/ttm_bo_util.c,v
> retrieving revision 1.28
> diff -u -p -r1.28 ttm_bo_util.c
> --- dev/pci/drm/ttm/ttm_bo_util.c 7 Jul 2021 02:38:37 -0000 1.28
> +++ dev/pci/drm/ttm/ttm_bo_util.c 27 Jul 2021 13:11:00 -0000
> @@ -186,9 +186,6 @@ static int ttm_copy_io_ttm_page(struct t
> unsigned long page,
> pgprot_t prot)
> {
> - STUB();
> - return -ENOSYS;
> -#ifdef notyet
> struct vm_page *d = ttm->pages[page];
> void *dst;
>
> @@ -205,16 +202,12 @@ static int ttm_copy_io_ttm_page(struct t
> kunmap_atomic(dst);
>
> return 0;
> -#endif
> }
>
> static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
> unsigned long page,
> pgprot_t prot)
> {
> - STUB();
> - return -ENOSYS;
> -#ifdef notyet
> struct vm_page *s = ttm->pages[page];
> void *src;
>
> @@ -231,7 +224,6 @@ static int ttm_copy_ttm_io_page(struct t
> kunmap_atomic(src);
>
> return 0;
> -#endif
> }
>
> int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
>
>
[prev in list] [next in list] [prev in thread] [next in thread]
Configure |
About |
News |
Add a list |
Sponsored by KoreLogic