sparc64: vdso: Switch to the generic vDSO library

The generic vDSO provides a lot of common functionality shared between
different architectures. SPARC is the last architecture not using it,
preventing some necessary code cleanup.

Make use of the generic infrastructure.

Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@kernel.org>
Tested-by: Andreas Larsson <andreas@gaisler.com>
Reviewed-by: Andreas Larsson <andreas@gaisler.com>
Acked-by: Andreas Larsson <andreas@gaisler.com>
Link: https://patch.msgid.link/20260304-vdso-sparc64-generic-2-v6-10-d8eb3b0e1410@linutronix.de
This commit is contained in:
Thomas Weißschuh 2026-03-04 08:49:07 +01:00 committed by Thomas Gleixner
parent e13e3059dc
commit 7c5fc16c7a
13 changed files with 123 additions and 380 deletions

View File

@ -104,7 +104,6 @@ config SPARC64
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select GENERIC_TIME_VSYSCALL
select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_PTE_SPECIAL
select PCI_DOMAINS if PCI
select ARCH_HAS_GIGANTIC_PAGE
@ -115,6 +114,8 @@ config SPARC64
select ARCH_SUPPORTS_SCHED_SMT if SMP
select ARCH_SUPPORTS_SCHED_MC if SMP
select ARCH_HAS_LAZY_MMU_MODE
select HAVE_GENERIC_VDSO
select GENERIC_GETTIMEOFDAY
config ARCH_PROC_KCORE_TEXT
def_bool y

View File

@ -5,13 +5,4 @@
#ifndef _ASM_SPARC_CLOCKSOURCE_H
#define _ASM_SPARC_CLOCKSOURCE_H
/* VDSO clocksources */
#define VCLOCK_NONE 0 /* Nothing userspace can do. */
#define VCLOCK_TICK 1 /* Use %tick. */
#define VCLOCK_STICK 2 /* Use %stick. */
struct arch_clocksource_data {
int vclock_mode;
};
#endif /* _ASM_SPARC_CLOCKSOURCE_H */

View File

@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_VDSO_CLOCKSOURCE_H
#define __ASM_VDSO_CLOCKSOURCE_H
/* VDSO clocksources */
#define VDSO_ARCH_CLOCKMODES \
VDSO_CLOCKMODE_TICK, \
VDSO_CLOCKMODE_STICK
#endif /* __ASM_VDSO_CLOCKSOURCE_H */

View File

@ -9,15 +9,14 @@
#include <uapi/linux/time.h>
#include <uapi/linux/unistd.h>
#include <vdso/align.h>
#include <vdso/clocksource.h>
#include <vdso/datapage.h>
#include <vdso/page.h>
#include <linux/types.h>
#include <asm/vvar.h>
#ifdef CONFIG_SPARC64
static __always_inline u64 vdso_shift_ns(u64 val, u32 amt)
{
return val >> amt;
}
static __always_inline u64 vread_tick(void)
{
u64 ret;
@ -48,6 +47,7 @@ static __always_inline u64 vdso_shift_ns(u64 val, u32 amt)
: "g1");
return ret;
}
#define vdso_shift_ns vdso_shift_ns
static __always_inline u64 vread_tick(void)
{
@ -70,9 +70,9 @@ static __always_inline u64 vread_tick_stick(void)
}
#endif
static __always_inline u64 __arch_get_hw_counter(struct vvar_data *vvar)
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_time_data *vd)
{
if (likely(vvar->vclock_mode == VCLOCK_STICK))
if (likely(clock_mode == VDSO_CLOCKMODE_STICK))
return vread_tick_stick();
else
return vread_tick();
@ -102,7 +102,7 @@ static __always_inline u64 __arch_get_hw_counter(struct vvar_data *vvar)
"cc", "memory"
static __always_inline
long clock_gettime_fallback(clockid_t clock, struct __kernel_old_timespec *ts)
long clock_gettime_fallback(clockid_t clock, struct __kernel_timespec *ts)
{
register long num __asm__("g1") = __NR_clock_gettime;
register long o0 __asm__("o0") = clock;
@ -113,6 +113,20 @@ long clock_gettime_fallback(clockid_t clock, struct __kernel_old_timespec *ts)
return o0;
}
#ifndef CONFIG_SPARC64
static __always_inline
long clock_gettime32_fallback(clockid_t clock, struct old_timespec32 *ts)
{
register long num __asm__("g1") = __NR_clock_gettime;
register long o0 __asm__("o0") = clock;
register long o1 __asm__("o1") = (long) ts;
__asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
"0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
return o0;
}
#endif
static __always_inline
long gettimeofday_fallback(struct __kernel_old_timeval *tv, struct timezone *tz)
{
@ -125,4 +139,30 @@ long gettimeofday_fallback(struct __kernel_old_timeval *tv, struct timezone *tz)
return o0;
}
static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
{
unsigned long ret;
/*
* SPARC does not support native PC-relative code relocations.
* Calculate the address manually, works for 32 and 64 bit code.
*/
__asm__ __volatile__(
"1:\n"
"call 3f\n" // Jump over the embedded data and set up %o7
"nop\n" // Delay slot
"2:\n"
".word vdso_u_time_data - .\n" // Embedded offset to external symbol
"3:\n"
"add %%o7, 2b - 1b, %%o7\n" // Point %o7 to the embedded offset
"ldsw [%%o7], %0\n" // Load the offset
"add %0, %%o7, %0\n" // Calculate the absolute address
: "=r" (ret)
:
: "o7");
return (const struct vdso_time_data *)ret;
}
#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
#endif /* _ASM_SPARC_VDSO_GETTIMEOFDAY_H */

View File

@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SPARC_VDSO_VSYSCALL_H
#define _ASM_SPARC_VDSO_VSYSCALL_H
#define __VDSO_PAGES 4
#include <asm-generic/vdso/vsyscall.h>
#endif /* _ASM_SPARC_VDSO_VSYSCALL_H */

View File

@ -1,75 +0,0 @@
/*
* Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
*/
#ifndef _ASM_SPARC_VVAR_DATA_H
#define _ASM_SPARC_VVAR_DATA_H
#include <asm/clocksource.h>
#include <asm/processor.h>
#include <asm/barrier.h>
#include <linux/time.h>
#include <linux/types.h>
struct vvar_data {
unsigned int seq;
int vclock_mode;
struct { /* extract of a clocksource struct */
u64 cycle_last;
u64 mask;
int mult;
int shift;
} clock;
/* open coded 'struct timespec' */
u64 wall_time_sec;
u64 wall_time_snsec;
u64 monotonic_time_snsec;
u64 monotonic_time_sec;
u64 monotonic_time_coarse_sec;
u64 monotonic_time_coarse_nsec;
u64 wall_time_coarse_sec;
u64 wall_time_coarse_nsec;
int tz_minuteswest;
int tz_dsttime;
};
extern struct vvar_data *vvar_data;
extern int vdso_fix_stick;
static inline unsigned int vvar_read_begin(const struct vvar_data *s)
{
unsigned int ret;
repeat:
ret = READ_ONCE(s->seq);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
}
smp_rmb(); /* Finish all reads before we return seq */
return ret;
}
static inline int vvar_read_retry(const struct vvar_data *s,
unsigned int start)
{
smp_rmb(); /* Finish all reads before checking the value of seq */
return unlikely(s->seq != start);
}
static inline void vvar_write_begin(struct vvar_data *s)
{
++s->seq;
smp_wmb(); /* Makes sure that increment of seq is reflected */
}
static inline void vvar_write_end(struct vvar_data *s)
{
smp_wmb(); /* Makes the value of seq current before we increment */
++s->seq;
}
#endif /* _ASM_SPARC_VVAR_DATA_H */

View File

@ -41,7 +41,6 @@ obj-$(CONFIG_SPARC32) += systbls_32.o
obj-y += time_$(BITS).o
obj-$(CONFIG_SPARC32) += windows.o
obj-y += cpu.o
obj-$(CONFIG_SPARC64) += vdso.o
obj-$(CONFIG_SPARC32) += devices.o
obj-y += ptrace_$(BITS).o
obj-y += unaligned_$(BITS).o

View File

@ -838,14 +838,14 @@ void __init time_init_early(void)
if (tlb_type == spitfire) {
if (is_hummingbird()) {
init_tick_ops(&hbtick_operations);
clocksource_tick.archdata.vclock_mode = VCLOCK_NONE;
clocksource_tick.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
} else {
init_tick_ops(&tick_operations);
clocksource_tick.archdata.vclock_mode = VCLOCK_TICK;
clocksource_tick.vdso_clock_mode = VDSO_CLOCKMODE_TICK;
}
} else {
init_tick_ops(&stick_operations);
clocksource_tick.archdata.vclock_mode = VCLOCK_STICK;
clocksource_tick.vdso_clock_mode = VDSO_CLOCKMODE_STICK;
}
}

View File

@ -1,69 +0,0 @@
/*
* Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
* Copyright 2003 Andi Kleen, SuSE Labs.
*
* Thanks to hpa@transmeta.com for some useful hint.
* Special thanks to Ingo Molnar for his early experience with
* a different vsyscall implementation for Linux/IA32 and for the name.
*/
#include <linux/time.h>
#include <linux/timekeeper_internal.h>
#include <asm/vvar.h>
void update_vsyscall_tz(void)
{
if (unlikely(vvar_data == NULL))
return;
vvar_data->tz_minuteswest = sys_tz.tz_minuteswest;
vvar_data->tz_dsttime = sys_tz.tz_dsttime;
}
void update_vsyscall(struct timekeeper *tk)
{
struct vvar_data *vdata = vvar_data;
if (unlikely(vdata == NULL))
return;
vvar_write_begin(vdata);
vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
vdata->clock.mask = tk->tkr_mono.mask;
vdata->clock.mult = tk->tkr_mono.mult;
vdata->clock.shift = tk->tkr_mono.shift;
vdata->wall_time_sec = tk->xtime_sec;
vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;
vdata->monotonic_time_sec = tk->xtime_sec +
tk->wall_to_monotonic.tv_sec;
vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec +
(tk->wall_to_monotonic.tv_nsec <<
tk->tkr_mono.shift);
while (vdata->monotonic_time_snsec >=
(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
vdata->monotonic_time_snsec -=
((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
vdata->monotonic_time_sec++;
}
vdata->wall_time_coarse_sec = tk->xtime_sec;
vdata->wall_time_coarse_nsec =
(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
vdata->monotonic_time_coarse_sec =
vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
vdata->monotonic_time_coarse_nsec =
vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
vdata->monotonic_time_coarse_sec++;
}
vvar_write_end(vdata);
}

View File

@ -3,6 +3,9 @@
# Building vDSO images for sparc.
#
# Include the generic Makefile to check the built vDSO:
include $(srctree)/lib/vdso/Makefile.include
# files to link into the vdso
vobjs-y := vdso-note.o vclock_gettime.o
@ -105,6 +108,7 @@ $(obj)/vdso32.so.dbg: FORCE \
quiet_cmd_vdso = VDSO $@
cmd_vdso = $(LD) -nostdlib -o $@ \
$(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
-T $(filter %.lds,$^) $(filter %.o,$^)
-T $(filter %.lds,$^) $(filter %.o,$^); \
$(cmd_vdso_check)
VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 -Bsymbolic --no-undefined -z noexecstack

View File

@ -12,169 +12,40 @@
* Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/timex.h>
#include <asm/clocksource.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <vdso/gettime.h>
#include <asm/vdso/gettimeofday.h>
#include <asm/vvar.h>
/*
* Compute the vvar page's address in the process address space, and return it
* as a pointer to the vvar_data.
*/
notrace static __always_inline struct vvar_data *get_vvar_data(void)
#include "../../../../lib/vdso/gettimeofday.c"
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
unsigned long ret;
/*
* vdso data page is the first vDSO page so grab the PC
* and move up a page to get to the data page.
*/
__asm__("rd %%pc, %0" : "=r" (ret));
ret &= ~(8192 - 1);
ret -= 8192;
return (struct vvar_data *) ret;
return __cvdso_gettimeofday(tv, tz);
}
notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
{
u64 v;
u64 cycles = __arch_get_hw_counter(vvar);
int gettimeofday(struct __kernel_old_timeval *, struct timezone *)
__weak __alias(__vdso_gettimeofday);
v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
return v * vvar->clock.mult;
#if defined(CONFIG_SPARC64)
int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
return __cvdso_clock_gettime(clock, ts);
}
notrace static __always_inline int do_realtime(struct vvar_data *vvar,
struct __kernel_old_timespec *ts)
int clock_gettime(clockid_t, struct __kernel_timespec *)
__weak __alias(__vdso_clock_gettime);
#else
int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts)
{
unsigned long seq;
u64 ns;
do {
seq = vvar_read_begin(vvar);
ts->tv_sec = vvar->wall_time_sec;
ns = vvar->wall_time_snsec;
ns += vgetsns(vvar);
ns = vdso_shift_ns(ns, vvar->clock.shift);
} while (unlikely(vvar_read_retry(vvar, seq)));
ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
ts->tv_nsec = ns;
return 0;
return __cvdso_clock_gettime32(clock, ts);
}
notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
struct __kernel_old_timespec *ts)
{
unsigned long seq;
u64 ns;
int clock_gettime(clockid_t, struct old_timespec32 *)
__weak __alias(__vdso_clock_gettime);
do {
seq = vvar_read_begin(vvar);
ts->tv_sec = vvar->monotonic_time_sec;
ns = vvar->monotonic_time_snsec;
ns += vgetsns(vvar);
ns = vdso_shift_ns(ns, vvar->clock.shift);
} while (unlikely(vvar_read_retry(vvar, seq)));
ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
ts->tv_nsec = ns;
return 0;
}
notrace static int do_realtime_coarse(struct vvar_data *vvar,
struct __kernel_old_timespec *ts)
{
unsigned long seq;
do {
seq = vvar_read_begin(vvar);
ts->tv_sec = vvar->wall_time_coarse_sec;
ts->tv_nsec = vvar->wall_time_coarse_nsec;
} while (unlikely(vvar_read_retry(vvar, seq)));
return 0;
}
notrace static int do_monotonic_coarse(struct vvar_data *vvar,
struct __kernel_old_timespec *ts)
{
unsigned long seq;
do {
seq = vvar_read_begin(vvar);
ts->tv_sec = vvar->monotonic_time_coarse_sec;
ts->tv_nsec = vvar->monotonic_time_coarse_nsec;
} while (unlikely(vvar_read_retry(vvar, seq)));
return 0;
}
notrace int
__vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts)
{
struct vvar_data *vvd = get_vvar_data();
switch (clock) {
case CLOCK_REALTIME:
if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
break;
return do_realtime(vvd, ts);
case CLOCK_MONOTONIC:
if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
break;
return do_monotonic(vvd, ts);
case CLOCK_REALTIME_COARSE:
return do_realtime_coarse(vvd, ts);
case CLOCK_MONOTONIC_COARSE:
return do_monotonic_coarse(vvd, ts);
}
/*
* Unknown clock ID ? Fall back to the syscall.
*/
return clock_gettime_fallback(clock, ts);
}
int
clock_gettime(clockid_t, struct __kernel_old_timespec *)
__attribute__((weak, alias("__vdso_clock_gettime")));
notrace int
__vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
struct vvar_data *vvd = get_vvar_data();
if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
if (likely(tv != NULL)) {
union tstv_t {
struct __kernel_old_timespec ts;
struct __kernel_old_timeval tv;
} *tstv = (union tstv_t *) tv;
do_realtime(vvd, &tstv->ts);
/*
* Assign before dividing to ensure that the division is
* done in the type of tv_usec, not tv_nsec.
*
* There cannot be > 1 billion usec in a second:
* do_realtime() has already distributed such overflow
* into tv_sec. So we can assign it to an int safely.
*/
tstv->tv.tv_usec = tstv->ts.tv_nsec;
tstv->tv.tv_usec /= 1000;
}
if (unlikely(tz != NULL)) {
/* Avoid memcpy. Some old compilers fail to inline it */
tz->tz_minuteswest = vvd->tz_minuteswest;
tz->tz_dsttime = vvd->tz_dsttime;
}
return 0;
}
return gettimeofday_fallback(tv, tz);
}
int
gettimeofday(struct __kernel_old_timeval *, struct timezone *)
__attribute__((weak, alias("__vdso_gettimeofday")));
#endif

View File

@ -4,6 +4,10 @@
* This script controls its layout.
*/
#include <vdso/datapage.h>
#include <vdso/page.h>
#include <asm/vdso/vsyscall.h>
SECTIONS
{
/*
@ -13,8 +17,7 @@ SECTIONS
* segment. Page size is 8192 for both 64-bit and 32-bit vdso binaries
*/
vvar_start = . -8192;
vvar_data = vvar_start;
VDSO_VVAR_SYMS
. = SIZEOF_HEADERS;

View File

@ -16,17 +16,16 @@
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/vdso_datastore.h>
#include <asm/cacheflush.h>
#include <asm/spitfire.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
unsigned int __read_mostly vdso_enabled = 1;
#include <vdso/datapage.h>
#include <asm/vdso/vsyscall.h>
static struct vm_special_mapping vvar_mapping = {
.name = "[vvar]"
};
unsigned int __read_mostly vdso_enabled = 1;
#ifdef CONFIG_SPARC64
static struct vm_special_mapping vdso_mapping64 = {
@ -40,10 +39,8 @@ static struct vm_special_mapping vdso_mapping32 = {
};
#endif
struct vvar_data *vvar_data;
/*
* Allocate pages for the vdso and vvar, and copy in the vdso text from the
* Allocate pages for the vdso and copy in the vdso text from the
* kernel image.
*/
static int __init init_vdso_image(const struct vdso_image *image,
@ -51,9 +48,8 @@ static int __init init_vdso_image(const struct vdso_image *image,
bool elf64)
{
int cnpages = (image->size) / PAGE_SIZE;
struct page *dp, **dpp = NULL;
struct page *cp, **cpp = NULL;
int i, dnpages = 0;
int i;
/*
* First, the vdso text. This is initialied data, an integral number of
@ -76,31 +72,6 @@ static int __init init_vdso_image(const struct vdso_image *image,
copy_page(page_address(cp), image->data + i * PAGE_SIZE);
}
/*
* Now the vvar page. This is uninitialized data.
*/
if (vvar_data == NULL) {
dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
if (WARN_ON(dnpages != 1))
goto oom;
dpp = kzalloc_objs(struct page *, dnpages);
vvar_mapping.pages = dpp;
if (!dpp)
goto oom;
dp = alloc_page(GFP_KERNEL);
if (!dp)
goto oom;
dpp[0] = dp;
vvar_data = page_address(dp);
memset(vvar_data, 0, PAGE_SIZE);
vvar_data->seq = 0;
}
return 0;
oom:
if (cpp != NULL) {
@ -112,15 +83,6 @@ static int __init init_vdso_image(const struct vdso_image *image,
vdso_mapping->pages = NULL;
}
if (dpp != NULL) {
for (i = 0; i < dnpages; i++) {
if (dpp[i] != NULL)
__free_page(dpp[i]);
}
kfree(dpp);
vvar_mapping.pages = NULL;
}
pr_warn("Cannot allocate vdso\n");
vdso_enabled = 0;
return -ENOMEM;
@ -155,9 +117,12 @@ static unsigned long vdso_addr(unsigned long start, unsigned int len)
return start + (offset << PAGE_SHIFT);
}
static_assert(VDSO_NR_PAGES == __VDSO_PAGES);
static int map_vdso(const struct vdso_image *image,
struct vm_special_mapping *vdso_mapping)
{
const size_t area_size = image->size + VDSO_NR_PAGES * PAGE_SIZE;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long text_start, addr = 0;
@ -170,23 +135,20 @@ static int map_vdso(const struct vdso_image *image,
* region is free.
*/
if (current->flags & PF_RANDOMIZE) {
addr = get_unmapped_area(NULL, 0,
image->size - image->sym_vvar_start,
0, 0);
addr = get_unmapped_area(NULL, 0, area_size, 0, 0);
if (IS_ERR_VALUE(addr)) {
ret = addr;
goto up_fail;
}
addr = vdso_addr(addr, image->size - image->sym_vvar_start);
addr = vdso_addr(addr, area_size);
}
addr = get_unmapped_area(NULL, addr,
image->size - image->sym_vvar_start, 0, 0);
addr = get_unmapped_area(NULL, addr, area_size, 0, 0);
if (IS_ERR_VALUE(addr)) {
ret = addr;
goto up_fail;
}
text_start = addr - image->sym_vvar_start;
text_start = addr + VDSO_NR_PAGES * PAGE_SIZE;
current->mm->context.vdso = (void __user *)text_start;
/*
@ -204,11 +166,7 @@ static int map_vdso(const struct vdso_image *image,
goto up_fail;
}
vma = _install_special_mapping(mm,
addr,
-image->sym_vvar_start,
VM_READ|VM_MAYREAD,
&vvar_mapping);
vma = vdso_install_vvar_mapping(mm, addr);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);