From: Kees Cook <keescook@chromium.org>
Subject: [PATCH v2 5/8] selftests/harness: Switch to TAP output
    Using kselftest_harness.h would result in non-TAP test reporting,
    which didn't make much sense given that all the requirements for using
    the low-level kselftest.h API were met. Switch to using the ksft_*()
    helpers while retaining as much human readability as possible.

    Signed-off-by: Kees Cook <keescook@chromium.org>
    ---
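
    For reviewers, a rough illustration (not part of the patch) of a minimal
    harness-based test; the test name and values below are made up. Before
    this change the runner printed "[ RUN ]"/"[ OK ]" banners; with it,
    per-test results are emitted as TAP "ok"/"not ok" lines via
    ksft_test_result(), and the human-readable progress lines become
    "#"-prefixed TAP comments.

    /* Hypothetical example, not part of this patch: a minimal test
     * built on kselftest_harness.h. With this series applied, running
     * the binary prints a TAP header and plan, "#"-prefixed progress
     * lines, and one "ok"/"not ok" result line per test.
     */
    #include "kselftest_harness.h"

    TEST(addition_works)
    {
    	/* A failure here would be reported as a "not ok" TAP line. */
    	EXPECT_EQ(4, 2 + 2);
    }

    TEST_HARNESS_MAIN
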
    tools/testing/selftests/kselftest.h | 5 +-
    tools/testing/selftests/kselftest_harness.h | 52 ++++++++++++---------
    2 files changed, 33 insertions(+), 24 deletions(-)

    diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
    index 2cd5eefed4d2..9b4efdbb07f6 100644
    --- a/tools/testing/selftests/kselftest.h
    +++ b/tools/testing/selftests/kselftest.h
    @@ -1,7 +1,8 @@
    /* SPDX-License-Identifier: GPL-2.0 */
    /*
    - * kselftest.h: kselftest framework return codes to include from
    - * selftests.
    + * kselftest.h: low-level kselftest framework to include from
    + * selftest programs. When possible, please use
    + * kselftest_harness.h instead.
    *
    * Copyright (c) 2014 Shuah Khan <shuahkh@osg.samsung.com>
    * Copyright (c) 2014 Samsung Electronics Co., Ltd.
    diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
    index c9f03ef93338..f8f7e47c739a 100644
    --- a/tools/testing/selftests/kselftest_harness.h
    +++ b/tools/testing/selftests/kselftest_harness.h
    @@ -50,7 +50,9 @@
    #ifndef __KSELFTEST_HARNESS_H
    #define __KSELFTEST_HARNESS_H

    +#ifndef _GNU_SOURCE
    #define _GNU_SOURCE
    +#endif
    #include <asm/types.h>
    #include <errno.h>
    #include <stdbool.h>
    @@ -62,6 +64,8 @@
    #include <sys/wait.h>
    #include <unistd.h>

    +#include "kselftest.h"
    +
    #define TEST_TIMEOUT_DEFAULT 30

    /* Utilities exposed to the test definitions */
    @@ -104,7 +108,7 @@

    /* Unconditional logger for internal use. */
    #define __TH_LOG(fmt, ...) \
    - fprintf(TH_LOG_STREAM, "%s:%d:%s:" fmt "\n", \
    + fprintf(TH_LOG_STREAM, "# %s:%d:%s:" fmt "\n", \
    __FILE__, __LINE__, _metadata->name, ##__VA_ARGS__)

    /**
    @@ -119,7 +123,7 @@
    */
    #define XFAIL(statement, fmt, ...) do { \
    if (TH_LOG_ENABLED) { \
    - fprintf(TH_LOG_STREAM, "[ XFAIL! ] " fmt "\n", \
    + fprintf(TH_LOG_STREAM, "# XFAIL " fmt "\n", \
    ##__VA_ARGS__); \
    } \
    /* TODO: find a way to pass xfail to test runner process. */ \
    @@ -813,12 +817,12 @@ static void __timeout_handler(int sig, siginfo_t *info, void *ucontext)
    /* Sanity check handler execution environment. */
    if (!t) {
    fprintf(TH_LOG_STREAM,
    - "no active test in SIGALRM handler!?\n");
    + "# no active test in SIGALRM handler!?\n");
    abort();
    }
    if (sig != SIGALRM || sig != info->si_signo) {
    fprintf(TH_LOG_STREAM,
    - "%s: SIGALRM handler caught signal %d!?\n",
    + "# %s: SIGALRM handler caught signal %d!?\n",
    t->name, sig != SIGALRM ? sig : info->si_signo);
    abort();
    }
    @@ -839,7 +843,7 @@ void __wait_for_test(struct __test_metadata *t)
    if (sigaction(SIGALRM, &action, &saved_action)) {
    t->passed = 0;
    fprintf(TH_LOG_STREAM,
    - "%s: unable to install SIGALRM handler\n",
    + "# %s: unable to install SIGALRM handler\n",
    t->name);
    return;
    }
    @@ -851,7 +855,7 @@ void __wait_for_test(struct __test_metadata *t)
    if (sigaction(SIGALRM, &saved_action, NULL)) {
    t->passed = 0;
    fprintf(TH_LOG_STREAM,
    - "%s: unable to uninstall SIGALRM handler\n",
    + "# %s: unable to uninstall SIGALRM handler\n",
    t->name);
    return;
    }
    @@ -860,18 +864,17 @@ void __wait_for_test(struct __test_metadata *t)
    if (t->timed_out) {
    t->passed = 0;
    fprintf(TH_LOG_STREAM,
    - "%s: Test terminated by timeout\n", t->name);
    + "# %s: Test terminated by timeout\n", t->name);
    } else if (WIFEXITED(status)) {
    t->passed = t->termsig == -1 ? !WEXITSTATUS(status) : 0;
    if (t->termsig != -1) {
    fprintf(TH_LOG_STREAM,
    - "%s: Test exited normally "
    - "instead of by signal (code: %d)\n",
    + "# %s: Test exited normally instead of by signal (code: %d)\n",
    t->name,
    WEXITSTATUS(status));
    } else if (!t->passed) {
    fprintf(TH_LOG_STREAM,
    - "%s: Test failed at step #%d\n",
    + "# %s: Test failed at step #%d\n",
    t->name,
    WEXITSTATUS(status));
    }
    @@ -879,20 +882,19 @@ void __wait_for_test(struct __test_metadata *t)
    t->passed = 0;
    if (WTERMSIG(status) == SIGABRT) {
    fprintf(TH_LOG_STREAM,
    - "%s: Test terminated by assertion\n",
    + "# %s: Test terminated by assertion\n",
    t->name);
    } else if (WTERMSIG(status) == t->termsig) {
    t->passed = 1;
    } else {
    fprintf(TH_LOG_STREAM,
    - "%s: Test terminated unexpectedly "
    - "by signal %d\n",
    + "# %s: Test terminated unexpectedly by signal %d\n",
    t->name,
    WTERMSIG(status));
    }
    } else {
    fprintf(TH_LOG_STREAM,
    - "%s: Test ended in some other way [%u]\n",
    + "# %s: Test ended in some other way [%u]\n",
    t->name,
    status);
    }
    @@ -908,11 +910,11 @@ void __run_test(struct __fixture_metadata *f,
    t->step = 0;
    t->no_print = 0;

    - printf("[ RUN ] %s%s%s.%s\n",
    + ksft_print_msg(" RUN %s%s%s.%s ...\n",
    f->name, variant->name[0] ? "." : "", variant->name, t->name);
    t->pid = fork();
    if (t->pid < 0) {
    - printf("ERROR SPAWNING TEST CHILD\n");
    + ksft_print_msg("ERROR SPAWNING TEST CHILD\n");
    t->passed = 0;
    } else if (t->pid == 0) {
    t->fn(t, variant);
    @@ -921,7 +923,9 @@ void __run_test(struct __fixture_metadata *f,
    } else {
    __wait_for_test(t);
    }
    - printf("[ %4s ] %s%s%s.%s\n", (t->passed ? "OK" : "FAIL"),
    + ksft_print_msg(" %4s %s%s%s.%s\n", t->passed ? "OK" : "FAIL",
    + f->name, variant->name[0] ? "." : "", variant->name, t->name);
    + ksft_test_result(t->passed, "%s%s%s.%s\n",
    f->name, variant->name[0] ? "." : "", variant->name, t->name);
    }

    @@ -945,8 +949,9 @@ static int test_harness_run(int __attribute__((unused)) argc,
    }
    }

    - /* TODO(wad) add optional arguments similar to gtest. */
    - printf("[==========] Running %u tests from %u test cases.\n",
    + ksft_print_header();
    + ksft_set_plan(test_count);
    + ksft_print_msg("Starting %u tests from %u test cases.\n",
    test_count, case_count);
    for (f = __fixture_list; f; f = f->next) {
    for (v = f->variant ?: &no_variant; v; v = v->next) {
    @@ -960,9 +965,12 @@ static int test_harness_run(int __attribute__((unused)) argc,
    }
    }
    }
    - printf("[==========] %u / %u tests passed.\n", pass_count, count);
    - printf("[ %s ]\n", (ret ? "FAILED" : "PASSED"));
    - return ret;
    + ksft_print_msg("%s: %u / %u tests passed.\n", ret ? "FAILED" : "PASSED",
    + pass_count, count);
    + ksft_exit(ret == 0);
    +
    + /* unreachable */
    + return KSFT_FAIL;
    }

    static void __attribute__((constructor)) __constructor_order_first(void)
    --
    2.25.1