[libseccomp-discuss] [PATCH] all: fix a number of small bugs found by Coverity
Paul Moore
2014-08-29 19:11:22 UTC
Also display the build revision so it is easier to identify a build
when submitting it for Coverity scanning.

Signed-off-by: Paul Moore <***@redhat.com>
---
 Makefile.am              | 11 ++++++++---
 src/api.c                | 12 ++++++++----
 src/arch-syscall-check.c | 20 +++++++++++---------
 src/db.c                 |  5 +++++
 src/gen_bpf.c            | 24 ++++++++++++++----------
 5 files changed, 46 insertions(+), 26 deletions(-)

diff --git a/Makefile.am b/Makefile.am
index 7fe0787..71fde48 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -42,10 +42,15 @@ endif

if COVERITY
coverity-tarball: coverity-build
- @git rev-parse HEAD &> /dev/null && \
- rev=$$(git rev-parse HEAD | cut -c1-8) || \
- rev=$$(date --iso-8601=date); \
+ @if git rev-parse HEAD &> /dev/null; then \
+ rev_full=$$(git rev-parse HEAD); \
+ rev=$$(echo $$rev_full | cut -c1-8); \
+ else \
+ rev_full=$$(date --iso-8601=date); \
+ rev=$$rev_full; \
+ fi; \
tar czf libseccomp-coverity_$$rev.tar.gz cov-int; \
+ echo " HEAD revision: $$rev_full"; \
ls -l libseccomp-coverity_$$rev.tar.gz
endif

diff --git a/src/api.c b/src/api.c
index b54506f..684c0e7 100644
--- a/src/api.c
+++ b/src/api.c
@@ -516,7 +516,8 @@ API int seccomp_rule_add_array(scmp_filter_ctx ctx,
unsigned int arg_cnt,
const struct scmp_arg_cmp *arg_array)
{
- if (arg_cnt < 0 || arg_cnt > ARG_COUNT_MAX)
+ /* arg_cnt is unsigned, so no need to check the lower bound */
+ if (arg_cnt > ARG_COUNT_MAX)
return -EINVAL;

return _seccomp_rule_add((struct db_filter_col *)ctx,
@@ -533,7 +534,8 @@ API int seccomp_rule_add(scmp_filter_ctx ctx,
struct scmp_arg_cmp arg_array[ARG_COUNT_MAX];
va_list arg_list;

- if (arg_cnt < 0 || arg_cnt > ARG_COUNT_MAX)
+ /* arg_cnt is unsigned, so no need to check the lower bound */
+ if (arg_cnt > ARG_COUNT_MAX)
return -EINVAL;

va_start(arg_list, arg_cnt);
@@ -551,7 +553,8 @@ API int seccomp_rule_add_exact_array(scmp_filter_ctx ctx,
unsigned int arg_cnt,
const struct scmp_arg_cmp *arg_array)
{
- if (arg_cnt < 0 || arg_cnt > ARG_COUNT_MAX)
+ /* arg_cnt is unsigned, so no need to check the lower bound */
+ if (arg_cnt > ARG_COUNT_MAX)
return -EINVAL;

return _seccomp_rule_add((struct db_filter_col *)ctx,
@@ -568,7 +571,8 @@ API int seccomp_rule_add_exact(scmp_filter_ctx ctx,
struct scmp_arg_cmp arg_array[ARG_COUNT_MAX];
va_list arg_list;

- if (arg_cnt < 0 || arg_cnt > ARG_COUNT_MAX)
+ /* arg_cnt is unsigned, so no need to check the lower bound */
+ if (arg_cnt > ARG_COUNT_MAX)
return -EINVAL;

va_start(arg_list, arg_cnt);
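
For context on the api.c hunks above: with an unsigned parameter the
"arg_cnt < 0" half of the old test can never be true, which is the dead
code Coverity flags. Below is a minimal standalone sketch of the same
pattern, using a made-up check_arg_cnt() rather than the real
seccomp_rule_add() entry points:

#include <stdio.h>

#define ARG_COUNT_MAX 6

static int check_arg_cnt(unsigned int arg_cnt)
{
        /* "arg_cnt < 0" would always be false for an unsigned type,
         * so only the upper bound needs to be tested */
        if (arg_cnt > ARG_COUNT_MAX)
                return -1;
        return 0;
}

int main(void)
{
        printf("%d %d\n", check_arg_cnt(3), check_arg_cnt(99));
        return 0;
}
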
diff --git a/src/arch-syscall-check.c b/src/arch-syscall-check.c
index 379af6e..a074c9d 100644
--- a/src/arch-syscall-check.c
+++ b/src/arch-syscall-check.c
@@ -67,14 +67,16 @@ int main(int argc, char *argv[])
int i_mips = 0;
int i_mips64 = 0;
int i_mips64n32 = 0;
- const char *sys_name, *tmp;
+ const char *sys_name;
char str_miss[256];

do {
str_miss[0] = '\0';
- tmp = x86_syscall_iterate_name(i_x86);
- if (tmp)
- sys_name = tmp;
+ sys_name = x86_syscall_iterate_name(i_x86);
+ if (sys_name == NULL) {
+ printf("FAULT\n");
+ return 1;
+ }

/* check each arch using x86 as the reference */
syscall_check(str_miss, sys_name, "x86_64",
@@ -122,9 +124,9 @@ int main(int argc, char *argv[])
i_mips >= 0 && i_mips64 >= 0 && i_mips64n32 >= 0);

/* check for any leftovers */
- tmp = x86_syscall_iterate_name(i_x86 + 1);
- if (tmp) {
- printf("%s: ERROR, x86 has additional syscalls\n", tmp);
+ sys_name = x86_syscall_iterate_name(i_x86 + 1);
+ if (sys_name) {
+ printf("%s: ERROR, x86 has additional syscalls\n", sys_name);
return 1;
}
if (i_x86_64 >= 0) {
@@ -154,12 +156,12 @@ int main(int argc, char *argv[])
}
if (i_mips64 >= 0) {
printf("%s: ERROR, mips64 has additional syscalls\n",
- mips64_syscall_iterate_name(i_mips));
+ mips64_syscall_iterate_name(i_mips64));
return 1;
}
if (i_mips64n32 >= 0) {
printf("%s: ERROR, mips64n32 has additional syscalls\n",
- mips64n32_syscall_iterate_name(i_mips));
+ mips64n32_syscall_iterate_name(i_mips64n32));
return 1;
}
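
For context on the arch-syscall-check.c hunks: the mips64 and mips64n32
leftover messages were indexed with the wrong counter (i_mips instead of
i_mips64/i_mips64n32), and a NULL return from the x86 iterator is now
treated as a hard failure instead of silently reusing the previous name.
A minimal sketch of the intended iteration pattern, with a made-up
demo_syscall_iterate_name() table standing in for the generated
libseccomp helpers:

#include <stdio.h>

static const char *demo_syscall_iterate_name(int idx)
{
        /* returns NULL once idx walks past the end of the table */
        static const char *tbl[] = { "read", "write", "open" };

        if (idx < 0 || idx >= (int)(sizeof(tbl) / sizeof(tbl[0])))
                return NULL;
        return tbl[idx];
}

int main(void)
{
        const char *sys_name;
        int i;

        for (i = 0; ; i++) {
                sys_name = demo_syscall_iterate_name(i);
                if (sys_name == NULL)
                        break;  /* stop rather than keep a stale pointer */
                printf("%d: %s\n", i, sys_name);
        }
        return 0;
}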

diff --git a/src/db.c b/src/db.c
index 47a94f9..9923442 100644
--- a/src/db.c
+++ b/src/db.c
@@ -565,6 +565,7 @@ int db_col_attr_get(const struct db_filter_col *col,
break;
case SCMP_FLTATR_CTL_TSYNC:
*value = col->attr.tsync_enable;
+ break;
default:
rc = -EEXIST;
break;
@@ -873,6 +874,8 @@ static struct db_sys_list *_db_rule_gen_64(const struct arch_def *arch,
s_new->valid = true;
/* run through the argument chain */
chain_len_max = arch_arg_count_max(arch);
+ if (chain_len_max < 0)
+ goto gen_64_failure;
for (iter = 0; iter < chain_len_max; iter++) {
if (chain[iter].valid == 0)
continue;
@@ -1003,6 +1006,8 @@ static struct db_sys_list *_db_rule_gen_32(const struct arch_def *arch,
s_new->valid = true;
/* run through the argument chain */
chain_len_max = arch_arg_count_max(arch);
+ if (chain_len_max < 0)
+ goto gen_32_failure;
for (iter = 0; iter < chain_len_max; iter++) {
if (chain[iter].valid == 0)
continue;
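
For context on the db.c hunks: the SCMP_FLTATR_CTL_TSYNC case was
missing a break, so a successful read fell through into the default
branch and overwrote the result with an error, and arch_arg_count_max()
can return a negative value that should not be used as a loop bound. A
minimal fall-through sketch with a made-up attribute getter, not the
real db_col_attr_get():

#include <stdio.h>

enum demo_attr { DEMO_ATTR_TSYNC, DEMO_ATTR_BOGUS };

static int demo_attr_get(enum demo_attr attr, unsigned int *value)
{
        int rc = 0;

        switch (attr) {
        case DEMO_ATTR_TSYNC:
                *value = 1;
                break;  /* without this break, rc falls through to -1 */
        default:
                rc = -1;
                break;
        }

        return rc;
}

int main(void)
{
        unsigned int value = 0;

        printf("rc=%d value=%u\n",
               demo_attr_get(DEMO_ATTR_TSYNC, &value), value);
        return 0;
}
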
diff --git a/src/gen_bpf.c b/src/gen_bpf.c
index 826cc28..8f3ea41 100644
--- a/src/gen_bpf.c
+++ b/src/gen_bpf.c
@@ -490,6 +490,7 @@ static int _hsh_add(struct bpf_state *state, struct bpf_blk **blk_p,
h_new->found = (found ? 1 : 0);

/* insert the block into the hash table */
+hsh_add_restart:
h_iter = state->htbl[h_val & _BPF_HASH_MASK];
if (h_iter != NULL) {
do {
@@ -530,13 +531,14 @@ static int _hsh_add(struct bpf_state *state, struct bpf_blk **blk_p,
/* overflow */
blk->flag_hash = false;
blk->hash = 0;
+ free(h_new);
return -EFAULT;
}
h_val += ((uint64_t)1 << 32);
h_new->blk->hash = h_val;

/* restart at the beginning of the bucket */
- h_iter = state->htbl[h_val & _BPF_HASH_MASK];
+ goto hsh_add_restart;
} else {
/* no match, move along */
h_prev = h_iter;
@@ -1082,8 +1084,10 @@ static struct bpf_blk *_gen_bpf_syscall(struct bpf_state *state,

/* generate the argument chains */
blk_c = _gen_bpf_chain(state, sys, sys->chains, &def_jump, &a_state);
- if (blk_c == NULL)
+ if (blk_c == NULL) {
+ _blk_free(state, blk_s);
return NULL;
+ }

/* syscall check */
_BPF_INSTR(instr, _BPF_OP(state->arch, BPF_JMP + BPF_JEQ),
@@ -1359,10 +1363,10 @@ static int _gen_bpf_build_jmp_ret(struct bpf_state *state,
j_len += b_jmp->blk_cnt;
b_jmp = b_jmp->next;
}
- if (j_len <= _BPF_JMP_MAX_RET && b_jmp == blk_ret)
- return 0;
if (b_jmp == NULL)
return -EFAULT;
+ if (j_len <= _BPF_JMP_MAX_RET && b_jmp == blk_ret)
+ return 0;

/* we need a closer return instruction, see if one already exists */
j_len = blk->blk_cnt - (offset + 1);
@@ -1372,10 +1376,10 @@ static int _gen_bpf_build_jmp_ret(struct bpf_state *state,
j_len += b_jmp->blk_cnt;
b_jmp = b_jmp->next;
}
- if (j_len <= _BPF_JMP_MAX_RET && b_jmp->hash == tgt_hash)
- return 0;
if (b_jmp == NULL)
return -EFAULT;
+ if (j_len <= _BPF_JMP_MAX_RET && b_jmp->hash == tgt_hash)
+ return 0;

/* we need to insert a new return instruction - create one */
b_new = _gen_bpf_action(state, NULL, blk_ret->blks[0].k.tgt.imm_k);
@@ -1446,10 +1450,10 @@ static int _gen_bpf_build_jmp(struct bpf_state *state,
jmp_len += b_jmp->blk_cnt;
b_jmp = b_jmp->next;
}
- if (jmp_len <= _BPF_JMP_MAX && b_jmp == b_tgt)
- return 0;
if (b_jmp == NULL)
return -EFAULT;
+ if (jmp_len <= _BPF_JMP_MAX && b_jmp == b_tgt)
+ return 0;

/* we need a long jump, see if one already exists */
jmp_len = blk->blk_cnt - (offset + 1);
@@ -1459,10 +1463,10 @@ static int _gen_bpf_build_jmp(struct bpf_state *state,
jmp_len += b_jmp->blk_cnt;
b_jmp = b_jmp->next;
}
- if (jmp_len <= _BPF_JMP_MAX && b_jmp->hash == tgt_hash)
- return 0;
if (b_jmp == NULL)
return -EFAULT;
+ if (jmp_len <= _BPF_JMP_MAX && b_jmp->hash == tgt_hash)
+ return 0;

/* we need to insert a long jump - create one */
_BPF_INSTR(instr, _BPF_OP(state->arch, BPF_JMP + BPF_JA),
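
For context on the gen_bpf.c jump hunks: all four reorder the checks so
the NULL test on b_jmp happens before b_jmp->hash is read, since the
preceding walk can run off the end of the block list. A minimal sketch
of the corrected ordering, using a made-up demo_blk type rather than
the real bpf_blk:

#include <stdio.h>

struct demo_blk {
        unsigned long hash;
        unsigned int blk_cnt;
        struct demo_blk *next;
};

/* returns 0 if the target is within range, 1 if a long jump is needed,
 * -1 if the target was never found */
static int demo_check_target(const struct demo_blk *b_jmp,
                             unsigned int len_max, unsigned long tgt_hash)
{
        unsigned int len = 0;

        while (b_jmp != NULL && b_jmp->hash != tgt_hash) {
                len += b_jmp->blk_cnt;
                b_jmp = b_jmp->next;
        }

        /* the NULL check must come first ... */
        if (b_jmp == NULL)
                return -1;
        /* ... only then is it safe to dereference b_jmp again */
        if (len <= len_max && b_jmp->hash == tgt_hash)
                return 0;

        return 1;
}

int main(void)
{
        struct demo_blk b2 = { 0x2222, 4, NULL };
        struct demo_blk b1 = { 0x1111, 4, &b2 };

        printf("%d\n", demo_check_target(&b1, 8, 0x2222));
        printf("%d\n", demo_check_target(&b1, 8, 0x3333));
        return 0;
}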
