New Defects reported by Coverity Scan for freerangerouting/frr

scan-admin at coverity.com
Mon Mar 4 22:21:47 UTC 2024


Hi,

Please find the latest report on new defect(s) introduced to freerangerouting/frr found with Coverity Scan.

32 new defect(s) introduced to freerangerouting/frr found with Coverity Scan.


New defect(s) Reported-by: Coverity Scan
Showing 20 of 32 defect(s)


** CID 1583972:  Concurrent data access violations  (MISSING_LOCK)
/zebra/zebra_dplane.c: 7532 in zebra_dplane_init_internal()


________________________________________________________________________________________________________
*** CID 1583972:  Concurrent data access violations  (MISSING_LOCK)
/zebra/zebra_dplane.c: 7532 in zebra_dplane_init_internal()
7526     	memset(&zdplane_info, 0, sizeof(zdplane_info));
7527     
7528     	pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
7529     
7530     	dplane_prov_list_init(&zdplane_info.dg_providers);
7531     
>>>     CID 1583972:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "zdplane_info.dg_update_list" without holding lock "zebra_dplane_globals.dg_mutex". Elsewhere, "zebra_dplane_globals.dg_update_list" is written to with "zebra_dplane_globals.dg_mutex" held 5 out of 6 times.
7532     	dplane_ctx_list_init(&zdplane_info.dg_update_list);
7533     	zns_info_list_init(&zdplane_info.dg_zns_list);
7534     
7535     	zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
7536     
7537     	zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
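
This initialization most likely runs before any dataplane pthread has been started, which is why dg_update_list is touched without dg_mutex; the checker only sees that the other five writers hold the lock. If quieting the report in code is preferred over a suppression, the list setup can simply be done under the freshly created mutex. A minimal sketch, using only the names visible above:

	pthread_mutex_lock(&zdplane_info.dg_mutex);
	dplane_ctx_list_init(&zdplane_info.dg_update_list);
	zns_info_list_init(&zdplane_info.dg_zns_list);
	pthread_mutex_unlock(&zdplane_info.dg_mutex);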

** CID 1583971:  Concurrent data access violations  (MISSING_LOCK)
/zebra/dplane_fpm_nl.c: 1398 in fpm_process_queue()


________________________________________________________________________________________________________
*** CID 1583971:  Concurrent data access violations  (MISSING_LOCK)
/zebra/dplane_fpm_nl.c: 1398 in fpm_process_queue()
1392     	struct zebra_dplane_ctx *ctx;
1393     	bool no_bufs = false;
1394     	uint64_t processed_contexts = 0;
1395     
1396     	while (true) {
1397     		/* No space available yet. */
>>>     CID 1583971:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "fnc->obuf" without holding lock "fpm_nl_ctx.obuf_mutex". Elsewhere, "fpm_nl_ctx.obuf" is written to with "fpm_nl_ctx.obuf_mutex" held 8 out of 8 times.
1398     		if (STREAM_WRITEABLE(fnc->obuf) < NL_PKT_BUF_SIZE) {
1399     			no_bufs = true;
1400     			break;
1401     		}
1402     
1403     		/* Dequeue next item or quit processing. */
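
Every writer of fnc->obuf that the checker found holds obuf_mutex, so it expects this read to be guarded as well. A sketch of guarding just the flagged space check, assuming the later writes into obuf in this function already take the same lock:

	bool no_space;

	pthread_mutex_lock(&fnc->obuf_mutex);
	no_space = STREAM_WRITEABLE(fnc->obuf) < NL_PKT_BUF_SIZE;
	pthread_mutex_unlock(&fnc->obuf_mutex);

	if (no_space) {
		no_bufs = true;
		break;
	}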

** CID 1583970:  Program hangs  (LOCK)
/zebra/zebra_dplane.c: 6892 in kernel_dplane_process_ipset_entry()


________________________________________________________________________________________________________
*** CID 1583970:  Program hangs  (LOCK)
/zebra/zebra_dplane.c: 6892 in kernel_dplane_process_ipset_entry()
6886     static void
6887     kernel_dplane_process_ipset_entry(struct zebra_dplane_provider *prov,
6888     				  struct zebra_dplane_ctx *ctx)
6889     {
6890     	zebra_pbr_process_ipset_entry(ctx);
6891     	dplane_provider_enqueue_out_ctx(prov, ctx);
>>>     CID 1583970:  Program hangs  (LOCK)
>>>     Returning without unlocking "prov->dp_mutex".
6892     }
6893     
6894     void dplane_rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
6895     			      struct prefix_ipv6 *src_p, struct route_entry *re,
6896     			      struct nexthop_group *ng, int startup,
6897     			      struct zebra_dplane_ctx *ctx)
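
This report, and the dp_mutex reports on the other provider callbacks below, looks like a modeling artifact: the provider queue helpers appear to take and release prov->dp_mutex entirely inside themselves, so the caller never owns the lock at the point of return. A rough sketch of that internal shape, for context only (the out-queue field name is assumed and the real body may differ):

	void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
					     struct zebra_dplane_ctx *ctx)
	{
		pthread_mutex_lock(&prov->dp_mutex);
		dplane_ctx_list_add_tail(&prov->dp_ctx_out_list, ctx);	/* assumed field */
		pthread_mutex_unlock(&prov->dp_mutex);
	}

If that is accurate, the remedy is analyzer modeling or a suppression rather than a code change.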

** CID 1583969:  Program hangs  (ORDER_REVERSAL)


________________________________________________________________________________________________________
*** CID 1583969:  Program hangs  (ORDER_REVERSAL)
/bgpd/bgp_keepalives.c: 198 in bgp_keepalives_start()
192     	pthread_mutex_lock(peerhash_mtx);
193     
194     	/* register cleanup handler */
195     	pthread_cleanup_push(&bgp_keepalives_finish, NULL);
196     
197     	/* notify anybody waiting on us that we are done starting up */
>>>     CID 1583969:  Program hangs  (ORDER_REVERSAL)
>>>     Calling "frr_pthread_notify_running" acquires lock "frr_pthread.running_cond_mtx" while holding lock "peerhash_mtx" (count: 1 / 2).
198     	frr_pthread_notify_running(fpt);
199     
200     	while (atomic_load_explicit(&fpt->running, memory_order_relaxed)) {
201     		if (peerhash->count > 0)
202     			pthread_cond_timedwait(peerhash_cond, peerhash_mtx,
203     					       &next_update_ts);
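
An ORDER_REVERSAL means the two locks are taken in opposite orders on different paths, which can deadlock if those paths ever run concurrently. One conventional remedy, sketched purely as an illustration and assuming nothing waiting on the startup notification also needs peerhash_mtx to be held, is to notify before taking the peer hash lock so this thread never holds both at once:

	/* notify while holding no other lock, so running_cond_mtx and
	 * peerhash_mtx are never held together on this path */
	frr_pthread_notify_running(fpt);

	pthread_mutex_lock(peerhash_mtx);

	/* register cleanup handler */
	pthread_cleanup_push(&bgp_keepalives_finish, NULL);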

** CID 1583968:    (LOCK)
/zebra/zebra_dplane.c: 6926 in kernel_dplane_process_func()
/zebra/zebra_dplane.c: 6968 in kernel_dplane_process_func()
/zebra/zebra_dplane.c: 6951 in kernel_dplane_process_func()
/zebra/zebra_dplane.c: 6968 in kernel_dplane_process_func()
/zebra/zebra_dplane.c: 6951 in kernel_dplane_process_func()


________________________________________________________________________________________________________
*** CID 1583968:    (LOCK)
/zebra/zebra_dplane.c: 6934 in kernel_dplane_process_func()
6928     			break;
6929     		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6930     			kernel_dplane_log_detail(ctx);
6931     
6932     		if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_ADD
6933     		     || dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_DELETE))
>>>     CID 1583968:    (LOCK)
>>>     "kernel_dplane_process_iptable" unlocks "prov->dp_mutex" while it is unlocked.
6934     			kernel_dplane_process_iptable(prov, ctx);
6935     		else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ADD
6936     			  || dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_DELETE))
6937     			kernel_dplane_process_ipset(prov, ctx);
6938     		else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ENTRY_ADD
6939     			  || dplane_ctx_get_op(ctx)
/zebra/zebra_dplane.c: 6941 in kernel_dplane_process_func()
6935     		else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ADD
6936     			  || dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_DELETE))
6937     			kernel_dplane_process_ipset(prov, ctx);
6938     		else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ENTRY_ADD
6939     			  || dplane_ctx_get_op(ctx)
6940     				     == DPLANE_OP_IPSET_ENTRY_DELETE))
>>>     CID 1583968:    (LOCK)
>>>     "kernel_dplane_process_ipset_entry" unlocks "prov->dp_mutex" while it is unlocked.
6941     			kernel_dplane_process_ipset_entry(prov, ctx);
6942     		else
6943     			dplane_ctx_list_add_tail(&work_list, ctx);
6944     	}
6945     
6946     	kernel_update_multi(&work_list);
/zebra/zebra_dplane.c: 6937 in kernel_dplane_process_func()
6931     
6932     		if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_ADD
6933     		     || dplane_ctx_get_op(ctx) == DPLANE_OP_IPTABLE_DELETE))
6934     			kernel_dplane_process_iptable(prov, ctx);
6935     		else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ADD
6936     			  || dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_DELETE))
>>>     CID 1583968:    (LOCK)
>>>     "kernel_dplane_process_ipset" unlocks "prov->dp_mutex" while it is unlocked.
6937     			kernel_dplane_process_ipset(prov, ctx);
6938     		else if ((dplane_ctx_get_op(ctx) == DPLANE_OP_IPSET_ENTRY_ADD
6939     			  || dplane_ctx_get_op(ctx)
6940     				     == DPLANE_OP_IPSET_ENTRY_DELETE))
6941     			kernel_dplane_process_ipset_entry(prov, ctx);
6942     		else
/zebra/zebra_dplane.c: 6926 in kernel_dplane_process_func()
6920     
6921     	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6922     		zlog_debug("dplane provider '%s': processing",
6923     			   dplane_provider_get_name(prov));
6924     
6925     	for (counter = 0; counter < limit; counter++) {
>>>     CID 1583968:    (LOCK)
>>>     "dplane_provider_dequeue_in_ctx" unlocks "prov->dp_mutex" while it is unlocked.
6926     		ctx = dplane_provider_dequeue_in_ctx(prov);
6927     		if (ctx == NULL)
6928     			break;
6929     		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6930     			kernel_dplane_log_detail(ctx);
6931     
/zebra/zebra_dplane.c: 6968 in kernel_dplane_process_func()
6962     		atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
6963     					  1, memory_order_relaxed);
6964     
6965     		dplane_provider_work_ready();
6966     	}
6967     
>>>     CID 1583968:    (LOCK)
>>>     Returning without unlocking "prov->dp_mutex".
6968     	return 0;
6969     }
6970     
6971     static int kernel_dplane_shutdown_func(struct zebra_dplane_provider *prov,
6972     				       bool early)
6973     {
/zebra/zebra_dplane.c: 6926 in kernel_dplane_process_func()
6920     
6921     	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6922     		zlog_debug("dplane provider '%s': processing",
6923     			   dplane_provider_get_name(prov));
6924     
6925     	for (counter = 0; counter < limit; counter++) {
>>>     CID 1583968:    (LOCK)
>>>     "dplane_provider_dequeue_in_ctx" unlocks "prov->dp_mutex" while it is unlocked.
6926     		ctx = dplane_provider_dequeue_in_ctx(prov);
6927     		if (ctx == NULL)
6928     			break;
6929     		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6930     			kernel_dplane_log_detail(ctx);
6931     
/zebra/zebra_dplane.c: 6951 in kernel_dplane_process_func()
6945     
6946     	kernel_update_multi(&work_list);
6947     
6948     	while ((ctx = dplane_ctx_list_pop(&work_list)) != NULL) {
6949     		kernel_dplane_handle_result(ctx);
6950     
>>>     CID 1583968:    (LOCK)
>>>     "dplane_provider_enqueue_out_ctx" locks "prov->dp_mutex" while it is locked.
6951     		dplane_provider_enqueue_out_ctx(prov, ctx);
6952     	}
6953     
6954     	/* Ensure that we'll run the work loop again if there's still
6955     	 * more work to do.
6956     	 */
/zebra/zebra_dplane.c: 6926 in kernel_dplane_process_func()
6920     
6921     	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6922     		zlog_debug("dplane provider '%s': processing",
6923     			   dplane_provider_get_name(prov));
6924     
6925     	for (counter = 0; counter < limit; counter++) {
>>>     CID 1583968:    (LOCK)
>>>     "dplane_provider_dequeue_in_ctx" locks "prov->dp_mutex" while it is locked.
6926     		ctx = dplane_provider_dequeue_in_ctx(prov);
6927     		if (ctx == NULL)
6928     			break;
6929     		if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
6930     			kernel_dplane_log_detail(ctx);
6931     
/zebra/zebra_dplane.c: 6968 in kernel_dplane_process_func()
6962     		atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
6963     					  1, memory_order_relaxed);
6964     
6965     		dplane_provider_work_ready();
6966     	}
6967     
>>>     CID 1583968:    (LOCK)
>>>     Returning without unlocking "prov->dp_mutex".
6968     	return 0;
6969     }
6970     
6971     static int kernel_dplane_shutdown_func(struct zebra_dplane_provider *prov,
6972     				       bool early)
6973     {
/zebra/zebra_dplane.c: 6951 in kernel_dplane_process_func()
6945     
6946     	kernel_update_multi(&work_list);
6947     
6948     	while ((ctx = dplane_ctx_list_pop(&work_list)) != NULL) {
6949     		kernel_dplane_handle_result(ctx);
6950     
>>>     CID 1583968:    (LOCK)
>>>     "dplane_provider_enqueue_out_ctx" unlocks "prov->dp_mutex" while it is unlocked.
6951     		dplane_provider_enqueue_out_ctx(prov, ctx);
6952     	}
6953     
6954     	/* Ensure that we'll run the work loop again if there's still
6955     	 * more work to do.
6956     	 */
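
All of the events under this CID are the same modeling artifact described at CID 1583970: each queue helper brackets its own critical section, so the caller-side loop runs with dp_mutex unowned and the "locks while locked" / "unlocks while unlocked" pairs cancel out. A condensed view of that intended contract (comments only, not a code change):

	for (counter = 0; counter < limit; counter++) {
		/* takes dp_mutex, pops one ctx from the in-queue, releases it */
		ctx = dplane_provider_dequeue_in_ctx(prov);
		if (ctx == NULL)
			break;

		/* ctx is processed with dp_mutex not held */

		/* re-takes dp_mutex just long enough to append to the out-queue */
		dplane_provider_enqueue_out_ctx(prov, ctx);
	}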

** CID 1583967:  Concurrent data access violations  (MISSING_LOCK)
/pceplib/pcep_timers.c: 404 in reset_timer()


________________________________________________________________________________________________________
*** CID 1583967:  Concurrent data access violations  (MISSING_LOCK)
/pceplib/pcep_timers.c: 404 in reset_timer()
398     	pthread_mutex_unlock(&timers_context_->timer_list_lock);
399     
400     	if (timers_context_->timer_cancel_func) {
401     		/* Keeping this log for now, since in older versions of FRR the
402     		 * timer cancellation was blocking. This allows us to see how
403     		 * long it takes. */
>>>     CID 1583967:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "timer_to_reset->timer_id" without holding lock "pcep_timers_context_.timer_list_lock". Elsewhere, "pcep_timer_.timer_id" is written to with "pcep_timers_context_.timer_list_lock" held 4 out of 4 times (4 of these accesses strongly imply that it is necessary).
404     		pcep_log(LOG_DEBUG, "%s: Resetting timer [%d] with callback",
405     			 __func__, timer_to_reset->timer_id);
406     		timers_context_->timer_cancel_func(
407     			&timer_to_reset->external_timer);
408     		timer_to_reset->external_timer = NULL;
409     	}
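
The list lock is released at line 398, a few lines before timer_id is read for the log. Copying the value out while the lock is still held keeps the log and satisfies the checker; a sketch (the same pattern would apply to the related timer_id reports further down, CIDs 1583963 and 1583961):

	int timer_id;

	/* capture what the log needs while timer_list_lock is still held */
	timer_id = timer_to_reset->timer_id;
	pthread_mutex_unlock(&timers_context_->timer_list_lock);

	if (timers_context_->timer_cancel_func) {
		pcep_log(LOG_DEBUG, "%s: Resetting timer [%d] with callback",
			 __func__, timer_id);
		timers_context_->timer_cancel_func(
			&timer_to_reset->external_timer);
		timer_to_reset->external_timer = NULL;
	}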

** CID 1583966:  Concurrent data access violations  (MISSING_LOCK)
/pceplib/pcep_timers.c: 53 in timer_list_node_timer_id_compare()


________________________________________________________________________________________________________
*** CID 1583966:  Concurrent data access violations  (MISSING_LOCK)
/pceplib/pcep_timers.c: 53 in timer_list_node_timer_id_compare()
47     
48     /* simple compare method callback used by pcep_utils_ordered_list
49      * ordered_list_remove_first_node_equals2 to remove a timer based on
50      * its timer_id. */
51     int timer_list_node_timer_id_compare(void *list_entry, void *new_entry)
52     {
>>>     CID 1583966:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "((pcep_timer *)new_entry)->timer_id" without holding lock "pcep_timers_context_.timer_list_lock". Elsewhere, "pcep_timer_.timer_id" is written to with "pcep_timers_context_.timer_list_lock" held 4 out of 4 times (4 of these accesses strongly imply that it is necessary).
53     	return ((pcep_timer *)new_entry)->timer_id
54     	       - ((pcep_timer *)list_entry)->timer_id;
55     }
56     
57     /* simple compare method callback used by pcep_utils_ordered_list
58      * ordered_list_remove_first_node_equals2 to remove a timer based on
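
A comparator like this normally executes inside ordered_list_remove_first_node_equals2 with the caller already holding timer_list_lock, an invariant the checker cannot see from the callee alone. If that holds, the report is a false positive. A sketch of the expected caller-side pattern (the list field name and argument order here are assumptions, not quoted code):

	pthread_mutex_lock(&timers_context_->timer_list_lock);
	/* the comparator runs under timer_list_lock via this call */
	ordered_list_remove_first_node_equals2(timers_context_->timer_list,
					       timer,
					       timer_list_node_timer_id_compare);
	pthread_mutex_unlock(&timers_context_->timer_list_lock);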

** CID 1583965:    (LOCK)
/zebra/dplane_fpm_nl.c: 1650 in fpm_nl_process()
/zebra/dplane_fpm_nl.c: 1601 in fpm_nl_process()
/zebra/dplane_fpm_nl.c: 1650 in fpm_nl_process()


________________________________________________________________________________________________________
*** CID 1583965:    (LOCK)
/zebra/dplane_fpm_nl.c: 1650 in fpm_nl_process()
1644     				&fnc->t_dequeue);
1645     
1646     	/* Ensure dataplane thread is rescheduled if we hit the work limit */
1647     	if (counter >= limit)
1648     		dplane_provider_work_ready();
1649     
>>>     CID 1583965:    (LOCK)
>>>     Returning without unlocking "prov->dp_mutex".
1650     	return 0;
1651     }
1652     
1653     static int fpm_nl_new(struct event_loop *tm)
1654     {
1655     	struct zebra_dplane_provider *prov = NULL;
/zebra/dplane_fpm_nl.c: 1601 in fpm_nl_process()
1595     	int counter, limit;
1596     	uint64_t cur_queue, peak_queue = 0, stored_peak_queue;
1597     
1598     	fnc = dplane_provider_get_data(prov);
1599     	limit = dplane_provider_get_work_limit(prov);
1600     	for (counter = 0; counter < limit; counter++) {
>>>     CID 1583965:    (LOCK)
>>>     "dplane_provider_dequeue_in_ctx" unlocks "prov->dp_mutex" while it is unlocked.
1601     		ctx = dplane_provider_dequeue_in_ctx(prov);
1602     		if (ctx == NULL)
1603     			break;
1604     
1605     		/*
1606     		 * Skip all notifications if not connected, we'll walk the RIB
/zebra/dplane_fpm_nl.c: 1650 in fpm_nl_process()
1644     				&fnc->t_dequeue);
1645     
1646     	/* Ensure dataplane thread is rescheduled if we hit the work limit */
1647     	if (counter >= limit)
1648     		dplane_provider_work_ready();
1649     
>>>     CID 1583965:    (LOCK)
>>>     Returning without unlocking "prov->dp_mutex".
1650     	return 0;
1651     }
1652     
1653     static int fpm_nl_new(struct event_loop *tm)
1654     {
1655     	struct zebra_dplane_provider *prov = NULL;
/zebra/dplane_fpm_nl.c: 1630 in fpm_nl_process()
1624     			if (peak_queue < cur_queue)
1625     				peak_queue = cur_queue;
1626     			continue;
1627     		}
1628     
1629     		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
>>>     CID 1583965:    (LOCK)
>>>     "dplane_provider_enqueue_out_ctx" unlocks "prov->dp_mutex" while it is unlocked.
1630     		dplane_provider_enqueue_out_ctx(prov, ctx);
1631     	}
1632     
1633     	/* Update peak queue length, if we just observed a new peak */
1634     	stored_peak_queue = atomic_load_explicit(
1635     		&fnc->counters.ctxqueue_len_peak, memory_order_relaxed);
/zebra/dplane_fpm_nl.c: 1601 in fpm_nl_process()
1595     	int counter, limit;
1596     	uint64_t cur_queue, peak_queue = 0, stored_peak_queue;
1597     
1598     	fnc = dplane_provider_get_data(prov);
1599     	limit = dplane_provider_get_work_limit(prov);
1600     	for (counter = 0; counter < limit; counter++) {
>>>     CID 1583965:    (LOCK)
>>>     "dplane_provider_dequeue_in_ctx" locks "prov->dp_mutex" while it is locked.
1601     		ctx = dplane_provider_dequeue_in_ctx(prov);
1602     		if (ctx == NULL)
1603     			break;
1604     
1605     		/*
1606     		 * Skip all notifications if not connected, we'll walk the RIB

** CID 1583964:  Concurrent data access violations  (MISSING_LOCK)
/lib/northbound_grpc.cpp: 1127 in grpc_pthread_start(void *)()


________________________________________________________________________________________________________
*** CID 1583964:  Concurrent data access violations  (MISSING_LOCK)
/lib/northbound_grpc.cpp: 1127 in grpc_pthread_start(void *)()
1121     	builder.RegisterService(&service);
1122     	builder.AddChannelArgument(
1123     		GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS, 5000);
1124     	std::unique_ptr<grpc::ServerCompletionQueue> cq =
1125     		builder.AddCompletionQueue();
1126     	std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
>>>     CID 1583964:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "s_server" without holding lock "s_server_lock". Elsewhere, "s_server" is written to with "s_server_lock" held 2 out of 3 times.
1127     	s_server = server.get();
1128     
1129     	pthread_mutex_lock(&s_server_lock); // Make coverity happy
1130     	grpc_running = true;
1131     	pthread_mutex_unlock(&s_server_lock); // Make coverity happy
1132     
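
The "Make coverity happy" locks two lines below show the mutex is already cheap to take here; folding the s_server assignment into that same critical section keeps its publication consistent with the guarded readers the checker found. A sketch:

	pthread_mutex_lock(&s_server_lock);
	s_server = server.get();
	grpc_running = true;
	pthread_mutex_unlock(&s_server_lock);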

** CID 1583963:  Concurrent data access violations  (MISSING_LOCK)
/pceplib/pcep_timers.c: 467 in pceplib_external_timer_expire_handler()


________________________________________________________________________________________________________
*** CID 1583963:  Concurrent data access violations  (MISSING_LOCK)
/pceplib/pcep_timers.c: 467 in pceplib_external_timer_expire_handler()
461     					  timer_node);
462     
463     	pthread_mutex_unlock(&timers_context_->timer_list_lock);
464     
465     	/* Cannot continue if the timer does not exist */
466     	if (timer_node == NULL) {
>>>     CID 1583963:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "timer->timer_id" without holding lock "pcep_timers_context_.timer_list_lock". Elsewhere, "pcep_timer_.timer_id" is written to with "pcep_timers_context_.timer_list_lock" held 4 out of 4 times (4 of these accesses strongly imply that it is necessary).
467     		pcep_log(
468     			LOG_WARNING,
469     			"%s: pceplib_external_timer_expire_handler timer [%p] id [%d] does not exist",
470     			__func__, timer, timer->timer_id);
471     		return;
472     	}

** CID 1583962:  Concurrent data access violations  (MISSING_LOCK)
/pceplib/test/pcep_socket_comm_loop_test.c: 176 in test_handle_reads_read_message_close()


________________________________________________________________________________________________________
*** CID 1583962:  Concurrent data access violations  (MISSING_LOCK)
/pceplib/test/pcep_socket_comm_loop_test.c: 176 in test_handle_reads_read_message_close()
170     	/* Setup the comm session so that it can read.
171     	 * It should read 0 bytes, which simulates that the socket closed */
172     	test_comm_session->socket_fd = 11;
173     	read_handler_info.bytes_read = 0;
174     	FD_SET(test_comm_session->socket_fd,
175     	       &test_socket_comm_handle->read_master_set);
>>>     CID 1583962:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "test_socket_comm_handle->read_list" without holding lock "pcep_socket_comm_handle_.socket_comm_mutex". Elsewhere, "pcep_socket_comm_handle_.read_list" is written to with "pcep_socket_comm_handle_.socket_comm_mutex" held 5 out of 7 times.
176     	ordered_list_add_node(test_socket_comm_handle->read_list,
177     			      test_comm_session);
178     
179     	handle_reads(test_socket_comm_handle);
180     
181     	CU_ASSERT_TRUE(read_handler_info.handler_called);
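
Even though a unit test is effectively single-threaded, the checker compares this mutation against the production call sites, where read_list is modified under socket_comm_mutex. Wrapping the test's list update in the same mutex keeps the locking story uniform; a sketch, assuming the fixture initializes the mutex the same way production code does:

	pthread_mutex_lock(&test_socket_comm_handle->socket_comm_mutex);
	ordered_list_add_node(test_socket_comm_handle->read_list,
			      test_comm_session);
	pthread_mutex_unlock(&test_socket_comm_handle->socket_comm_mutex);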

** CID 1583961:  Concurrent data access violations  (MISSING_LOCK)
/pceplib/pcep_timers.c: 291 in create_timer()


________________________________________________________________________________________________________
*** CID 1583961:  Concurrent data access violations  (MISSING_LOCK)
/pceplib/pcep_timers.c: 291 in create_timer()
285     	if (timers_context_->timer_create_func) {
286     		timers_context_->timer_create_func(
287     			timers_context_->external_timer_infra_data,
288     			&timer->external_timer, sleep_seconds, timer);
289     	}
290     
>>>     CID 1583961:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "timer->timer_id" without holding lock "pcep_timers_context_.timer_list_lock". Elsewhere, "pcep_timer_.timer_id" is written to with "pcep_timers_context_.timer_list_lock" held 4 out of 4 times (4 of these accesses strongly imply that it is necessary).
291     	return timer->timer_id;
292     }
293     
294     
295     bool cancel_timer(int timer_id)
296     {

** CID 1583960:  Performance inefficiencies  (AUTO_CAUSES_COPY)
/lib/northbound_grpc.cpp: 730 in HandleUnaryLoadToCandidate(UnaryRpcState<frr::LoadToCandidateRequest, frr::LoadToCandidateResponse> *)()


________________________________________________________________________________________________________
*** CID 1583960:  Performance inefficiencies  (AUTO_CAUSES_COPY)
/lib/northbound_grpc.cpp: 730 in HandleUnaryLoadToCandidate(UnaryRpcState<frr::LoadToCandidateRequest, frr::LoadToCandidateResponse> *)()
724     
725     	grpc_debug("%s(candidate_id: %u)", __func__, candidate_id);
726     
727     	// Request: LoadType type = 2;
728     	int load_type = tag->request.type();
729     	// Request: DataTree config = 3;
>>>     CID 1583960:  Performance inefficiencies  (AUTO_CAUSES_COPY)
>>>     Using the "auto" keyword without an "&" causes the copy of an object of type "frr::DataTree".
730     	auto config = tag->request.config();
731     
732     	struct candidate *candidate = tag->cdb->get_candidate(candidate_id);
733     	if (!candidate)
734     		return grpc::Status(grpc::StatusCode::NOT_FOUND,
735     				    "candidate configuration not found");
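
Plain auto deduces a value type here, so the returned DataTree is copied. If config is only read afterwards, binding a const reference is the usual fix for this checker, assuming the accessor returns a reference that outlives the use:

	// Request: DataTree config = 3;
	const auto &config = tag->request.config();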

** CID 1583959:  Memory - illegal accesses  (USE_AFTER_FREE)


________________________________________________________________________________________________________
*** CID 1583959:  Memory - illegal accesses  (USE_AFTER_FREE)
/bgpd/bgp_nexthop.c: 644 in bgp_multiaccess_check_v6()
638     		return false;
639     	}
640     
641     	ret = (dest1 == dest2);
642     
643     	bgp_dest_unlock_node(dest1);
>>>     CID 1583959:  Memory - illegal accesses  (USE_AFTER_FREE)
>>>     Calling "bgp_dest_unlock_node" dereferences freed pointer "dest2".
644     	bgp_dest_unlock_node(dest2);
645     
646     	return ret;
647     }
648     
649     bool bgp_subgrp_multiaccess_check_v6(struct in6_addr nexthop,
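
The report rests on bgp_dest_unlock_node being able to free the node once its reference count drops to zero; if dest1 and dest2 can alias the same node (which the equality test above suggests is possible), the analyzer assumes the first unlock may release the memory the second one touches. A deliberately hypothetical illustration of that model, not FRR code (type, field and helper names are made up):

	#include <stdlib.h>

	struct node {
		int refcount;
	};

	static void node_unlock(struct node *n)
	{
		if (--n->refcount == 0)
			free(n);	/* a second unlock on an alias of n
					 * would now dereference freed memory */
	}

Whether that can actually happen depends on how many references the two lookups take, so this may well be a false positive.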

** CID 1583958:    (LOCK)
/zebra/zebra_dplane.c: 6993 in kernel_dplane_shutdown_func()
/zebra/zebra_dplane.c: 6983 in kernel_dplane_shutdown_func()
/zebra/zebra_dplane.c: 6983 in kernel_dplane_shutdown_func()


________________________________________________________________________________________________________
*** CID 1583958:    (LOCK)
/zebra/zebra_dplane.c: 6993 in kernel_dplane_shutdown_func()
6987     	while (ctx) {
6988     		dplane_ctx_free(&ctx);
6989     
6990     		ctx = dplane_provider_dequeue_out_ctx(prov);
6991     	}
6992     
>>>     CID 1583958:    (LOCK)
>>>     Returning without unlocking "prov->dp_mutex".
6993     	return 1;
6994     }
6995     
6996     #ifdef DPLANE_TEST_PROVIDER
6997     
6998     /*
/zebra/zebra_dplane.c: 6983 in kernel_dplane_shutdown_func()
6977     		return 1;
6978     
6979     	ctx = dplane_provider_dequeue_in_ctx(prov);
6980     	while (ctx) {
6981     		dplane_ctx_free(&ctx);
6982     
>>>     CID 1583958:    (LOCK)
>>>     "dplane_provider_dequeue_in_ctx" unlocks "prov->dp_mutex" while it is unlocked.
6983     		ctx = dplane_provider_dequeue_in_ctx(prov);
6984     	}
6985     
6986     	ctx = dplane_provider_dequeue_out_ctx(prov);
6987     	while (ctx) {
6988     		dplane_ctx_free(&ctx);
/zebra/zebra_dplane.c: 6983 in kernel_dplane_shutdown_func()
6977     		return 1;
6978     
6979     	ctx = dplane_provider_dequeue_in_ctx(prov);
6980     	while (ctx) {
6981     		dplane_ctx_free(&ctx);
6982     
>>>     CID 1583958:    (LOCK)
>>>     "dplane_provider_dequeue_in_ctx" unlocks "prov->dp_mutex" while it is unlocked.
6983     		ctx = dplane_provider_dequeue_in_ctx(prov);
6984     	}
6985     
6986     	ctx = dplane_provider_dequeue_out_ctx(prov);
6987     	while (ctx) {
6988     		dplane_ctx_free(&ctx);

** CID 1583957:  Program hangs  (LOCK)
/zebra/zebra_dplane.c: 6877 in kernel_dplane_process_iptable()


________________________________________________________________________________________________________
*** CID 1583957:  Program hangs  (LOCK)
/zebra/zebra_dplane.c: 6877 in kernel_dplane_process_iptable()
6871     
6872     static void kernel_dplane_process_iptable(struct zebra_dplane_provider *prov,
6873     					  struct zebra_dplane_ctx *ctx)
6874     {
6875     	zebra_pbr_process_iptable(ctx);
6876     	dplane_provider_enqueue_out_ctx(prov, ctx);
>>>     CID 1583957:  Program hangs  (LOCK)
>>>     Returning without unlocking "prov->dp_mutex".
6877     }
6878     
6879     static void kernel_dplane_process_ipset(struct zebra_dplane_provider *prov,
6880     					struct zebra_dplane_ctx *ctx)
6881     {
6882     	zebra_pbr_process_ipset(ctx);

** CID 1583956:    (LOCK)
/zebra/sample_plugin.c: 72 in sample_process()
/zebra/sample_plugin.c: 72 in sample_process()


________________________________________________________________________________________________________
*** CID 1583956:    (LOCK)
/zebra/sample_plugin.c: 72 in sample_process()
66     
67     		/* Just set 'success' status and return to the dataplane */
68     		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
69     		dplane_provider_enqueue_out_ctx(prov_p, ctx);
70     	}
71     
>>>     CID 1583956:    (LOCK)
>>>     Returning without unlocking "prov_p->dp_mutex".
72     	return 0;
73     }
74     
75     /*
76      * Init entry point called during zebra startup. This is registered during
77      * module init.
/zebra/sample_plugin.c: 63 in sample_process()
57     	limit = dplane_provider_get_work_limit(prov_p);
58     
59     	/* Respect the configured limit on the amount of work to do in
60     	 * any one call.
61     	 */
62     	for (counter = 0; counter < limit; counter++) {
>>>     CID 1583956:    (LOCK)
>>>     "dplane_provider_dequeue_in_ctx" locks "prov_p->dp_mutex" while it is locked.
63     		ctx = dplane_provider_dequeue_in_ctx(prov_p);
64     		if (!ctx)
65     			break;
66     
67     		/* Just set 'success' status and return to the dataplane */
68     		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
/zebra/sample_plugin.c: 63 in sample_process()
57     	limit = dplane_provider_get_work_limit(prov_p);
58     
59     	/* Respect the configured limit on the amount of work to do in
60     	 * any one call.
61     	 */
62     	for (counter = 0; counter < limit; counter++) {
>>>     CID 1583956:    (LOCK)
>>>     "dplane_provider_dequeue_in_ctx" unlocks "prov_p->dp_mutex" while it is unlocked.
63     		ctx = dplane_provider_dequeue_in_ctx(prov_p);
64     		if (!ctx)
65     			break;
66     
67     		/* Just set 'success' status and return to the dataplane */
68     		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
/zebra/sample_plugin.c: 69 in sample_process()
63     		ctx = dplane_provider_dequeue_in_ctx(prov_p);
64     		if (!ctx)
65     			break;
66     
67     		/* Just set 'success' status and return to the dataplane */
68     		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
>>>     CID 1583956:    (LOCK)
>>>     "dplane_provider_enqueue_out_ctx" unlocks "prov_p->dp_mutex" while it is unlocked.
69     		dplane_provider_enqueue_out_ctx(prov_p, ctx);
70     	}
71     
72     	return 0;
73     }
74     
/zebra/sample_plugin.c: 72 in sample_process()
66     
67     		/* Just set 'success' status and return to the dataplane */
68     		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
69     		dplane_provider_enqueue_out_ctx(prov_p, ctx);
70     	}
71     
>>>     CID 1583956:    (LOCK)
>>>     Returning without unlocking "prov_p->dp_mutex".
72     	return 0;
73     }
74     
75     /*
76      * Init entry point called during zebra startup. This is registered during
77      * module init.

** CID 1583955:  Concurrent data access violations  (MISSING_LOCK)
/zebra/zserv.c: 1095 in zebra_show_client_detail()


________________________________________________________________________________________________________
*** CID 1583955:  Concurrent data access violations  (MISSING_LOCK)
/zebra/zserv.c: 1095 in zebra_show_client_detail()
1089     		0, client->local_es_del_cnt);
1090     	vty_out(vty, "ES-EVI      %-12u%-12u%-12u\n",
1091     		client->local_es_evi_add_cnt, 0, client->local_es_evi_del_cnt);
1092     	vty_out(vty, "Errors: %u\n", client->error_cnt);
1093     
1094     #if defined DEV_BUILD
>>>     CID 1583955:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "client->ibuf_fifo" without holding lock "zserv.ibuf_mtx". Elsewhere, "zserv.ibuf_fifo" is written to with "zserv.ibuf_mtx" held 2 out of 2 times.
1095     	vty_out(vty, "Input Fifo: %zu:%zu Output Fifo: %zu:%zu\n",
1096     		client->ibuf_fifo->count, client->ibuf_fifo->max_count,
1097     		client->obuf_fifo->count, client->obuf_fifo->max_count);
1098     #endif
1099     	vty_out(vty, "\n");
1100     }
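
The FIFO counters are read here without ibuf_mtx while every writer the checker found holds it. Since this is a DEV_BUILD-only show command, one low-risk option is to snapshot the counters under the lock and print the copies; only the input FIFO is sketched because the report names ibuf_mtx, and whatever guards obuf_fifo would need the same treatment:

	#if defined DEV_BUILD
		size_t in_count, in_max;

		pthread_mutex_lock(&client->ibuf_mtx);
		in_count = client->ibuf_fifo->count;
		in_max = client->ibuf_fifo->max_count;
		pthread_mutex_unlock(&client->ibuf_mtx);

		vty_out(vty, "Input Fifo: %zu:%zu\n", in_count, in_max);
	#endif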

** CID 1583954:  Concurrent data access violations  (MISSING_LOCK)
/pceplib/pcep_session_logic.c: 56 in run_session_logic_common()


________________________________________________________________________________________________________
*** CID 1583954:  Concurrent data access violations  (MISSING_LOCK)
/pceplib/pcep_session_logic.c: 56 in run_session_logic_common()
50     
51     	session_logic_handle_ = pceplib_malloc(
52     		PCEPLIB_INFRA, sizeof(pcep_session_logic_handle));
53     	memset(session_logic_handle_, 0, sizeof(pcep_session_logic_handle));
54     
55     	session_logic_handle_->active = true;
>>>     CID 1583954:  Concurrent data access violations  (MISSING_LOCK)
>>>     Accessing "session_logic_handle_->session_list" without holding lock "pcep_session_logic_handle_.session_list_mutex". Elsewhere, "pcep_session_logic_handle_.session_list" is written to with "pcep_session_logic_handle_.session_list_mutex" held 2 out of 3 times.
56     	session_logic_handle_->session_list =
57     		ordered_list_initialize(pointer_compare_function);
58     	session_logic_handle_->session_event_queue = queue_initialize();
59     
60     	/* Initialize the event queue */
61     	session_logic_event_queue_ =

** CID 1583953:    (OVERRUN)


________________________________________________________________________________________________________
*** CID 1583953:    (OVERRUN)
/pceplib/pcep_msg_objects_encoding.c: 1021 in pcep_decode_object()
1015     	if (object == NULL) {
1016     		pcep_log(LOG_INFO, "%s: Unable to decode Object class [%d].",
1017     			 __func__, object_hdr.object_class);
1018     		return NULL;
1019     	}
1020     
>>>     CID 1583953:    (OVERRUN)
>>>     Overrunning callee's array of size 41 by passing argument "object_hdr.object_class" (which evaluates to 63) in call to "pcep_object_has_tlvs".
1021     	if (pcep_object_has_tlvs(&object_hdr)) {
1022     		object->tlv_list = dll_initialize();
1023     		int num_iterations = 0;
1024     		uint16_t tlv_index = pcep_object_get_length_by_hdr(&object_hdr);
1025     		while ((object->encoded_object_length - tlv_index) > 0
1026     		       && num_iterations++ < MAX_ITERATIONS) {
/pceplib/pcep_msg_objects_encoding.c: 1024 in pcep_decode_object()
1018     		return NULL;
1019     	}
1020     
1021     	if (pcep_object_has_tlvs(&object_hdr)) {
1022     		object->tlv_list = dll_initialize();
1023     		int num_iterations = 0;
>>>     CID 1583953:    (OVERRUN)
>>>     Overrunning callee's array of size 41 by passing argument "object_hdr.object_class" (which evaluates to 63) in call to "pcep_object_get_length_by_hdr".
1024     		uint16_t tlv_index = pcep_object_get_length_by_hdr(&object_hdr);
1025     		while ((object->encoded_object_length - tlv_index) > 0
1026     		       && num_iterations++ < MAX_ITERATIONS) {
1027     			struct pcep_object_tlv_header *tlv =
1028     				pcep_decode_tlv(obj_buf + tlv_index);
1029     			if (tlv == NULL) {
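
Both flagged calls index per-object-class lookup tables of 41 entries with a class value that can be 63 when it comes straight off the wire. The usual hardening is to validate the class against the table bound before any table-driven handling; a sketch, where the bound's name is a placeholder for whatever constant actually sizes those tables:

	/* reject object classes the per-class tables do not cover
	 * (PCEP_OBJ_CLASS_TABLE_SIZE is a placeholder, not an existing macro) */
	if (object_hdr.object_class >= PCEP_OBJ_CLASS_TABLE_SIZE) {
		pcep_log(LOG_INFO, "%s: Unsupported object class [%d].",
			 __func__, object_hdr.object_class);
		return NULL;
	}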


________________________________________________________________________________________________________
To view the defects in Coverity Scan, visit https://u15810271.ct.sendgrid.net/ls/click?upn=u001.AxU2LYlgjL6eX23u9ErQy-2BKADyCpvUKOL6EWmZljiu4jkWudbux5UNqYsSt9ZXO3s9m3KMDrlSX-2Bp41IzjejfmdC2hinnLY-2BZ6PvlTenLZs-3DiIEA_t0zeZlCsA34Fiw17aIfmh-2F3kFs1q7rysihvAefHXY7-2B-2Bw-2BDeyiqbFLwo3ndINl1mCjNjOQOmuV7WZryx-2BcyCR2ehXT6j0HCqYV4uYBe1Oqz8MM8i3H2qUI0zn7fZRZzkgndPYubQKk14u1cFXlqyub6kxvVHlK2mZBWetqfSyDkL0-2B-2Byw9vX0mlwhBk-2FypjsmB7SKsFyKGovF9Jh-2FiOcJQ-3D-3D



