[Intel-wired-lan] [bug report] idpf: add core init and interrupt request
Linga, Pavan Kumar
pavan.kumar.linga at intel.com
Sat Jul 20 00:31:12 UTC 2024
On 7/19/2024 4:53 PM, Dan Carpenter wrote:
> Hello Pavan Kumar Linga,
>
> Commit 4930fbf419a7 ("idpf: add core init and interrupt request")
> from Aug 7, 2023 (linux-next), leads to the following Smatch static
> checker warning:
>
> drivers/net/ethernet/intel/idpf/idpf_lib.c:417 idpf_intr_req()
> error: we previously assumed 'adapter->req_vec_chunks' could be null (see line 360)
>
> drivers/net/ethernet/intel/idpf/idpf_lib.c
> 315 int idpf_intr_req(struct idpf_adapter *adapter)
> 316 {
> 317 u16 default_vports = idpf_get_default_vports(adapter);
> 318 int num_q_vecs, total_vecs, num_vec_ids;
> 319 int min_vectors, v_actual, err;
> 320 unsigned int vector;
> 321 u16 *vecids;
> 322
> 323 total_vecs = idpf_get_reserved_vecs(adapter);
> 324 num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
> 325
> 326 err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
> 327 if (err) {
> 328 dev_err(&adapter->pdev->dev,
> 329 "Failed to allocate %d vectors: %d\n", num_q_vecs, err);
> 330
> 331 return -EAGAIN;
> 332 }
> 333
> 334 min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
> 335 v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
> 336 total_vecs, PCI_IRQ_MSIX);
> 337 if (v_actual < min_vectors) {
> 338 dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
> 339 v_actual);
> 340 err = -EAGAIN;
> 341 goto send_dealloc_vecs;
> 342 }
> 343
> 344 adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
> 345 GFP_KERNEL);
> 346
> 347 if (!adapter->msix_entries) {
> 348 err = -ENOMEM;
> 349 goto free_irq;
> 350 }
> 351
> 352 idpf_set_mb_vec_id(adapter);
> 353
> 354 vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
> 355 if (!vecids) {
> 356 err = -ENOMEM;
> 357 goto free_msix;
> 358 }
> 359
> 360 if (adapter->req_vec_chunks) {
> ^^^^^^^^^^^^^^^^^^^^^^^
> If ->req_vec_chunks is NULL the error handling will crash
Thanks Dan for pointing this out.
After taking a closer look at the code, I found that 'req_vec_chunks'
cannot be NULL when control reaches this point: idpf_send_alloc_vectors_msg()
(line 326) would have already returned an error in that case. So
'req_vec_chunks' can be accessed unconditionally here and the NULL check
(along with its else branch) can be dropped.
I will post a fix for it.
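For reference, a minimal sketch of what such a fix could look like
(assumption: the check is simply dropped and the fallback loop removed;
the actual patch may differ):

	/* adapter->req_vec_chunks is guaranteed to be non-NULL here
	 * because idpf_send_alloc_vectors_msg() succeeded at line 326.
	 */
	struct virtchnl2_vector_chunks *vchunks;
	struct virtchnl2_alloc_vectors *ac;

	ac = adapter->req_vec_chunks;
	vchunks = &ac->vchunks;

	num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
				       vchunks);
	if (num_vec_ids < v_actual) {
		err = -EINVAL;
		goto free_vecids;
	}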
Thanks,
Pavan
>
> 361 struct virtchnl2_vector_chunks *vchunks;
> 362 struct virtchnl2_alloc_vectors *ac;
> 363
> 364 ac = adapter->req_vec_chunks;
> 365 vchunks = &ac->vchunks;
> 366
> 367 num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
> 368 vchunks);
> 369 if (num_vec_ids < v_actual) {
> 370 err = -EINVAL;
> 371 goto free_vecids;
> 372 }
> 373 } else {
> 374 int i;
> 375
> 376 for (i = 0; i < v_actual; i++)
> 377 vecids[i] = i;
> 378 }
> 379
> 380 for (vector = 0; vector < v_actual; vector++) {
> 381 adapter->msix_entries[vector].entry = vecids[vector];
> 382 adapter->msix_entries[vector].vector =
> 383 pci_irq_vector(adapter->pdev, vector);
> 384 }
> 385
> 386 adapter->num_req_msix = total_vecs;
> 387 adapter->num_msix_entries = v_actual;
> 388 /* 'num_avail_msix' is used to distribute excess vectors to the vports
> 389 * after considering the minimum vectors required per each default
> 390 * vport
> 391 */
> 392 adapter->num_avail_msix = v_actual - min_vectors;
> 393
> 394 /* Fill MSIX vector lifo stack with vector indexes */
> 395 err = idpf_init_vector_stack(adapter);
> 396 if (err)
> 397 goto free_vecids;
> 398
> 399 err = idpf_mb_intr_init(adapter);
> 400 if (err)
> 401 goto deinit_vec_stack;
> 402 idpf_mb_irq_enable(adapter);
> 403 kfree(vecids);
> 404
> 405 return 0;
> 406
> 407 deinit_vec_stack:
> 408 idpf_deinit_vector_stack(adapter);
> 409 free_vecids:
> 410 kfree(vecids);
> 411 free_msix:
> 412 kfree(adapter->msix_entries);
> 413 adapter->msix_entries = NULL;
> 414 free_irq:
> 415 pci_free_irq_vectors(adapter->pdev);
> 416 send_dealloc_vecs:
> --> 417 idpf_send_dealloc_vectors_msg(adapter);
> ^^^^^^^
> Inside this function
>
> 418
> 419 return err;
> 420 }
>
> regards,
> dan carpenter