// SPDX-License-Identifier: Apache-2.0

m4_divert(if_is_binop_subset)
// C += A+B, where C, A, and B are all full
GrB_Info GB (_Cewise_fulla)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
) ;

m4_divert(0)
// C = A+B, where C, A, and B are all full
GrB_Info GB (_Cewise_fulln)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
) ;

m4_divert(if_binop_is_semiring_multiplier)
// C = A*D, column scale with diagonal matrix D
GrB_Info GB (_AxD)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
) ;

// C = D*B, row scale with diagonal matrix D
GrB_Info GB (_DxB)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
) ;

m4_divert(0)
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B
GrB_Info GB (_AaddB)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    const int64_t *restrict M_ek_slicing,
    const int M_nthreads,
    const int M_ntasks,
    const int64_t *restrict A_ek_slicing,
    const int A_nthreads,
    const int A_ntasks,
    const int64_t *restrict B_ek_slicing,
    const int B_nthreads,
    const int B_ntasks
) ;

m4_divert(if_binop_emult_is_enabled)
// eWiseUnion: like eWiseAdd, but the alpha/beta scalars stand in for
// entries missing from A or B
GrB_Info GB (_AunionB)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    const int64_t *restrict M_ek_slicing,
    const int M_nthreads,
    const int M_ntasks,
    const int64_t *restrict A_ek_slicing,
    const int A_nthreads,
    const int A_ntasks,
    const int64_t *restrict B_ek_slicing,
    const int B_nthreads,
    const int B_ntasks
) ;

// eWiseMult, method 08
GrB_Info GB (_AemultB_08)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads
) ;

// eWiseMult, method 02 (tasks sliced by A)
GrB_Info GB (_AemultB_02)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
) ;

m4_divert(if_binop_is_non_commutative)
// eWiseMult, method 03 (tasks sliced by B)
GrB_Info GB (_AemultB_03)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
) ;

m4_divert(if_binop_emult_is_enabled)
// eWiseMult, method 04 (tasks sliced by M)
GrB_Info GB (_AemultB_04)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
) ;

// eWiseMult, C is bitmap
GrB_Info GB (_AemultB_bitmap)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads
) ;

m4_divert(if_binop_bind_is_enabled)
// Cx = op (x, Bx): apply with the scalar x bound to the first input
GrB_Info GB (_bind1st)
(
    GB_void *Cx_output,
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
) ;

// C = op (x, A'): bind1st applied to the transpose of A
GrB_Info GB (_bind1st_tran)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
) ;

// Cx = op (Ax, y): apply with the scalar y bound to the second input
GrB_Info GB (_bind2nd)
(
    GB_void *Cx_output,
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
) ;

// C = op (A', y): bind2nd applied to the transpose of A
GrB_Info GB (_bind2nd_tran)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
) ;

m4_divert(0)
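
// Usage sketch (illustrative only): these prototypes are instantiated once
// per built-in binary operator and type, and are reached through the public
// GraphBLAS API rather than called directly.  A minimal sketch, assuming
// GrB_FP64 matrices A, B, and C of matching dimensions have already been
// created and filled:
//
//      #include "GraphBLAS.h"
//
//      // C = A+B (pattern union): typically dispatches to a GB (_AaddB)
//      // instance for a built-in op/type such as GrB_PLUS_FP64
//      GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_PLUS_FP64,
//          A, B, NULL) ;
//
//      // C = A.*B (pattern intersection): typically dispatches to one of
//      // the GB (_AemultB_*) instances, chosen by the sparsity of A, B,
//      // and the mask
//      GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GrB_TIMES_FP64,
//          A, B, NULL) ;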