twopl_mml
def twopl_mml(dataset, options=None)

Estimates parameters in a 2PL IRT model.
Args
dataset- [items x participants] matrix of True/False Values
options- dictionary with updates to default options
Returns
discrimination- (1d array) estimate of item discriminations
difficulty- (1d array) estimates of item difficulties
Options
- max_iteration: int
- distribution: callable
- quadrature_bounds: (float, float)
- quadrature_n: int
Expand source code
def twopl_mml(dataset, options=None):
    """Estimates parameters in a 2PL IRT model via marginal maximum likelihood.

    Args:
        dataset: [items x participants] matrix of True/False Values
        options: dictionary with updates to default options

    Returns:
        discrimination: (1d array) estimate of item discriminations
        difficulty: (1d array) estimates of item difficulties

    Options:
        * max_iteration: int
        * distribution: callable
        * quadrature_bounds: (float, float)
        * quadrature_n: int
    """
    options = validate_estimation_options(options)
    quad_start, quad_stop = options['quadrature_bounds']
    quad_n = options['quadrature_n']

    n_items = dataset.shape[0]
    n_no, n_yes = get_true_false_counts(dataset)
    # Per-item proportion of "yes" responses, used as the target in the
    # difficulty sub-optimization (_mml_abstract).
    scalar = n_yes / (n_yes + n_no)

    # Collapse identical response columns; 'counts' weights the
    # log-likelihood contribution of each unique response pattern.
    unique_sets, counts = np.unique(dataset, axis=1, return_counts=True)
    the_sign = convert_responses_to_kernel_sign(unique_sets)

    theta = _get_quadrature_points(quad_n, quad_start, quad_stop)
    distribution = options['distribution'](theta)

    # Initial parameter estimates for the minimization.
    discrimination = np.ones((n_items,))
    difficulty = np.zeros((n_items,))

    # BUG FIX: the outer EM loop previously iterated
    # range(options['quadrature_n']) — the quadrature resolution accidentally
    # controlled the iteration budget and the documented 'max_iteration'
    # option was never used. Use 'max_iteration' as documented.
    for iteration in range(options['max_iteration']):
        previous_discrimination = discrimination.copy()

        # Quadrature evaluation for values that do not change.
        # This is done during the outer loop to address rounding errors
        # accumulated by the repeated divide/multiply updates below.
        partial_int = _compute_partial_integral(theta, difficulty,
                                                discrimination, the_sign)
        partial_int *= distribution

        for ndx in range(n_items):
            # pylint: disable=cell-var-from-loop
            # Remove the current item's contribution so it can be
            # re-estimated against the remaining items.
            local_int = _compute_partial_integral(theta, difficulty[ndx, None],
                                                  discrimination[ndx, None],
                                                  the_sign[ndx, None])
            partial_int /= local_int

            def min_func_local(estimate):
                # Trial discrimination; difficulty is re-solved conditionally
                # on it, then the negative marginal log-likelihood is returned.
                discrimination[ndx] = estimate
                _mml_abstract(difficulty[ndx, None], scalar[ndx, None],
                              discrimination[ndx, None], theta,
                              distribution, options)
                estimate_int = _compute_partial_integral(
                    theta, difficulty[ndx, None],
                    discrimination[ndx, None], the_sign[ndx, None])

                estimate_int *= partial_int
                # Integrate the joint density over the latent trait.
                otpt = integrate.fixed_quad(
                    lambda x: estimate_int, quad_start, quad_stop, n=quad_n)[0]

                return -np.log(otpt).dot(counts)

            # Solve for the discrimination parameter (bounded 1-D search).
            fminbound(min_func_local, 0.25, 6)

            # Re-insert the item's contribution using the new estimates.
            estimate_int = _compute_partial_integral(
                theta, difficulty[ndx, None],
                discrimination[ndx, None], the_sign[ndx, None])
            partial_int *= estimate_int

        # Converged when no discrimination moved more than the tolerance.
        if np.abs(discrimination - previous_discrimination).max() < 1e-3:
            break

    return discrimination, difficulty
Last modified April 7, 2020: Updating documentation. (3da0254)