onepl_full

def onepl_full(dataset, alpha=None, options=None)

Estimates parameters in a 1PL IRT model.

This function is slow; please use onepl_mml instead.
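
For reference, in the 1PL model every item shares a single discrimination, and the probability of a correct response to item i is

    P(x = 1 | theta) = 1 / (1 + exp(-alpha * (theta - beta_i)))

where theta is the participant ability, alpha is the common discrimination, and beta_i is the item difficulty.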

Args

dataset
[items x participants] matrix of True/False values
alpha
scalar discrimination used in the model (defaults to 1)
options
dictionary with updates to default options

Returns

discrimination
(float) estimate of test discrimination
difficulty
(1d array) estimates of item difficulties

Options

  • max_iteration: int
  • distribution: callable
  • quadrature_bounds: (float, float)
  • quadrature_n: int
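
For example, a minimal sketch of an options dictionary overriding these defaults (the values below are illustrative, not the library's defaults):

    from scipy import stats

    options = {'max_iteration': 50,                   # cap on refinement sweeps
               'distribution': stats.norm(0, 1).pdf,  # latent ability prior
               'quadrature_bounds': (-5, 5),          # integration range for theta
               'quadrature_n': 61}                    # number of quadrature nodes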

Notes

If alpha is supplied, this solves a Rasch model (see the sketch below).
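
A usage sketch with synthetic data (the response matrix is random and purely illustrative; the import path is assumed from this package):

    import numpy as np
    from girth import onepl_full

    rng = np.random.default_rng(0)
    dataset = rng.random((10, 500)) < 0.5  # 10 items x 500 participants

    # 1PL: estimate a common discrimination plus per-item difficulties
    discrimination, difficulty = onepl_full(dataset)

    # Rasch: fix the discrimination at 1 and estimate difficulties only
    _, difficulty_rasch = onepl_full(dataset, alpha=1.0)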

Source code
def onepl_full(dataset, alpha=None, options=None):
    """ Estimates parameters in an 1PL IRT Model.

    This function is slow; please use onepl_mml instead.

    Args:
        dataset: [items x participants] matrix of True/False values
        alpha: scalar discrimination used in the model (defaults to 1)
        options: dictionary with updates to default options

    Returns:
        discrimination: (float) estimate of test discrimination
        difficulty: (1d array) estimates of item difficulties

    Options:
        * max_iteration: int
        * distribution: callable
        * quadrature_bounds: (float, float)
        * quadrature_n: int

    Notes:
        If alpha is supplied, this solves a Rasch model
    """
    options = validate_estimation_options(options)
    quad_start, quad_stop = options['quadrature_bounds']
    quad_n = options['quadrature_n']

    n_items = dataset.shape[0]
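    # Collapse duplicate response patterns; the counts re-weight the
    # log-likelihood so each unique pattern is evaluated only once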
    unique_sets, counts = np.unique(dataset, axis=1, return_counts=True)
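    # Map binary responses onto +/-1 so a single logistic kernel
    # expression covers both correct and incorrect outcomes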
    the_sign = convert_responses_to_kernel_sign(unique_sets)

    theta = _get_quadrature_points(quad_n, quad_start, quad_stop)
    distribution = options['distribution'](theta)

    discrimination = np.ones((n_items,))
    difficulty = np.zeros((n_items,))

    def alpha_min_func(alpha_estimate):
        discrimination[:] = alpha_estimate

        for iteration in range(options['max_iteration']):
            previous_difficulty = difficulty.copy()

            # Quadrature evaluation for values that do not change
            partial_int = _compute_partial_integral(theta, difficulty,
                                                    discrimination, the_sign)
            partial_int *= distribution

            for item_ndx in range(n_items):
                # pylint: disable=cell-var-from-loop

                # remove contribution from current item
                local_int = _compute_partial_integral(theta, difficulty[item_ndx, None],
                                                      discrimination[item_ndx, None],
                                                      the_sign[item_ndx, None])

                partial_int /= local_int

                def min_local_func(beta_estimate):
                    difficulty[item_ndx] = beta_estimate

                    estimate_int = _compute_partial_integral(theta, difficulty[item_ndx, None],
                                                             discrimination[item_ndx, None],
                                                             the_sign[item_ndx, None])

                    estimate_int *= partial_int

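                    # fixed_quad evaluates its integrand at the same
                    # Gauss-Legendre nodes used to build theta, so the
                    # lambda ignores x and returns the precomputed array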
                    otpt = integrate.fixed_quad(
                        lambda x: estimate_int, quad_start, quad_stop, n=quad_n)[0]

                    return -np.log(otpt).dot(counts)

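                # Bounded search over this item's difficulty; each trial
                # value is written into the difficulty array in place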
                fminbound(min_local_func, -4, 4)

                # Update the partial integral based on the new found values
                estimate_int = _compute_partial_integral(theta, difficulty[item_ndx, None],
                                                         discrimination[item_ndx, None],
                                                         the_sign[item_ndx, None])
                # update partial integral
                partial_int *= estimate_int

            if np.abs(previous_difficulty - difficulty).max() < 1e-3:
                break

        cost = integrate.fixed_quad(
            lambda x: partial_int, quad_start, quad_stop, n=quad_n)[0]
        return -np.log(cost).dot(counts)

    if alpha is None:  # 1PL solver
        alpha = fminbound(alpha_min_func, 0.1, 4)
    else:  # Rasch solver
        alpha_min_func(alpha)

    return alpha, difficulty