* adding a measure of uncertainty to predictions

Bayesian Metrics

This is adapted from Daniel Huynh's fastai_bayesian GitHub code, modified to use PyTorch tensors instead of NumPy arrays

entropy[source]

entropy(probs)

Compute the entropy of the predictions in a T x N x C tensor (see the sketch after this list), where:

  • T : the number of samples
  • N : the batch size
  • C : the number of classes
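
A minimal sketch of the idea (an illustration, not the library's exact implementation): average the sampled probabilities over the T samples and take the entropy of that mean distribution. The eps stabiliser is an assumption added here to avoid log(0).

import torch

def entropy_sketch(probs, eps=1e-8):
    # probs: T x N x C tensor of sampled class probabilities
    mean_probs = probs.mean(dim=0)                               # N x C: average over the T samples
    return -(mean_probs * (mean_probs + eps).log()).sum(dim=-1)  # N: predictive entropy per item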

uncertainty_best_probability[source]

uncertainty_best_probability(probs)

Return the standard deviation of the most probable class
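
A hedged sketch of what this implies, assuming the "most probable class" is taken from the mean prediction and the standard deviation is computed across the T samples; the names here are illustrative.

import torch

def uncertainty_best_probability_sketch(probs):
    # probs: T x N x C tensor of sampled class probabilities
    best = probs.mean(dim=0).argmax(dim=1)                   # N: most probable class per item
    idx = best.view(1, -1, 1).expand(probs.size(0), -1, 1)   # T x N x 1 gather index
    per_sample = probs.gather(2, idx).squeeze(-1)            # T x N: that class's probability in each sample
    return per_sample.std(dim=0)                             # N: std across the T samples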

BALD[source]

BALD(probs)

Information gain: the difference between the entropy of the averaged predictions and the average of the per-sample entropies
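
A rough sketch under the same T x N x C convention (an illustration, not the exact implementation): the BALD score is the entropy of the mean prediction minus the mean of the per-sample entropies.

import torch

def bald_sketch(probs, eps=1e-8):
    # probs: T x N x C tensor of sampled class probabilities
    mean_probs = probs.mean(dim=0)                                            # N x C
    entropy_of_mean = -(mean_probs * (mean_probs + eps).log()).sum(dim=-1)    # N
    mean_of_entropy = -(probs * (probs + eps).log()).sum(dim=-1).mean(dim=0)  # N
    return entropy_of_mean - mean_of_entropy                                  # N: BALD score per item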

top_k_uncertainty[source]

top_k_uncertainty(s, k=5, reverse=True)

Return the indexes of the top k values of s
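
A hedged sketch of the sorting this suggests; treating reverse=True as "largest scores first" is an assumption, not confirmed by the source.

import torch

def top_k_uncertainty_sketch(s, k=5, reverse=True):
    # s: 1-D tensor of per-item uncertainty scores
    return torch.argsort(s, descending=reverse)[:k].tolist()  # indexes of the k top (or bottom) scores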

plot_hist_groups[source]

plot_hist_groups(pred, y, metric, bins=None, figsize=(16, 16))
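
A hedged usage sketch only; the exact meaning of the pred, y and metric arguments is an assumption here (predictions, targets and a per-item uncertainty score such as entropy).

# assumes mean_probs, targets and per-item entropy scores come from an earlier bayes_get_preds call
plot_hist_groups(mean_probs, targets, entropy_scores, bins=20)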

Get predictions for a test set

This patches a method onto Learner to make MC Dropout predictions

Learner.bayes_get_preds[source]

Learner.bayes_get_preds(ds_idx=1, dl=None, n_sample=10, act=None, with_loss=False, **kwargs)

Get MC Dropout predictions from a learner, optionally reducing the samples
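
A hedged usage sketch based on the shape checks in the tests below: the first element of the result appears to be the raw n_sample x batch x classes tensor and the second the reduced batch x classes probabilities; the meaning of the remaining elements is not documented here.

# assumes `learner` is a Learner with dropout layers and `test_dl` is a DataLoader
res = learner.bayes_get_preds(dl=test_dl, n_sample=10)
sampled_probs = res[0]  # n_sample x batch_size x n_classes
mean_probs = res[1]     # batch_size x n_classes, reduced over the samples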

Get predictions for an image item

Learner.bayes_predict[source]

Learner.bayes_predict(item, rm_type_tfms=None, with_input=False, sample_size=10, reduce=True)

Get a sample distribution of predictions and compute the entropy
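
A hedged usage sketch, assuming `img` is a PILImage and the learner was trained with dropout; the structure of the returned value is not documented here.

# runs sample_size stochastic forward passes and (with reduce=True) aggregates them
pred = learner.bayes_predict(img, sample_size=10, reduce=True)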

Add an uncertainty threshold to predictions

Learner.bayes_predict_with_uncertainty[source]

Learner.bayes_predict_with_uncertainty(item, rm_type_tfms=None, with_input=False, threshold_entropy=0.2, sample_size=10, reduce=True)

Get the prediction results plus whether the prediction passes the entropy threshold
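
A hedged usage sketch, assuming the result bundles the usual prediction outputs with a flag for whether the computed entropy stays under threshold_entropy.

# a lower threshold_entropy means the model must be more certain for the prediction to pass
res = learner.bayes_predict_with_uncertainty(img, threshold_entropy=0.2, sample_size=10)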

Add a kitchen-sink method to build a dataframe, dataloader and predictions

Learner.bayes_build_inference_dfdlpreds[source]

Learner.bayes_build_inference_dfdlpreds(path, dataset, item_count=100, n_sample=10)
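
A hedged usage sketch; unpacking the result into a dataframe, dataloader and predictions is an assumption based on the method name and the heading above, and `path` and `dataset` are placeholders.

# assumes `path` points at the image data and `dataset` names the subset to load
df, dl, preds = learner.bayes_build_inference_dfdlpreds(path, dataset, item_count=100, n_sample=10)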

Test Functions

from fastai.test_utils import synth_dbunch, synth_learner
try:
    from contextlib import nullcontext # available in Python 3.7+
except ImportError as e:
    from contextlib import suppress as nullcontext # fallback for Python 3.6 and below
dls = synth_dbunch()
dls.vocab = [1,]
learner = synth_learner(data=dls)
learner.no_bar = nullcontext # disable progress bars during tests
bears_dl = dls.train
pets_dl = dls.valid
N_SAMPLE = 2
CATEGORIES = 1
BS = 160
from fastai.learner import load_learner
from fastai.data.transforms import get_image_files
from fastai.data.external import Config
from fastai.vision.core import PILImage
import random
# setup objects using local paths (this overrides the synthetic setup above when the exported model and local data are available)
cfg = Config()
learner = load_learner(cfg.model_path/'bears_classifier'/'export.pkl')
bear_path = cfg.data_path/'bears'
pet_path = cfg.data_path/'pets'
bear_img_files = get_image_files(bear_path)
pet_img_files = get_image_files(pet_path)

random.seed(69420) # fix images retrieved
pet_img = PILImage.create(pet_img_files.shuffle()[0])
bear_img = PILImage.create(bear_img_files.shuffle()[0])

pet_items = pet_img_files.shuffle()[:20]
bear_items = bear_img_files.shuffle()[:20]

pet_dset = pet_items.map(lambda o: PILImage.create(o))
bear_dset = bear_items.map(lambda o: PILImage.create(o))
pets_dl = learner.dls.test_dl(pet_dset,num_workers=0)

bears_dl = learner.dls.test_dl(bear_dset,num_workers=0)
# xb.shape = torch.Size([20, 3, 224, 224])
N_SAMPLE = 2
CATEGORIES = 3
BS = 20
from fastcore.test import *
Bayes Prediction for Test Set
bear_res = learner.bayes_get_preds(dl=bears_dl, n_sample=N_SAMPLE)
pet_res = learner.bayes_get_preds(dl=pets_dl, n_sample=N_SAMPLE)
test_eq(len(bear_res),6)
# ci 6
# local 6
test_eq(bear_res[0].shape, [N_SAMPLE,BS,CATEGORIES])
#ci torch.Size([2, 160, 1])
#local torch.Size([5, 20, 3])
test_eq(bear_res[1].shape, [BS, CATEGORIES])
#ci torch.Size([160, 1])
#local torch.Size([20, 3])
test_eq(bear_res[2].shape,[BS])
#ci torch.Size([160])
#local torch.Size([20])
test_eq(bear_res[3].shape,[BS])
# ci torch.Size([160])
# local torch.Size([20])
test_eq(bear_res[4].shape,[BS]) 
#ci torch.Size([160])
#local torch.Size([20])
test_eq(len(bear_res[5]),BS)
# ci 160
# local 20