From def1d61153abf7acd2360bec7baf8af59b8fe337 Mon Sep 17 00:00:00 2001
From: janezd
Date: Thu, 9 Jun 2022 14:41:14 +0200
Subject: [PATCH] Predictions: Show errors

---
 Orange/widgets/evaluate/owpredictions.py     | 284 ++++
 .../evaluate/tests/test_owpredictions.py     | 426 +++++++++++++++---
 2 files changed, 626 insertions(+), 84 deletions(-)

diff --git a/Orange/widgets/evaluate/owpredictions.py b/Orange/widgets/evaluate/owpredictions.py
index 09522175fbd..21be2dff78c 100644
--- a/Orange/widgets/evaluate/owpredictions.py
+++ b/Orange/widgets/evaluate/owpredictions.py
@@ -1,3 +1,4 @@
+import math
 import warnings
 from contextlib import contextmanager
 from functools import partial
@@ -8,7 +9,7 @@
 import numpy
 from AnyQt.QtWidgets import (
     QTableView, QSplitter, QToolTip, QStyle, QApplication, QSizePolicy,
-    QPushButton)
+    QPushButton, QAbstractItemDelegate)
 from AnyQt.QtGui import QPainter, QStandardItem, QPen, QColor, QBrush
 from AnyQt.QtCore import (
     Qt, QSize, QRect, QRectF, QPoint, QPointF, QLocale,
@@ -44,6 +45,17 @@
 ])
 
 
+NO_ERR, DIFF_ERROR, ABSDIFF_ERROR, REL_ERROR, ABSREL_ERROR = range(5)
+ERROR_OPTS = ["(None)", "Difference", "Absolute difference",
+              "Relative", "Absolute relative"]
+ERROR_TOOLTIPS = [
+    "Don't show columns with errors",
+    "Show difference between predicted and actual value",
+    "Show absolute difference between predicted and actual value",
+    "Show relative difference between predicted and actual value",
+    "Show absolute value of relative difference between predicted and actual value"]
+
+
 class OWPredictions(OWWidget):
     name = "Predictions"
     icon = "icons/Predictions.svg"
@@ -85,12 +97,15 @@ class Error(OWWidget.Error):
         "Show probabilities for classes in data that are also\n"
         "known to the model"
     ]
+
     NO_PROBS, DATA_PROBS, MODEL_PROBS, BOTH_PROBS = range(4)
     shown_probs = settings.ContextSetting(NO_PROBS)
     selection = settings.Setting([], schema_only=True)
     show_scores = settings.Setting(True)
     TARGET_AVERAGE = "(Average over classes)"
     target_class = settings.ContextSetting(TARGET_AVERAGE)
+    show_probability_errors = settings.ContextSetting(True)
+    show_reg_errors = settings.ContextSetting(DIFF_ERROR)
 
     def __init__(self):
         super().__init__()
@@ -112,8 +127,27 @@ def __init__(self):
             gui.widgetLabel(predopts, "Show probabilities for"),
             gui.comboBox(
                 predopts, self, "shown_probs", contentsLength=30,
-                callback=self._update_prediction_delegate)
+                callback=self._update_prediction_delegate),
         ]
+
+        self._cls_error_controls = [
+            gui.checkBox(
+                predopts, self, "show_probability_errors",
+                "Show classification errors",
+                tooltip="Show 1 - probability assigned to the correct class",
+                callback=self._update_errors_visibility
+            )
+        ]
+
+        err_label = gui.widgetLabel(predopts, "Shown regression error: ")
+        err_combo = gui.comboBox(
+            predopts, self, "show_reg_errors", items=ERROR_OPTS,
+            callback=self._reg_error_changed,
+            toolTip="See tooltips for individual options")
+        self._reg_error_controls = [err_label, err_combo]
+        for i, tip in enumerate(ERROR_TOOLTIPS):
+            err_combo.setItemData(i, tip, Qt.ToolTipRole)
+
         gui.rubber(predopts)
         self.reset_button = button = QPushButton("Restore Original Order")
         button.clicked.connect(self._reset_order)
@@ -272,16 +306,37 @@ def _set_target_combos(self):
             item.setFlags(item.flags() & ~Qt.ItemIsEnabled)
 
     def _update_control_visibility(self):
+        visible_prob = self.is_discrete_class \
+            or any(slot.predictor.domain.has_discrete_class
+                   for slot in self.predictors)
         for widget in self._prob_controls:
-            widget.setVisible(self.is_discrete_class
-                              or any(slot.predictor.domain.has_discrete_class
-                                     for slot in self.predictors))
+            widget.setVisible(visible_prob)
+
+        for widget in self._cls_error_controls:
+            widget.setVisible(self.is_discrete_class)
+        for widget in self._reg_error_controls:
+            widget.setVisible(bool(self.class_var) and not self.is_discrete_class)
 
         for widget in self._target_controls:
            widget.setVisible(self.is_discrete_class and self.show_scores)
 
         self.score_opt_box.setVisible(bool(self.class_var))
 
+    def _reg_error_changed(self):
+        self.predictionsview.model().setRegErrorType(self.show_reg_errors)
+        self._update_prediction_delegate()
+
+    def _update_errors_visibility(self):
+        shown = self.class_var and (
+            self.show_probability_errors if self.is_discrete_class
+            else self.show_reg_errors != NO_ERR)
+        view = self.predictionsview
+        for col, slot in enumerate(self.predictors):
+            view.setColumnHidden(
+                2 * col + 1,
+                not shown or
+                self.is_discrete_class is not slot.predictor.domain.has_discrete_class)
+
     def _set_class_values(self):
         self.class_values = []
         if self.is_discrete_class:
@@ -509,7 +564,7 @@ def _update_predictions_model(self):
             model = PredictionsModel(
                 all_values, all_probs,
                 self.data.Y if self.class_var else None,
-                headers)
+                headers, reg_error_type=self.show_reg_errors)
             model.list_sorted.connect(
                 partial(
                     self._update_data_sort_order, self.predictionsview,
@@ -633,6 +688,7 @@ def _update_prediction_delegate(self):
                 maxv = numpy.nanmax(self.data.Y)
             else:
                 minv = maxv = numpy.nan
+        model = self.predictionsview.model()
         for col, slot in enumerate(self._non_errored_predictors()):
             target = slot.predictor.domain.class_var
             if target is not None and target.is_discrete:
@@ -641,8 +697,11 @@
                 tooltip_probs = [self.class_values[i]
                                  for i in shown_probs if i is not None]
                 delegate = ClassificationItemDelegate(
-                    self.class_values, colors, shown_probs, tooltip_probs,
-                    parent=self.predictionsview)
+                    self.class_values, colors, shown_probs, tooltip_probs)
+                if self.is_discrete_class:
+                    error_delegate = ClassificationErrorDelegate()
+                else:
+                    error_delegate = NoopItemDelegate()
                 sort_col_indices.append([col for col in shown_probs
                                          if col is not None])
@@ -653,16 +712,30 @@
                         RuntimeWarning)
                     minpv = numpy.nanmin([minv, numpy.nanmin(predictions)])
                     maxpv = numpy.nanmax([maxv, numpy.nanmax(predictions)])
-                delegate = RegressionItemDelegate(
-                    target.format_str if target is not None else None,
-                    minpv, maxpv,
-                    parent=self.predictionsview)
+                format_str = target.format_str if target is not None else None
+                delegate = RegressionItemDelegate(format_str, minpv, maxpv)
+
+                if self.show_reg_errors == NO_ERR \
+                        or self.class_var is None or self.is_discrete_class:
+                    error_delegate = NoopItemDelegate()
+                else:
+                    errors = model.errorColumn(col)
+                    centered = self.show_reg_errors in (REL_ERROR, DIFF_ERROR)
+                    span = numpy.nanmax(numpy.abs(errors))
+                    error_delegate = RegressionErrorDelegate(
+                        format_str, centered, span)
                 sort_col_indices.append(None)
 
             # QAbstractItemView does not take ownership of delegates, so we must
+            delegate.setParent(self.predictionsview)
             self._delegates.append(delegate)
-            self.predictionsview.setItemDelegateForColumn(col, delegate)
-            self.predictionsview.setColumnHidden(col, False)
+            error_delegate.setParent(self.predictionsview)
+            self._delegates.append(error_delegate)
+            self.predictionsview.setItemDelegateForColumn(2 * col, delegate)
+            self.predictionsview.setColumnHidden(2 * col, False)
+            self.predictionsview.setItemDelegateForColumn(2 * col + 1,
+                                                          error_delegate)
+
+        self._update_errors_visibility()
 
         self.predictionsview.resizeColumnsToContents()
         self._recompute_splitter_sizes()
@@ -882,7 +955,7 @@ class DataItemDelegate(ItemDelegate):
     pass
 
 
-class PredictionsItemDelegate(ItemDelegate):
+class PredictionsBarItemDelegate(ItemDelegate):
     """
     A base Item Delegate for formatting and drawing predictions/probabilities
     """
@@ -893,12 +966,6 @@ def __init__(self, parent=None):
         super().__init__(parent)
         self.fmt = ""
 
-    def displayText(self, value, _):
-        if value is None:
-            return ""
-        value, dist = value
-        return self.fmt.format(value=value, dist=dist)
-
     def sizeHint(self, option, index):
         # reimplemented
         sh = super().sizeHint(option, index)
@@ -961,6 +1028,14 @@ def drawBar(self, painter, option, index, rect):
         pass  # pragma: no cover
 
 
+class PredictionsItemDelegate(PredictionsBarItemDelegate):
+    def displayText(self, value, _):
+        if value is None:
+            return ""
+        value, dist = value
+        return self.fmt.format(value=value, dist=dist)
+
+
 class ClassificationItemDelegate(PredictionsItemDelegate):
     def __init__(
             self, class_values, colors, shown_probabilities=(),
@@ -970,7 +1045,7 @@ def __init__(
         self.colors = [QColor(*c) for c in colors]
         self.shown_probabilities = shown_probabilities
 
-        self.fmt = ""
+
         if shown_probabilities:
             probs = " : ".join(f"{{dist[{i}]:.2f}}" if i is not None else "-"
                                for i in shown_probabilities)
@@ -1016,6 +1091,51 @@ def drawBar(self, painter, option, index, rect):
         painter.restore()
 
 
+class ErrorDelegate(PredictionsBarItemDelegate):
+    __size_hint = None
+
+    @classmethod
+    def sizeHint(cls, option, index):
+        if cls.__size_hint is None:
+            if option.widget is not None:
+                style = option.widget.style()
+            else:
+                style = QApplication.style()
+            margin = style.pixelMetric(
+                QStyle.PM_FocusFrameHMargin, option, option.widget) + 1
+            cls.__size_hint = QSize(
+                2 * margin + option.fontMetrics.horizontalAdvance("X" * 6),
+                1)
+        return cls.__size_hint
+
+
+class NoopItemDelegate(QAbstractItemDelegate):
+    def paint(self, *_):
+        pass
+
+    @staticmethod
+    def sizeHint(*_):
+        return QSize(0, 0)
+
+
+class ClassificationErrorDelegate(ErrorDelegate):
+    def displayText(self, value, _):
+        return "?" if numpy.isnan(value) else f"{value:.3f}"
+
+    def drawBar(self, painter, option, index, rect):
+        value = self.cachedData(index, Qt.DisplayRole)
+        if numpy.isnan(value):
+            return
+
+        painter.save()
+        painter.translate(rect.topLeft())
+        length = rect.width() * value
+        height = rect.height()
+        painter.setBrush(QColor(255, 0, 0))
+        painter.drawRect(QRectF(0, 0, length, height))
+        painter.restore()
+
+
 class RegressionItemDelegate(PredictionsItemDelegate):
     def __init__(self,
                  target_format: Optional[str]=None,
@@ -1062,23 +1182,69 @@ def drawBar(self, painter, option, index, rect):
         painter.restore()
 
 
+class RegressionErrorDelegate(ErrorDelegate):
+    def __init__(self, fmt, centered, span, parent=None):
+        super().__init__(parent)
+        self.format = fmt
+        self.centered = centered
+        self.span = span  # can be 0 if no errors, or None if they're hidden
+
+    def initStyleOption(self, option, index):
+        super().initStyleOption(option, index)
+        option.displayAlignment = \
+            (option.displayAlignment & Qt.AlignVertical_Mask) | \
+            (Qt.AlignCenter if self.centered else Qt.AlignRight)
+
+    def displayText(self, value, _):
+        if not self.format:
+            return ""
+        if numpy.isnan(value):
+            return "?"
+        if numpy.isneginf(value):
+            return "-∞"
+        if numpy.isinf(value):
+            return "∞"
+        return self.format % value
+
+    def drawBar(self, painter, option, index, rect):
+        if not self.span:  # can be 0 if no errors, or None if they're hidden
+            return
+        error = self.cachedData(index, Qt.DisplayRole)
+        if numpy.isnan(error):
+            return
+        scaled = error / self.span
+
+        painter.save()
+        painter.translate(rect.topLeft())
+        width = rect.width()
+        height = rect.height()
+        if self.centered:
+            painter.setBrush(QColor(0, 0, 255) if error < 0 else QColor(255, 0, 0))
+            painter.drawRect(QRectF(width / 2, 0, width / 2 * scaled, height))
+        else:
+            painter.setBrush(QColor(255, 0, 0))
+            painter.drawRect(QRectF(0, 0, width * scaled, height))
+        painter.restore()
+
+
 class PredictionsModel(AbstractSortTableModel):
     list_sorted = pyqtSignal()
 
     def __init__(self, values=None, probs=None, actual=None,
-                 headers=None, parent=None):
+                 headers=None, reg_error_type=NO_ERR, parent=None):
         super().__init__(parent)
         self._values = values
         self._probs = probs
         self._actual = actual
         self.__probInd = None
+        self._reg_err_type = reg_error_type
         if values is not None:
             assert len(values) == len(probs) != 0
             assert len(values[0]) == len(probs[0])
             assert actual is None or len(probs[0]) == len(actual)
             sizes = {len(x) for c in (values, probs) for x in c}
             assert len(sizes) == 1
-            self.__columnCount = len(values)
+            self.__columnCount = 2 * len(values)
             self.__rowCount = sizes.pop()
             if headers is None:
                 headers = [None] * self.__columnCount
@@ -1094,11 +1260,40 @@ def rowCount(self, parent=QModelIndex()):
     def columnCount(self, parent=QModelIndex()):
         return 0 if parent.isValid() else self.__columnCount
 
+    def setRegErrorType(self, err_type):
+        self._reg_err_type = err_type
+
     def data(self, index, role=Qt.DisplayRole):
         row = self.mapToSourceRows(index.row())
         if role in (Qt.DisplayRole, Qt.EditRole):
             column = index.column()
-            return self._values[column][row], self._probs[column][row]
+            error_column = column % 2 == 1
+            column //= 2
+            if error_column:
+                if self._actual is None:
+                    return None
+                actual = self._actual[row]
+                if numpy.isnan(actual):
+                    return None
+                elif self._probs[column].size:
+                    return 1 - self._probs[column][row, int(actual)]
+                else:
+                    diff = self._values[column][row] - actual
+                    if self._reg_err_type == DIFF_ERROR:
+                        return diff
+                    elif self._reg_err_type == ABSDIFF_ERROR:
+                        return abs(diff)
+                    elif actual == diff == 0:
+                        return 0
+                    elif self._reg_err_type == REL_ERROR:
+                        return diff / abs(actual) if actual != 0 \
+                            else math.copysign(numpy.inf, diff)
+                    elif self._reg_err_type == ABSREL_ERROR:
+                        return abs(diff / actual) if actual != 0 else numpy.inf
+                    else:
+                        return None
+            else:
+                return self._values[column][row], self._probs[column][row]
         if role == Qt.UserRole:
             return self._actual[row] if self._actual is not None else numpy.nan
         return None
@@ -1107,15 +1302,49 @@ def headerData(self, section, orientation, role=Qt.DisplayRole):
         if role == Qt.DisplayRole:
             if orientation == Qt.Vertical:
                 return str(section + 1)
-            elif self._header is not None and section < len(self._header):
-                return self._header[section]
+            elif self._header is not None and section < 2 * len(self._header):
+                if section % 2 == 1:
+                    return "error"
+                else:
+                    return self._header[section // 2]
         return None
 
+    def errorColumn(self, column):
+        probs = self._probs[column]
+        if probs is not None and probs.size:
+            actuals = self._actual.copy()
+            nans = numpy.isnan(actuals)
+            actuals[nans] = 0
+            errors = 1 - numpy.choose(actuals.astype(int), self._probs[column].T)
+            errors[nans] = 2
+            errors[numpy.isnan(errors)] = 2
+            return errors
+        else:
+            actual = self._actual
+            diff = self._values[column] - actual
+            if self._reg_err_type == DIFF_ERROR:
+                return diff
+            elif self._reg_err_type == ABSDIFF_ERROR:
+                return numpy.abs(diff)
+            # we want inf's here
+            with numpy.errstate(divide="ignore", invalid="ignore"):
+                rel = diff / numpy.abs(actual)
+                rel[diff == 0] = 0  # 0 / 0 will become nan in previous line
+            if self._reg_err_type == REL_ERROR:
+                return rel
+            elif self._reg_err_type == ABSREL_ERROR:
+                return numpy.abs(rel)
+            else:
+                return numpy.zeros(len(actual))
+
     def setProbInd(self, indicess):
         self.__probInd = indicess
         self.sort(self.sortColumn(), self.sortOrder())
 
     def sortColumnData(self, column):
+        if column % 2 == 1:
+            return self.errorColumn(column // 2)
+        column //= 2
         values = self._values[column]
         probs = self._probs[column]
         # Let us assume that probs can be None, numpy array or list of arrays
@@ -1132,7 +1361,6 @@ def sort(self, column, order=Qt.AscendingOrder):
         super().sort(column, order)
         self.list_sorted.emit()
 
-
 # PredictionsModel and DataModel have the same signal and sort method, but
 # extracting them into a mixin (because they're derived from different classes)
 # would be more complicated and longer than some code repetition.
diff --git a/Orange/widgets/evaluate/tests/test_owpredictions.py b/Orange/widgets/evaluate/tests/test_owpredictions.py
index 5be547582c3..9c40fdfd3eb 100644
--- a/Orange/widgets/evaluate/tests/test_owpredictions.py
+++ b/Orange/widgets/evaluate/tests/test_owpredictions.py
@@ -2,6 +2,8 @@
 # pylint: disable=protected-access
 import io
 import unittest
+from functools import partial
+from typing import Optional
 from unittest.mock import Mock, patch
 
 import numpy as np
@@ -11,7 +13,7 @@
 from Orange.base import Model
 from Orange.classification import LogisticRegressionLearner, NaiveBayesLearner
-from Orange.classification.majority import ConstantModel
+from Orange.classification.majority import ConstantModel, MajorityLearner
 from Orange.data.io import TabReader
 from Orange.evaluation.scoring import TargetScore
 from Orange.preprocess import Remove
@@ -20,7 +22,9 @@
 from Orange.widgets.evaluate.owpredictions import (
     OWPredictions, SharedSelectionModel, SharedSelectionStore, DataModel,
     PredictionsModel,
-    PredictionsItemDelegate, ClassificationItemDelegate, RegressionItemDelegate)
+    PredictionsItemDelegate, ClassificationItemDelegate, RegressionItemDelegate,
+    NoopItemDelegate, RegressionErrorDelegate, ClassificationErrorDelegate,
+    NO_ERR, DIFF_ERROR, ABSDIFF_ERROR, REL_ERROR, ABSREL_ERROR)
 from Orange.widgets.evaluate.owcalibrationplot import OWCalibrationPlot
 from Orange.widgets.evaluate.owconfusionmatrix import OWConfusionMatrix
 from Orange.widgets.evaluate.owliftcurve import OWLiftCurve
@@ -42,6 +46,9 @@ def setUp(self):
         self.iris_classless = self.iris.transform(Domain(self.iris.domain.attributes, []))
         self.housing = Table("housing")
 
+    def test_minimum_size(self):
+        pass
+
     def test_rowCount_from_model(self):
         """Don't crash if the bottom row is visible"""
         self.send_signal(self.widget.Inputs.data, self.iris[:5])
@@ -674,11 +681,11 @@ def test_update_prediction_delegate_discrete(self):
         widget.shown_probs = widget.DATA_PROBS
         widget._update_prediction_delegate()
         self.assertEqual(widget._delegates[0].shown_probabilities, [0, 1, 2])
-        self.assertEqual(widget._delegates[1].shown_probabilities, [0, 1, None])
-        self.assertEqual(widget._delegates[1].shown_probabilities, [0, 1, None])
-        self.assertEqual(widget._delegates[2].shown_probabilities, [None, 1, 2])
-        self.assertEqual(widget._delegates[3].shown_probabilities, [None, None, None])
-        for delegate in widget._delegates[:-1]:
+        self.assertEqual(widget._delegates[2].shown_probabilities, [0, 1, None])
+        self.assertEqual(widget._delegates[2].shown_probabilities, [0, 1, None])
+        self.assertEqual(widget._delegates[4].shown_probabilities, [None, 1, 2])
+        self.assertEqual(widget._delegates[6].shown_probabilities, [None, None, None])
+        for delegate in widget._delegates[:-1:2]:
             self.assertEqual(delegate.tooltip, "p(a, b, c)")
         set_prob_ind.assert_called_with([[0, 1, 2], [0, 1], [1, 2], []])
 
@@ -686,34 +693,34 @@
         widget._update_prediction_delegate()
         self.assertEqual(widget._delegates[0].shown_probabilities, [0, 1, 2])
         self.assertEqual(widget._delegates[0].tooltip, "p(a, b, c)")
-        self.assertEqual(widget._delegates[1].shown_probabilities, [0, 1])
-        self.assertEqual(widget._delegates[1].tooltip, "p(a, b)")
-        self.assertEqual(widget._delegates[2].shown_probabilities, [2, 1, 3])
-        self.assertEqual(widget._delegates[2].tooltip, "p(c, b, d)")
-        self.assertEqual(widget._delegates[3].shown_probabilities, [4])
-        self.assertEqual(widget._delegates[3].tooltip, "p(e)")
+        self.assertEqual(widget._delegates[2].shown_probabilities, [0, 1])
+        self.assertEqual(widget._delegates[2].tooltip, "p(a, b)")
+        self.assertEqual(widget._delegates[4].shown_probabilities, [2, 1, 3])
+        self.assertEqual(widget._delegates[4].tooltip, "p(c, b, d)")
+        self.assertEqual(widget._delegates[6].shown_probabilities, [4])
+        self.assertEqual(widget._delegates[6].tooltip, "p(e)")
         set_prob_ind.assert_called_with([[0, 1, 2], [0, 1], [2, 1, 3], [4]])
 
         widget.shown_probs = widget.BOTH_PROBS
         widget._update_prediction_delegate()
         self.assertEqual(widget._delegates[0].shown_probabilities, [0, 1, 2])
         self.assertEqual(widget._delegates[0].tooltip, "p(a, b, c)")
-        self.assertEqual(widget._delegates[1].shown_probabilities, [0, 1])
-        self.assertEqual(widget._delegates[1].tooltip, "p(a, b)")
-        self.assertEqual(widget._delegates[2].shown_probabilities, [1, 2])
-        self.assertEqual(widget._delegates[2].tooltip, "p(b, c)")
-        self.assertEqual(widget._delegates[3].shown_probabilities, [])
-        self.assertEqual(widget._delegates[3].tooltip, "")
+        self.assertEqual(widget._delegates[2].shown_probabilities, [0, 1])
+        self.assertEqual(widget._delegates[2].tooltip, "p(a, b)")
+        self.assertEqual(widget._delegates[4].shown_probabilities, [1, 2])
+        self.assertEqual(widget._delegates[4].tooltip, "p(b, c)")
+        self.assertEqual(widget._delegates[6].shown_probabilities, [])
+        self.assertEqual(widget._delegates[6].tooltip, "")
         set_prob_ind.assert_called_with([[0, 1, 2], [0, 1], [1, 2], []])
 
         n_fixed = len(widget.PROB_OPTS)
         widget.shown_probs = n_fixed  # a
         widget._update_prediction_delegate()
         self.assertEqual(widget._delegates[0].shown_probabilities, [0])
-        self.assertEqual(widget._delegates[1].shown_probabilities, [0])
-        self.assertEqual(widget._delegates[2].shown_probabilities, [None])
-        self.assertEqual(widget._delegates[3].shown_probabilities, [None])
-        for delegate in widget._delegates[:-1]:
+        self.assertEqual(widget._delegates[2].shown_probabilities, [0])
+        self.assertEqual(widget._delegates[4].shown_probabilities, [None])
+        self.assertEqual(widget._delegates[6].shown_probabilities, [None])
+        for delegate in widget._delegates[:-1:2]:
             self.assertEqual(delegate.tooltip, "p(a)")
         set_prob_ind.assert_called_with([[0], [0], [], []])
 
@@ -721,10 +728,10 @@
         widget.shown_probs = n_fixed + 1  # b
         widget._update_prediction_delegate()
         self.assertEqual(widget._delegates[0].shown_probabilities, [1])
-        self.assertEqual(widget._delegates[1].shown_probabilities, [1])
         self.assertEqual(widget._delegates[2].shown_probabilities, [1])
-        self.assertEqual(widget._delegates[3].shown_probabilities, [None])
-        for delegate in widget._delegates[:-1]:
+        self.assertEqual(widget._delegates[4].shown_probabilities, [1])
+        self.assertEqual(widget._delegates[6].shown_probabilities, [None])
+        for delegate in widget._delegates[:-1:2]:
             self.assertEqual(delegate.tooltip, "p(b)")
         set_prob_ind.assert_called_with([[1], [1], [1], []])
 
@@ -732,10 +739,10 @@
         widget.shown_probs = n_fixed + 2  # c
         widget._update_prediction_delegate()
         self.assertEqual(widget._delegates[0].shown_probabilities, [2])
-        self.assertEqual(widget._delegates[1].shown_probabilities, [None])
-        self.assertEqual(widget._delegates[2].shown_probabilities, [2])
-        self.assertEqual(widget._delegates[3].shown_probabilities, [None])
-        for delegate in widget._delegates[:-1]:
+        self.assertEqual(widget._delegates[2].shown_probabilities, [None])
+        self.assertEqual(widget._delegates[4].shown_probabilities, [2])
+        self.assertEqual(widget._delegates[6].shown_probabilities, [None])
+        for delegate in widget._delegates[:-1:2]:
             self.assertEqual(delegate.tooltip, "p(c)")
         set_prob_ind.assert_called_with([[2], [], [2], []])
 
@@ -807,13 +814,13 @@ def predict(self, X):
         self.assertEqual(delegate.offset, 10)
         self.assertEqual(delegate.span, 5)
 
-        delegate = widget.predictionsview.itemDelegateForColumn(1)
+        delegate = widget.predictionsview.itemDelegateForColumn(2)
         # values for model are all-nan, Y goes from 12 to 15 (incl)
         self.assertIsInstance(delegate, RegressionItemDelegate)
         self.assertEqual(delegate.offset, 12)
         self.assertEqual(delegate.span, 3)
 
-        delegate = widget.predictionsview.itemDelegateForColumn(2)
+        delegate = widget.predictionsview.itemDelegateForColumn(4)
         self.assertIsInstance(delegate, ClassificationItemDelegate)
 
         data = Table(domain, x, np.full(5, np.nan))
@@ -824,13 +831,13 @@ def predict(self, X):
         self.assertEqual(delegate.offset, 10)
         self.assertEqual(delegate.span, 4)
 
-        delegate = widget.predictionsview.itemDelegateForColumn(1)
+        delegate = widget.predictionsview.itemDelegateForColumn(2)
         # values for model and y are nan
         self.assertIsInstance(delegate, RegressionItemDelegate)
         self.assertEqual(delegate.offset, 0)
         self.assertEqual(delegate.span, 1)
 
-        delegate = widget.predictionsview.itemDelegateForColumn(2)
+        delegate = widget.predictionsview.itemDelegateForColumn(4)
         self.assertIsInstance(delegate, ClassificationItemDelegate)
 
         class _Scorer(TargetScore):
@@ -1022,14 +1029,126 @@ def test_multi_target_input(self):
         mock_model = Mock(spec=Model, return_value=np.asarray([0.2, 0.1]))
         mock_model.name = 'Mockery'
         mock_model.domain = domain
-        mock_learner = Mock(return_value=mock_model)
-        model = mock_learner(data)
 
         self.send_signal(widget.Inputs.data, data)
-        self.send_signal(widget.Inputs.predictors, model, 1)
+        self.send_signal(widget.Inputs.predictors, mock_model, 1)
         pred = self.get_output(widget.Outputs.predictions)
         self.assertIsInstance(pred, Table)
 
+    def test_error_controls_visibility(self):
+        widget = self.widget
+        senddata = partial(self.send_signal, widget.Inputs.data)
+        sendpredictor = partial(self.send_signal, widget.Inputs.predictors)
+        clshidden = widget._cls_error_controls[0].isHidden
+        reghidden = widget._reg_error_controls[0].isHidden
+        colhidden = widget.predictionsview.isColumnHidden
+        delegate = widget.predictionsview.itemDelegateForColumn
+
+        iris = self.iris
+        regiris = iris.transform(Domain(iris.domain.attributes[:3],
+                                        iris.domain.attributes[3]))
+        riris = MeanLearner()(regiris)
+        ciris = MajorityLearner()(iris)
+
+        self.assertFalse(clshidden())
+        self.assertFalse(reghidden())
+
+        senddata(self.housing)
+        self.assertTrue(clshidden())
+        self.assertFalse(reghidden())
+
+        senddata(self.iris)
+        self.assertFalse(clshidden())
+        self.assertTrue(reghidden())
+
+        senddata(None)
+        self.assertTrue(clshidden())
+        self.assertTrue(reghidden())
+
+        senddata(self.iris_classless)
+        self.assertTrue(clshidden())
+        self.assertTrue(reghidden())
+
+        sendpredictor(ciris, 0)
+        sendpredictor(riris, 1)
+        self.assertFalse(colhidden(0))
+        self.assertTrue(colhidden(1))
+        self.assertFalse(colhidden(2))
+        self.assertTrue(colhidden(3))
+        self.assertIsInstance(delegate(1), NoopItemDelegate)
+        self.assertIsInstance(delegate(3), NoopItemDelegate)
+
+        senddata(regiris)
+        self.assertFalse(colhidden(0))
+        self.assertTrue(colhidden(1))
+        self.assertFalse(colhidden(2))
+        self.assertFalse(colhidden(3))
+        self.assertIsInstance(delegate(1), NoopItemDelegate)
+        self.assertIsInstance(delegate(3), RegressionErrorDelegate)
+
+        err_combo = self.widget.controls.show_reg_errors
+        err_combo.setCurrentIndex(0)
+        err_combo.activated.emit(0)
+        self.assertTrue(colhidden(1))
+        self.assertTrue(colhidden(3))
+        self.assertIsInstance(delegate(1), NoopItemDelegate)
+        self.assertIsInstance(delegate(3), (RegressionErrorDelegate,
+                                            NoopItemDelegate))
+
+        senddata(iris)
+        self.assertFalse(colhidden(1))
+        self.assertTrue(colhidden(3))
+        self.assertIsInstance(delegate(1), ClassificationErrorDelegate)
+        self.assertIsInstance(delegate(3), NoopItemDelegate)
+
+        err_box = self.widget.controls.show_probability_errors
+        err_box.click()
+        self.assertTrue(colhidden(1))
+        self.assertIsInstance(delegate(1), (ClassificationErrorDelegate,
+                                            NoopItemDelegate))
+        self.assertIsInstance(delegate(3), NoopItemDelegate)
+
+    def test_regression_error_delegate_ranges(self):
+        def set_type(tpe):
+            combo = widget.controls.show_reg_errors
+            combo.setCurrentIndex(tpe)
+            combo.activated.emit(tpe)
+
+        def get_delegate() -> Optional[RegressionErrorDelegate]:
+            return widget.predictionsview.itemDelegateForColumn(1)
+
+        widget = self.widget
+        domain = Domain([ContinuousVariable("x")],
+                        ContinuousVariable("y"))
+        data = Table.from_numpy(domain, np.arange(2, 12)[:, None], np.arange(2, 12))
+        model = MeanLearner()(data)
+        model.mean = 5
+        self.send_signal(widget.Inputs.data, data)
+        self.send_signal(widget.Inputs.predictors, model, 0)
+
+        set_type(NO_ERR)
+        self.assertIsInstance(get_delegate(), NoopItemDelegate)
+
+        set_type(DIFF_ERROR)
+        delegate = get_delegate()
+        self.assertEqual(delegate.span, 6)
+        self.assertTrue(delegate.centered)
+
+        set_type(ABSDIFF_ERROR)
+        delegate = get_delegate()
+        self.assertEqual(delegate.span, 6)
+        self.assertFalse(delegate.centered)
+
+        set_type(REL_ERROR)
+        delegate = get_delegate()
+        self.assertEqual(delegate.span, max(3 / 2, 6 / 11))
+        self.assertTrue(delegate.centered)
+
+        set_type(ABSREL_ERROR)
+        delegate = get_delegate()
+        self.assertEqual(delegate.span, max(3 / 2, 6 / 11))
+        self.assertFalse(delegate.centered)
+
     def test_report(self):
         widget = self.widget
@@ -1296,29 +1415,79 @@ def setUpClass(cls) -> None:
     def test_model_classification(self):
         model = PredictionsModel(self.values, self.probs, self.actual)
         self.assertEqual(model.rowCount(), 5)
-        self.assertEqual(model.columnCount(), 2)
+        self.assertEqual(model.columnCount(), 4)
 
-        val, prob = model.data(model.index(0, 1))
+        val, prob = model.data(model.index(0, 2))
         self.assertEqual(val, 0)
         np.testing.assert_equal(prob, [0.8, 0, 0.2])
 
-        val, prob = model.data(model.index(3, 1))
+        val, prob = model.data(model.index(3, 2))
         self.assertEqual(val, 1)
         np.testing.assert_equal(prob, [0.1, 0.6, 0.3])
 
+    def test_model_classification_errors(self):
+        model = PredictionsModel(self.values, self.probs, self.actual)
+
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 1)) for row in range(5)],
+            1 - np.array([80, 70, 5, 10, 55]) / 100)
+
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 3)) for row in range(5)],
+            1 - np.array([80, 5, 20, 60, 50]) / 100)
+
     def test_model_regression(self):
         model = PredictionsModel(self.values, self.no_probs, self.actual)
         self.assertEqual(model.rowCount(), 5)
-        self.assertEqual(model.columnCount(), 2)
+        self.assertEqual(model.columnCount(), 4)
 
-        val, prob = model.data(model.index(0, 1))
+        val, prob = model.data(model.index(0, 2))
         self.assertEqual(val, 0)
         np.testing.assert_equal(prob, [])
 
-        val, prob = model.data(model.index(3, 1))
+        val, prob = model.data(model.index(3, 2))
         self.assertEqual(val, 1)
         np.testing.assert_equal(prob, [])
 
+    def test_model_regression_errors(self):
+        actual = np.array([40, 0, 12, 0, -45])
+        model = PredictionsModel(values=np.array([[0] * 5,
+                                                  [30, 0, 12, -5, -40]]),
+                                 probs=self.no_probs,
+                                 actual=actual,
+                                 reg_error_type=NO_ERR)
+
+        self.assertIsNone(model.data(model.index(0, 1)))
+
+        model.setRegErrorType(DIFF_ERROR)
+        diff_error = np.array([-10, 0, 0, -5, 5])
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 3)) for row in range(5)],
+            diff_error)
+        np.testing.assert_almost_equal(model.errorColumn(1), diff_error)
+
+        model.setRegErrorType(ABSDIFF_ERROR)
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 3)) for row in range(5)],
+            np.abs(diff_error))
+        np.testing.assert_almost_equal(model.errorColumn(1), np.abs(diff_error))
+
+        model.setRegErrorType(REL_ERROR)
+        rel_error = [-10 / 40, 0, 0, -np.inf, 5 / 45]
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 3)) for row in range(5)], rel_error)
+        np.testing.assert_almost_equal(model.errorColumn(1), rel_error)
+
+        model.setRegErrorType(ABSREL_ERROR)
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 3)) for row in range(5)], np.abs(rel_error))
+        np.testing.assert_almost_equal(model.errorColumn(1), np.abs(rel_error))
+
+        model.setRegErrorType(DIFF_ERROR)
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 1)) for row in range(5)], -actual)
+        np.testing.assert_almost_equal(model.errorColumn(0), -actual)
+
     def test_model_actual(self):
         model = PredictionsModel(self.values, self.no_probs, self.actual)
         self.assertEqual(model.data(model.index(2, 0), Qt.UserRole),
@@ -1336,12 +1505,16 @@ def test_model_header(self):
         model = PredictionsModel(self.values, self.probs, self.actual,
                                  ["a", "b"])
         self.assertEqual(model.headerData(0, Qt.Horizontal), "a")
-        self.assertEqual(model.headerData(1, Qt.Horizontal), "b")
-        self.assertEqual(model.headerData(3, Qt.Vertical), "4")
+        self.assertEqual(model.headerData(1, Qt.Horizontal), "error")
+        self.assertEqual(model.headerData(2, Qt.Horizontal), "b")
+        self.assertEqual(model.headerData(3, Qt.Horizontal), "error")
+        self.assertIsNone(model.headerData(4, Qt.Horizontal))
+        self.assertEqual(model.headerData(4, Qt.Vertical), "5")
 
         model = PredictionsModel(self.values, self.probs, self.actual, ["a"])
         self.assertEqual(model.headerData(0, Qt.Horizontal), "a")
-        self.assertIsNone(model.headerData(1, Qt.Horizontal))
+        self.assertEqual(model.headerData(1, Qt.Horizontal), "error")
+        self.assertIsNone(model.headerData(2, Qt.Horizontal))
         self.assertEqual(model.headerData(3, Qt.Vertical), "4")
 
@@ -1353,11 +1526,11 @@ def test_model_empty(self):
 
     def test_sorting_classification(self):
         model = PredictionsModel(self.values, self.probs, self.actual)
 
-        val, prob = model.data(model.index(0, 1))
+        val, prob = model.data(model.index(0, 2))
         self.assertEqual(val, 0)
         np.testing.assert_equal(prob, [0.8, 0, 0.2])
 
-        val, prob = model.data(model.index(3, 1))
+        val, prob = model.data(model.index(3, 2))
         self.assertEqual(val, 1)
         np.testing.assert_equal(prob, [0.1, 0.6, 0.3])
@@ -1366,13 +1539,13 @@ def test_sorting_classification(self):
         val, prob = model.data(model.index(0, 0))
         self.assertEqual(val, 2)
         np.testing.assert_equal(prob, [0, 0.1, 0.9])
-        val, prob = model.data(model.index(0, 1))
+        val, prob = model.data(model.index(0, 2))
         self.assertEqual(val, 1)
         np.testing.assert_equal(prob, [0.1, 0.6, 0.3])
 
         model.setProbInd([[2], [2]])
-        model.sort(1, Qt.AscendingOrder)
-        val, prob = model.data(model.index(0, 1))
+        model.sort(2, Qt.AscendingOrder)
+        val, prob = model.data(model.index(0, 2))
         self.assertEqual(val, 0)
         np.testing.assert_equal(prob, [0.9, 0.05, 0.05])
         val, prob = model.data(model.index(0, 0))
@@ -1399,6 +1572,27 @@ def test_sorting_classification(self):
         self.assertEqual([model.data(model.index(i, 0))[0]
                           for i in range(model.rowCount())], [2, 1, 1, 0, 0])
 
+    def test_sorting_classification_error(self):
+        model = PredictionsModel(self.values, self.probs, self.actual)
+
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 1)) for row in range(5)],
+            1 - np.array([80, 70, 5, 10, 55]) / 100)
+
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 3)) for row in range(5)],
+            1 - np.array([80, 5, 20, 60, 50]) / 100)
+
+        model.sort(1, Qt.AscendingOrder)
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 1)) for row in range(5)],
+            1 - np.array(sorted([80, 70, 5, 10, 55], reverse=True)) / 100)
+
+        model.sort(3, Qt.DescendingOrder)
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 3)) for row in range(5)],
+            1 - np.array(sorted([80, 5, 20, 60, 50])) / 100)
+
     def test_sorting_classification_different(self):
         model = PredictionsModel(self.values, self.probs, self.actual)
@@ -1407,22 +1601,22 @@ def test_sorting_classification_different(self):
         model.setProbInd([[1], [2]])
         model.sort(0, Qt.DescendingOrder)
         val, prob = model.data(model.index(0, 0))
         self.assertEqual(val, 2)
         np.testing.assert_equal(prob, [0, 0.1, 0.9])
-        val, prob = model.data(model.index(0, 1))
+        val, prob = model.data(model.index(0, 2))
         self.assertEqual(val, 1)
         np.testing.assert_equal(prob, [0.1, 0.6, 0.3])
 
-        model.sort(1, Qt.DescendingOrder)
+        model.sort(2, Qt.DescendingOrder)
         val, prob = model.data(model.index(0, 0))
         self.assertEqual(val, 1)
         np.testing.assert_equal(prob, [0.3, 0.7, 0])
-        val, prob = model.data(model.index(0, 1))
+        val, prob = model.data(model.index(0, 2))
         self.assertEqual(val, 0)
         np.testing.assert_equal(prob, [0.9, 0.05, 0.05])
 
     def test_sorting_regression(self):
         model = PredictionsModel(self.values, self.no_probs, self.actual)
-        self.assertEqual(model.data(model.index(0, 1))[0], 0)
-        self.assertEqual(model.data(model.index(3, 1))[0], 1)
+        self.assertEqual(model.data(model.index(0, 2))[0], 0)
+        self.assertEqual(model.data(model.index(3, 2))[0], 1)
 
         model.setProbInd([2])
         model.sort(0, Qt.AscendingOrder)
@@ -1439,6 +1633,39 @@ def test_sorting_regression(self):
         self.assertEqual([model.data(model.index(i, 0))[0]
                           for i in range(model.rowCount())], [0, 0, 1, 1, 2])
 
+    def test_sorting_regression_error(self):
+        actual = np.array([40, 0, 12, 0, -45])
+        model = PredictionsModel(values=np.array([[30, 0, 12, -5, -40]]),
+                                 probs=self.no_probs[:1],
+                                 actual=actual,
+                                 reg_error_type=NO_ERR)
+
+        model.setRegErrorType(DIFF_ERROR)
+        model.sort(1, Qt.AscendingOrder)
+        diff_error = [-10, 0, 0, -5, 5]
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 1)) for row in range(5)],
+            sorted(diff_error))
+
+        model.setRegErrorType(ABSDIFF_ERROR)
+        model.sort(1, Qt.AscendingOrder)
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 1)) for row in range(5)],
+            sorted(np.abs(diff_error)))
+
+        model.setRegErrorType(REL_ERROR)
+        rel_error = [-10 / 40, 0, 0, -np.inf, 5 / 45]
+        model.sort(1, Qt.AscendingOrder)
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 1)) for row in range(5)],
+            sorted(rel_error))
+
+        model.setRegErrorType(ABSREL_ERROR)
+        model.sort(1, Qt.AscendingOrder)
+        np.testing.assert_almost_equal(
+            [model.data(model.index(row, 1)) for row in range(5)],
+            sorted(np.abs(rel_error)))
+
 
 class TestPredictionsItemDelegate(GuiTest):
     def test_displayText(self):
@@ -1500,6 +1727,13 @@ def test_drawbar(self):
         self.assertEqual(rect.height(), 16)
 
 
+class TestNoopItemDelegate(GuiTest):
+    def test_donothing(self):
+        delegate = NoopItemDelegate()
+        delegate.paint(Mock(), Mock(), Mock(), Mock())
+        delegate.sizeHint()
+
+
 class TestRegressionItemDelegate(GuiTest):
     def test_format(self):
         delegate = RegressionItemDelegate("%6.3f")
@@ -1588,5 +1822,85 @@ def test_drawBar(self):
             el.reset_mock()
 
 
+class TestClassificationErrorDelegate(GuiTest):
+    def test_displayText(self):
+        delegate = ClassificationErrorDelegate()
+        self.assertEqual(delegate.displayText(0.12345, Mock()), "0.123")
+        self.assertEqual(delegate.displayText(np.nan, Mock()), "?")
+
+    def test_drawBar(self):
+        delegate = ClassificationErrorDelegate()
+        painter = Mock()
+        dr = painter.drawRect
+        index = Mock()
+        rect = QRect(0, 0, 256, 16)
+
+        delegate.cachedData = lambda *_: np.nan
+        delegate.drawBar(painter, Mock(), index, rect)
+        dr.assert_not_called()
+
+        delegate.cachedData = lambda *_: 1 / 4
+        delegate.drawBar(painter, Mock(), index, rect)
+        dr.assert_called_once()
+        r = dr.call_args[0][0]
+        self.assertEqual(r.x(), 0)
+        self.assertEqual(r.y(), 0)
+        self.assertEqual(r.width(), 64)
+        self.assertEqual(r.height(), 16)
+
+
+class TestRegressionErrorDelegate(GuiTest):
+    def test_displayText(self):
+        delegate = RegressionErrorDelegate("", True, 4)
+        self.assertEqual(delegate.displayText(0.1234567, Mock()), "")
+
+        delegate = RegressionErrorDelegate("%.5f", True, 4)
+        self.assertEqual(delegate.displayText(0.1234567, Mock()), "0.12346")
+        self.assertEqual(delegate.displayText(np.nan, Mock()), "?")
+        self.assertEqual(delegate.displayText(np.inf, Mock()), "∞")
+        self.assertEqual(delegate.displayText(-np.inf, Mock()), "-∞")
+
+    def test_drawBar(self):
+        painter = Mock()
+        dr = painter.drawRect
+        index = Mock()
+        rect = QRect(0, 0, 256, 16)
+
+        delegate = RegressionErrorDelegate("%.5f", True, 0)
+        delegate.drawBar(painter, Mock(), index, rect)
+        dr.assert_not_called()
+
+        delegate = RegressionErrorDelegate("%.5f", True, 12)
+
+        delegate.cachedData = lambda *_: np.nan
+        delegate.drawBar(painter, Mock(), index, rect)
+        dr.assert_not_called()
+
+        delegate.cachedData = lambda *_: 3
+        delegate.drawBar(painter, Mock(), index, rect)
+        r = dr.call_args[0][0]
+        self.assertEqual(r.x(), 128)
+        self.assertEqual(r.y(), 0)
+        self.assertEqual(r.width(), 32)
+        self.assertEqual(r.height(), 16)
+
+        delegate.cachedData = lambda *_: -3
+        delegate.drawBar(painter, Mock(), index, rect)
+        r = dr.call_args[0][0]
+        self.assertEqual(r.x(), 128)
+        self.assertEqual(r.y(), 0)
+        self.assertEqual(r.width(), -32)
+        self.assertEqual(r.height(), 16)
+
+        delegate = RegressionErrorDelegate("%.5f", False, 12)
+        delegate.cachedData = lambda *_: 3
+        delegate.drawBar(painter, Mock(), index, rect)
+        r = dr.call_args[0][0]
+        self.assertEqual(r.x(), 0)
+        self.assertEqual(r.y(), 0)
+        self.assertEqual(r.width(), 64)
+        self.assertEqual(r.height(), 16)
+
+
 if __name__ == "__main__":
     unittest.main()