Commit

Predictions: Make the output match the data shown in the view + minor fixes
janezd committed Feb 18, 2022
1 parent 82891fe commit 034ceab
Showing 2 changed files with 171 additions and 42 deletions.
90 changes: 50 additions & 40 deletions Orange/widgets/evaluate/owpredictions.py
@@ -6,13 +6,13 @@

import numpy
from AnyQt.QtWidgets import (
QTableView, QSplitter, QToolTip, QStyle, QApplication, QSizePolicy)
QTableView, QSplitter, QToolTip, QStyle, QApplication, QSizePolicy,
QPushButton)
from AnyQt.QtGui import QPainter, QStandardItem, QPen, QColor
from AnyQt.QtCore import (
Qt, QSize, QRect, QRectF, QPoint, QLocale,
QModelIndex, pyqtSignal, QTimer,
QItemSelectionModel, QItemSelection)
from AnyQt.QtWidgets import QPushButton

from orangewidget.report import plural
from orangewidget.utils.itemmodels import AbstractSortTableModel
@@ -108,7 +108,6 @@ def __init__(self):
predopts = gui.hBox(
None, sizePolicy=(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed))
self._prob_controls = [
predopts,
gui.widgetLabel(predopts, "Show probabilities for"),
gui.comboBox(
predopts, self, "shown_probs", contentsLength=30,
Expand All @@ -121,7 +120,7 @@ def __init__(self):
button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
predopts.layout().addWidget(self.reset_button)

scoreopts = gui.hBox(
self.score_opt_box = scoreopts = gui.hBox(
None, sizePolicy=(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed))
gui.checkBox(
scoreopts, self, "show_scores", "Show performance scores",
@@ -275,8 +274,12 @@ def _update_control_visibility(self):
for widget in self._target_controls:
widget.setVisible(self.is_discrete_class and self.show_scores)

self.score_opt_box.setVisible(bool(self.class_var))

def _set_class_values(self):
self.class_values = []
if self.is_discrete_class:
self.class_values += self.data.domain.class_var.values
for slot in self.predictors:
class_var = slot.predictor.domain.class_var
if class_var.is_discrete:
@@ -601,12 +604,6 @@ def _get_colors(self):
return colors

def _update_prediction_delegate(self):
def index(value):
if value in target.values:
return self.class_values.index(value)
else:
return None

self._delegates.clear()
colors = self._get_colors()
shown_class = "" # just to silence warnings about undefined var
@@ -624,30 +621,16 @@ def index(value):
if target.is_continuous:
delegate = PredictionsItemDelegate(
None, colors, (), (), target.format_str,
parent=self.predictionsview
)
parent=self.predictionsview)
sort_col_indices.append(None)
else:
if self.shown_probs == self.NO_PROBS:
shown_probs = []
elif self.shown_probs == self.DATA_PROBS:
shown_probs = [
index(value) for value in self.class_var.values]
elif self.shown_probs == self.MODEL_PROBS:
shown_probs = [
index(value) for value in target.values]
tooltip_probs = target.values
elif self.shown_probs == self.BOTH_PROBS:
tooltip_probs = [
value for value in self.class_var.values
if value in target.values]
shown_probs = list(map(index, tooltip_probs))
else:
shown_probs = [index(shown_class)]
shown_probs = self._shown_prob_indices(target, in_target=True)
if self.shown_probs in (self.MODEL_PROBS, self.BOTH_PROBS):
tooltip_probs = [self.class_values[i]
for i in shown_probs if i is not None]
delegate = PredictionsItemDelegate(
self.class_values, colors, shown_probs, tooltip_probs,
parent=self.predictionsview
)
parent=self.predictionsview)
sort_col_indices.append([col for col in shown_probs
if col is not None])
# QAbstractItemView does not take ownership of delegates, so we must
@@ -660,6 +643,26 @@ def index(value):
if self.predictionsview.model() is not None:
self.predictionsview.model().setProbInd(sort_col_indices)

def _shown_prob_indices(self, target: DiscreteVariable, in_target):
if self.shown_probs == self.NO_PROBS:
values = []
elif self.shown_probs == self.DATA_PROBS:
values = self.class_var.values
elif self.shown_probs == self.MODEL_PROBS:
values = target.values
elif self.shown_probs == self.BOTH_PROBS:
# Don't use set intersection because it's unordered!
values = (value for value in self.class_var.values
if value in target.values)
else:
shown_cls_idx = self.shown_probs - len(self.PROB_OPTS)
values = [self.class_var.values[shown_cls_idx]]

return [self.class_values.index(value)
if not in_target or value in target.values
else None
for value in values]

def _recompute_splitter_sizes(self):
if not self.data:
return
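The new _shown_prob_indices helper centralizes the column selection that the view uses. A minimal standalone sketch of the mapping it produces, with hypothetical class names and the widget's constants reduced to plain branches (class_values lists the data's classes first, then any extra classes known only to some model):

    # Hypothetical classes; "setosa" is in the data but unknown to the model.
    class_values = ["setosa", "versicolor", "virginica"]   # data + model union
    data_values = ["setosa", "versicolor"]                 # self.class_var.values
    model_values = ["versicolor", "virginica"]             # target.values

    # DATA_PROBS with in_target=True: one entry per data class,
    # None where the model cannot supply that probability.
    shown = [class_values.index(v) if v in model_values else None
             for v in data_values]
    assert shown == [None, 1]

    # BOTH_PROBS: the data's ordering, restricted to classes the model knows.
    both = [class_values.index(v) for v in data_values if v in model_values]
    assert both == [1]

The None entries are exactly what _update_prediction_delegate filters out when building sort_col_indices.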
@@ -729,7 +732,7 @@ def _commit_predictions(self):
predictions = self.data.transform(domain)
if newcolumns:
newcolumns = numpy.hstack(
[numpy.atleast_2d(cols) for cols in newcolumns])
[col.reshape((-1, 1)) for col in newcolumns])
with predictions.unlocked(predictions.metas):
predictions.metas[:, -newcolumns.shape[1]:] = newcolumns

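The one-line change above swaps numpy.atleast_2d for an explicit column reshape. The shape arithmetic shows why this matters now that every entry in newcolumns is a single 1-D column:

    import numpy

    col = numpy.arange(5)               # one 1-D prediction column, shape (5,)
    print(numpy.atleast_2d(col).shape)  # (1, 5): a row vector, which hstack
                                        # would concatenate along the wrong axis
    print(col.reshape((-1, 1)).shape)   # (5, 1): a proper column for hstack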
@@ -749,23 +752,30 @@ def _commit_predictions(self):
predictions = predictions[datamodel.mapToSourceRows(...)]
self.Outputs.predictions.send(predictions)

@staticmethod
def _add_classification_out_columns(slot, newmetas, newcolumns):
# Mapped or unmapped predictions?!
# Or provide a checkbox so the user decides?
def _add_classification_out_columns(self, slot, newmetas, newcolumns):
pred = slot.predictor
name = pred.name
values = pred.domain.class_var.values
probs = slot.results.unmapped_probabilities

# Column with class prediction
newmetas.append(DiscreteVariable(name=name, values=values))
newcolumns.append(slot.results.unmapped_predicted.reshape(-1, 1))
newmetas += [ContinuousVariable(name=f"{name} ({value})")
for value in values]
newcolumns.append(slot.results.unmapped_probabilities)
newcolumns.append(slot.results.unmapped_predicted)

# Columns with probability predictions (same as shown in the view)
for cls_idx in self._shown_prob_indices(pred.domain.class_var,
in_target=False):
value = self.class_values[cls_idx]
newmetas.append(ContinuousVariable(f"{name} ({value})"))
if value in values:
newcolumns.append(probs[:, values.index(value)])
else:
newcolumns.append(numpy.zeros(probs.shape[0]))

@staticmethod
def _add_regression_out_columns(slot, newmetas, newcolumns):
newmetas.append(ContinuousVariable(name=slot.predictor.name))
newcolumns.append(slot.results.unmapped_predicted.reshape((-1, 1)))
newcolumns.append(slot.results.unmapped_predicted)

def send_report(self):
def merge_data_with_predictions():
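Taken together, the rewritten _add_classification_out_columns exports one probability column per value selected in the view, reusing the model's column when it has one and zero-filling classes the model was not trained on. A minimal sketch of that rule, with hypothetical values:

    import numpy

    values = ["versicolor", "virginica"]      # pred.domain.class_var.values
    probs = numpy.array([[0.7, 0.3],          # unmapped_probabilities:
                         [0.2, 0.8]])         # one column per known class

    def column_for(value):
        # Same branch as the loop above: the model's column if it exists,
        # otherwise a column of zeros.
        if value in values:
            return probs[:, values.index(value)]
        return numpy.zeros(probs.shape[0])

    print(column_for("virginica"))  # [0.3 0.8]
    print(column_for("setosa"))     # [0. 0.]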
123 changes: 121 additions & 2 deletions Orange/widgets/evaluate/tests/test_owpredictions.py
@@ -9,10 +9,11 @@
from AnyQt.QtCore import QItemSelectionModel, QItemSelection, Qt

from Orange.base import Model
from Orange.classification import LogisticRegressionLearner
from Orange.classification import LogisticRegressionLearner, NaiveBayesLearner
from Orange.data.io import TabReader
from Orange.evaluation.scoring import TargetScore
from Orange.regression import LinearRegressionLearner
from Orange.preprocess import Remove
from Orange.regression import LinearRegressionLearner, MeanLearner
from Orange.widgets.tests.base import WidgetTest
from Orange.widgets.evaluate.owpredictions import (
OWPredictions, SharedSelectionModel, SharedSelectionStore, DataModel,
@@ -732,6 +733,124 @@ class _Scorer(TargetScore):
def compute_score(self, _, target, **__):
return [42 if target is None else target]

def test_output_wrt_shown_probs_1(self):
"""Data has one class less, models have same, different or one more"""
widget = self.widget
iris012 = self.iris
purge = Remove(class_flags=Remove.RemoveUnusedValues)
iris01 = purge(iris012[:100])
iris12 = purge(iris012[50:])

bayes01 = NaiveBayesLearner()(iris01)
bayes12 = NaiveBayesLearner()(iris12)
bayes012 = NaiveBayesLearner()(iris012)

self.send_signal(widget.Inputs.data, iris01)
self.send_signal(widget.Inputs.predictors, bayes01, 0)
self.send_signal(widget.Inputs.predictors, bayes12, 1)
self.send_signal(widget.Inputs.predictors, bayes012, 2)

for i, pred in enumerate(widget.predictors):
p = pred.results.unmapped_probabilities
p[0] = 10 + 100 * i + np.arange(p.shape[1])
pred.results.unmapped_predicted[:] = i

widget.shown_probs = widget.NO_PROBS
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 1, 2])

widget.shown_probs = widget.DATA_PROBS
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 0, 110, 2, 210, 211])

widget.shown_probs = widget.MODEL_PROBS
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 111, 2, 210, 211, 212])

widget.shown_probs = widget.BOTH_PROBS
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 2, 210, 211])

widget.shown_probs = widget.BOTH_PROBS + 1
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 10, 1, 0, 2, 210])

widget.shown_probs = widget.BOTH_PROBS + 2
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 11, 1, 110, 2, 211])
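The p[0] = 10 + 100 * i + np.arange(p.shape[1]) lines above plant unique sentinels in the first row, so each expected list documents exactly which model and class column every number came from:

    import numpy as np

    # Model i with n classes writes 10 + 100*i + [0..n-1] into row 0.
    for i, n_classes in enumerate([2, 2, 3]):   # bayes01, bayes12, bayes012
        print(i, 10 + 100 * i + np.arange(n_classes))
    # 0 [10 11]
    # 1 [110 111]
    # 2 [210 211 212]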

def test_output_wrt_shown_probs_2(self):
"""One model misses one class"""
widget = self.widget
iris012 = self.iris
purge = Remove(class_flags=Remove.RemoveUnusedValues)
iris01 = purge(iris012[:100])

bayes01 = NaiveBayesLearner()(iris01)
bayes012 = NaiveBayesLearner()(iris012)

self.send_signal(widget.Inputs.data, iris012)
self.send_signal(widget.Inputs.predictors, bayes01, 0)
self.send_signal(widget.Inputs.predictors, bayes012, 1)

for i, pred in enumerate(widget.predictors):
p = pred.results.unmapped_probabilities
p[0] = 10 + 100 * i + np.arange(p.shape[1])
pred.results.unmapped_predicted[:] = i

widget.shown_probs = widget.NO_PROBS
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 1])

widget.shown_probs = widget.DATA_PROBS
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 10, 11, 0, 1, 110, 111, 112])

widget.shown_probs = widget.MODEL_PROBS
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 111, 112])

widget.shown_probs = widget.BOTH_PROBS
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 111, 112])

widget.shown_probs = widget.BOTH_PROBS + 1
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 10, 1, 110])

widget.shown_probs = widget.BOTH_PROBS + 2
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 11, 1, 111])

widget.shown_probs = widget.BOTH_PROBS + 3
widget._commit_predictions()
out = self.get_output(widget.Outputs.predictions)
self.assertEqual(list(out.metas[0]), [0, 0, 1, 112])
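As a worked decode of the DATA_PROBS expectation above, [0, 10, 11, 0, 1, 110, 111, 112] is model 0's prediction, its two sentinel probabilities, a zero-filled column for the class it was not trained on, then model 1's prediction and its three sentinels:

    import numpy as np

    sentinels0 = 10 + 100 * 0 + np.arange(2)   # bayes01 knows 2 of 3 classes
    sentinels1 = 10 + 100 * 1 + np.arange(3)   # bayes012 knows all 3
    row = [0, *sentinels0, 0.0, 1, *sentinels1]
    print(row)   # [0, 10, 11, 0.0, 1, 110, 111, 112]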

def test_output_regression(self):
widget = self.widget
self.send_signal(widget.Inputs.data, self.housing)
self.send_signal(widget.Inputs.predictors,
LinearRegressionLearner()(self.housing), 0)
self.send_signal(widget.Inputs.predictors,
MeanLearner()(self.housing), 1)
out = self.get_output(widget.Outputs.predictions)
np.testing.assert_equal(
out.metas,
np.hstack([pred.results.predicted.T for pred in widget.predictors]))
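In the regression test, each slot's results.predicted presumably has shape (1, n_rows), one model per slot, so transposing yields one meta column per predictor; a quick shape check with hypothetical numbers:

    import numpy as np

    predicted = np.array([[24.0, 21.6]])          # results.predicted, (1, n_rows)
    cols = np.hstack([predicted.T, predicted.T])  # two predictors side by side
    print(predicted.T.shape, cols.shape)          # (2, 1) (2, 2)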

@patch("Orange.widgets.evaluate.owpredictions.usable_scorers",
Mock(return_value=[_Scorer]))
def test_change_target(self):
