Score: Name scorers and use names in ScoreTable
janezd committed Jan 7, 2023
1 parent 73bd57d commit 6d5f0ac
Showing 4 changed files with 46 additions and 16 deletions.
Orange/evaluation/scoring.py (16 additions, 0 deletions)
@@ -37,6 +37,7 @@ def __new__(mcs, name, bases, dict_, **kwargs):
         if not kwargs.get("abstract"):
             # Don't use inherited names, look into dict_
             cls.name = dict_.get("name", name)
+            cls.long_name = dict_.get("long_name", cls.name)
             cls.registry[name] = cls
         else:
             cls.registry = {}
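The added line gives every registered scorer a long_name that falls back to its short name, so both attributes are always present on concrete Score subclasses. A minimal standalone sketch of this fallback behavior (illustrative only; the class Registry below is hypothetical and stands in for Orange's actual Score metaclass):

class Registry(type):
    """Illustrative stand-in for the Score metaclass above."""
    registry = {}

    def __new__(mcs, name, bases, dict_, **kwargs):
        cls = super().__new__(mcs, name, bases, dict_)
        if not kwargs.get("abstract"):
            # Don't use inherited names, look into dict_
            cls.name = dict_.get("name", name)
            cls.long_name = dict_.get("long_name", cls.name)
            mcs.registry[name] = cls
        return cls

class Recall(metaclass=Registry):
    name = "Recall"  # no long_name given here ...

assert Recall.long_name == "Recall"  # ... so it falls back to name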
@@ -139,6 +140,7 @@ def is_compatible(domain: Domain) -> bool:
 # pylint: disable=invalid-name
 class CA(ClassificationScore):
     __wraps__ = skl_metrics.accuracy_score
+    name = "CA"
     long_name = "Classification accuracy"
     priority = 20
 
@@ -189,16 +191,20 @@ def compute_score(self, results, target=None, average='binary'):
 
 class Precision(TargetScore):
     __wraps__ = skl_metrics.precision_score
+    name = "Prec"
+    long_name = "Precision"
     priority = 40
 
 
 class Recall(TargetScore):
     __wraps__ = skl_metrics.recall_score
+    name = long_name = "Recall"
     priority = 50
 
 
 class F1(TargetScore):
     __wraps__ = skl_metrics.f1_score
+    name = long_name = "F1"
     priority = 30
 
 
@@ -217,6 +223,7 @@ class AUC(ClassificationScore):
     __wraps__ = skl_metrics.roc_auc_score
     separate_folds = True
     is_binary = True
+    name = "AUC"
     long_name = "Area under ROC curve"
     priority = 10
 
@@ -291,6 +298,8 @@ class LogLoss(ClassificationScore):
     """
     __wraps__ = skl_metrics.log_loss
     priority = 120
+    name = "LogLoss"
+    long_name = "Logistic loss"
     default_visible = False
 
     def compute_score(self, results, eps=1e-15, normalize=True,
@@ -308,6 +317,8 @@ def compute_score(self, results, eps=1e-15, normalize=True,
 class Specificity(ClassificationScore):
     is_binary = True
     priority = 110
+    name = "Spec"
+    long_name = "Specificity"
     default_visible = False
 
     @staticmethod
@@ -361,11 +372,13 @@ def compute_score(self, results, target=None, average="binary"):
 
 class MSE(RegressionScore):
     __wraps__ = skl_metrics.mean_squared_error
+    name = "MSE"
     long_name = "Mean square error"
     priority = 20
 
 
 class RMSE(RegressionScore):
+    name = "RMSE"
     long_name = "Root mean square error"
 
     def compute_score(self, results):
@@ -375,18 +388,21 @@ def compute_score(self, results):
 
 class MAE(RegressionScore):
     __wraps__ = skl_metrics.mean_absolute_error
+    name = "MAE"
     long_name = "Mean absolute error"
     priority = 40
 
 
 # pylint: disable=invalid-name
 class R2(RegressionScore):
     __wraps__ = skl_metrics.r2_score
+    name = "R2"
     long_name = "Coefficient of determination"
     priority = 50
 
 
 class CVRMSE(RegressionScore):
+    name = "CVRMSE"
     long_name = "Coefficient of variation of the RMSE"
     priority = 110
     default_visible = False
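With these additions, every concrete scorer in the registry carries both a short name and a long_name. A quick way to inspect the result (a usage sketch against the Score registry shown above; the exact set of scorers printed depends on the installed Orange version):

from Orange.evaluation.scoring import Score

for qualname, scorer in Score.registry.items():
    print(f"{qualname}: name={scorer.name!r}, long_name={scorer.long_name!r}")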
Orange/widgets/evaluate/owtestandscore.py (1 addition, 0 deletions)
@@ -655,6 +655,7 @@ def update_stats_model(self):
                 item.setData(float(stat.value[0]), Qt.DisplayRole)
             else:
                 item.setToolTip(str(stat.exception))
+                # pylint: disable=unsubscriptable-object
                 if self.score_table.show_score_hints[scorer.__name__]:
                     has_missing_scores = True
             row.append(item)
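Note that show_score_hints is keyed by the scorer's class name (scorer.__name__, e.g. "CA"), not by the display label built from name and long_name; the labels introduced in utils.py below are presentation only. A tiny sketch of the distinction (hypothetical hints dict and a stand-in class, not Orange code):

hints = {"CA": True, "AUC": False}  # hypothetical hint store, keyed by __name__

class CA:  # stand-in for the real scorer class
    name = "CA"
    long_name = "Classification accuracy"

label = CA.long_name + (f" ({CA.name})" if CA.name != CA.long_name else "")
assert label == "Classification accuracy (CA)"
assert hints[CA.__name__] is True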
Orange/widgets/evaluate/tests/test_utils.py (20 additions, 14 deletions)
@@ -3,6 +3,7 @@
 import unittest
 import collections
 from distutils.version import LooseVersion
+from itertools import count
 from unittest.mock import patch
 
 import numpy as np
@@ -12,7 +13,7 @@
 from AnyQt.QtCore import QPoint, Qt
 
 import Orange
-from Orange.evaluation.scoring import Score, RMSE, AUC, CA, F1, Specificity
+from Orange.evaluation.scoring import Score, AUC, CA, F1, Specificity
 from Orange.widgets.evaluate.utils import ScoreTable, usable_scorers
 from Orange.widgets.tests.base import GuiTest
 from Orange.data import Table, DiscreteVariable, ContinuousVariable
@@ -47,12 +48,12 @@ def setUp(self):
         class NewScore(Score):
             name = "new score"
 
+        self.NewScore = NewScore  # pylint: disable=invalid-name
+
         self.orig_hints = ScoreTable.show_score_hints
         hints = ScoreTable.show_score_hints = self.orig_hints.default.copy()
         hints.update(dict(F1=True, CA=False, AUC=True, Recall=True,
                           Specificity=False, NewScore=True))
-        self.name_to_qualname = {score.name: score.__name__
-                                 for score in Score.registry.values()}
         self.score_table = ScoreTable(None)
         self.score_table.update_header([F1, CA, AUC, Specificity, NewScore])
         self.score_table._update_shown_columns()
@@ -72,23 +73,28 @@ def addAction(menu, a):
             return action
 
         def execmenu(*_):
-            scores = ["F1", "CA", "AUC", "Specificity", "new score"]
-            self.assertEqual(list(actions)[2:], scores)
+            # pylint: disable=unsubscriptable-object,unsupported-assignment-operation
+            scorers = [F1, CA, AUC, Specificity, self.NewScore]
+            self.assertEqual(list(actions)[2:], ['F1',
+                                                 'Classification accuracy (CA)',
+                                                 'Area under ROC curve (AUC)',
+                                                 'Specificity (Spec)',
+                                                 'new score'])
             header = self.score_table.view.horizontalHeader()
-            for i, (name, action) in enumerate(actions.items()):
+            for i, action, scorer in zip(count(), actions.values(), scorers):
                 if i >= 2:
                     self.assertEqual(action.isChecked(),
-                                     hints[self.name_to_qualname[name]],
-                                     msg=f"error in section {name}")
+                                     hints[scorer.__name__],
+                                     msg=f"error in section {scorer.name}")
                     self.assertEqual(header.isSectionHidden(i),
-                                     hints[self.name_to_qualname[name]],
-                                     msg=f"error in section {name}")
-            actions["CA"].triggered.emit(True)
+                                     hints[scorer.__name__],
+                                     msg=f"error in section {scorer.name}")
+            actions["Classification accuracy (CA)"].triggered.emit(True)
             hints["CA"] = True
             for k, v in hints.items():
                 self.assertEqual(self.score_table.show_score_hints[k], v,
                                  msg=f"error at {k}")
-            actions["AUC"].triggered.emit(False)
+            actions["Area under ROC curve (AUC)"].triggered.emit(False)
             hints["AUC"] = False
            for k, v in hints.items():
                 self.assertEqual(self.score_table.show_score_hints[k], v,
@@ -99,8 +105,8 @@ def execmenu(*_):
         # Assertions are made within `menuexec` since they check the
         # instances of `QAction`, which are invalid (destroyed by Qt?) after
         # `menuexec` finishes.
-        with unittest.mock.patch("AnyQt.QtWidgets.QMenu.addAction", addAction), \
-                unittest.mock.patch("AnyQt.QtWidgets.QMenu.exec", execmenu):
+        with patch("AnyQt.QtWidgets.QMenu.addAction", addAction), \
+                patch("AnyQt.QtWidgets.QMenu.exec", execmenu):
             self.score_table.show_column_chooser(QPoint(0, 0))
 
     def test_sorting(self):
Orange/widgets/evaluate/utils.py (9 additions, 2 deletions)
@@ -184,9 +184,16 @@ def show_column_chooser(self, pos):
         header = self.view.horizontalHeader()
         for col in range(1, self.model.columnCount()):
             item = self.model.horizontalHeaderItem(col)
-            action = menu.addAction(item.data(Qt.DisplayRole))
-            action.setCheckable(True)
             qualname = item.data(Qt.UserRole)
+            if col < 3:
+                option = item.data(Qt.DisplayRole)
+            else:
+                score = Score.registry[qualname]
+                option = score.long_name
+                if score.name != score.long_name:
+                    option += f" ({score.name})"
+            action = menu.addAction(option)
+            action.setCheckable(True)
             action.setChecked(self.show_score_hints[qualname])
 
             @action.triggered.connect
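The menu option text now comes from the Score registry rather than from the header item, so the column chooser lists entries such as "Classification accuracy (CA)", or just "Recall" when the short and long names coincide, exactly as the updated test asserts. The labeling rule in isolation (a sketch mirroring the branch added above, not the widget code itself):

def menu_label(score):
    # Long name first; append the short name only when it differs,
    # mirroring show_column_chooser above.
    label = score.long_name
    if score.name != score.long_name:
        label += f" ({score.name})"
    return label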
