# evaluator.py
  1. ''' Incremental-Classifier Learning
  2. Authors : Khurram Javed, Muhammad Talha Paracha
  3. Maintainer : Khurram Javed
  4. Lab : TUKL-SEECS R&D Lab
  5. Email : 14besekjaved@seecs.edu.pk '''
  6. import logging
  7. import numpy as np
  8. import torch
  9. import torch.nn.functional as F
  10. from torch.autograd import Variable
  11. from torchnet.meter import confusionmeter
  12. from tqdm import tqdm
  13. logger = logging.getLogger('iCARL')
  14. class EvaluatorFactory():
  15. '''
  16. This class is used to get different versions of evaluators
  17. '''
  18. def __init__(self):
  19. pass
  20. @staticmethod
  21. def get_evaluator(testType="rmse", cuda=True):
  22. if testType == "rmse":
  23. return DocumentMseEvaluator(cuda)
  24. class DocumentMseEvaluator():
  25. '''
  26. Evaluator class for softmax classification
  27. '''
  28. def __init__(self, cuda):
  29. self.cuda = cuda
  30. def evaluate(self, model, iterator):
  31. model.eval()
  32. lossAvg = None
  33. with torch.no_grad():
  34. for img, target in tqdm(iterator):
  35. if self.cuda:
  36. img, target = img.cuda(), target.cuda()
  37. response = model(Variable(img))
  38. # print (response[0])
  39. # print (target[0])
  40. loss = F.mse_loss(response, Variable(target.float()))
  41. loss = torch.sqrt(loss)
  42. if lossAvg is None:
  43. lossAvg = loss
  44. else:
  45. lossAvg += loss
  46. # logger.debug("Cur loss %s", str(loss))
  47. lossAvg /= len(iterator)
  48. logger.info("Avg Val Loss %s", str((lossAvg).cpu().data.numpy()))