bAnalysisResults

Classes¤

NumpyEncoder ¤

Bases: JSONEncoder

Special json encoder for numpy types

Source code in sanpy/bAnalysisResults.py
class NumpyEncoder(json.JSONEncoder):
    """Special json encoder for numpy types"""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        return json.JSONEncoder.default(self, obj)

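For example, NumpyEncoder can be passed to json.dumps() via its cls argument so that numpy scalars and arrays serialize cleanly. A minimal sketch (the spike dict and its keys below are made up for illustration):

import json
import numpy as np
from sanpy.bAnalysisResults import NumpyEncoder

# a dict holding numpy types that the default JSON encoder would reject
oneSpike = {
    "spikeNumber": np.int64(3),         # np.integer -> int
    "thresholdVal": np.float64(-45.2),  # np.floating -> float
    "clips": np.array([0.1, 0.2, 0.3]), # np.ndarray -> list
}

print(json.dumps(oneSpike, cls=NumpyEncoder, indent=4))
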
analysisResult ¤

Source code in sanpy/bAnalysisResults.py
class analysisResult:
    def __init__(self, theDict=None):
        """Create an anlysis item (for one spike)

        Args:
            theDict: Pre-existing dict when we load form h5 file
        """
        # this is the raw definition of analysis results (See above)
        # pull from this to create key/value self.rDict
        # self._dDict = analysisResultDict
        defaultDict = analysisResultDict
        # these are simple key/value pairs that we fill in during detection
        self._rDict = {}
        for k, v in defaultDict.items():
            default = v["default"]
            self._rDict[k] = default

        if theDict is not None:
            for k, v in theDict.items():
                self[k] = v  # calls __setitem__()

    # this was interfering with converting to DataFrame ???
    """
    def __str__(self):
        printList = []
        for k,v in self._rDict.items():
            if isinstance(v, dict):
                for k2,v2 in v.items():
                    printList.append(f'  {k2} : {v2} {type(v2)}')
            else:
                printList.append(f'{k} : {v} {type(v)}')
        return '\n'.join(printList)
    """

    def print(self):
        printList = []
        for k, v in self._rDict.items():
            if isinstance(v, list):
                for item in v:
                    for k2, v2 in item.items():
                        printList.append(f"  {k2} : {v2} {type(v2)}")
            else:
                printList.append(f"{k} : {v} {type(v)}")
        return "\n".join(printList)

    def addNewKey(self, theKey, theDefault=None):
        """
        Add a new key to this spike.

        Returns: (bool) True if the new key was added, False if the key already exists.
        """
        if theDefault is None:
            # theType = 'float'
            theDefault = float("nan")

        # check if key exists
        keyExists = theKey in self._rDict.keys()
        addedKey = False
        if keyExists:
            # key exists, don't modify
            # logger.warning(f'The key "{theKey}" already exists and has value "{self._rDict[theKey]}"')
            pass
        else:
            self._rDict[theKey] = theDefault
            addedKey = True

        #
        return addedKey

    def asDict(self):
        """
        Returns underlying dictionary
        """
        return self._rDict

    def __getitem__(self, key):
        # to mimic a dictionary
        ret = None
        try:
            # return self._dDict[key]['currentValue']
            ret = self._rDict[key]
        except KeyError as e:
            logger.error(f'Error getting key "{key}"')
            logger.error(f'possible keys are: {self._rDict.keys()}')
            raise
        #
        return ret

    def __setitem__(self, key, value):
        # to mimic a dictionary
        try:
            # self._dDict[key]['currentValue'] = value
            self._rDict[key] = value
        except KeyError as e:
            logger.error(f"{e}")

    def items(self):
        # to mimic a dictionary
        return self._rDict.items()

    def keys(self):
        # to mimic a dictionary
        return self._rDict.keys()

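A rough usage sketch of the dict-like interface; the key name used below is illustrative and may not match the defaults in analysisResultDict:

from sanpy.bAnalysisResults import analysisResult

oneSpike = analysisResult()            # every key starts at its default value
oneSpike["spikeNumber"] = 0            # __setitem__(); illustrative key name
print(oneSpike["spikeNumber"])         # __getitem__()
print(list(oneSpike.keys())[0:5])      # a few of the available result keys
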
Functions¤

__init__(theDict=None) ¤

Create an analysis item (for one spike)

Args: theDict: Pre-existing dict when we load from an h5 file

Source code in sanpy/bAnalysisResults.py
def __init__(self, theDict=None):
    """Create an anlysis item (for one spike)

    Args:
        theDict: Pre-existing dict when we load form h5 file
    """
    # this is the raw definition of analysis results (See above)
    # pull from this to create key/value self.rDict
    # self._dDict = analysisResultDict
    defaultDict = analysisResultDict
    # these are simple key/value pairs that we fill in during detection
    self._rDict = {}
    for k, v in defaultDict.items():
        default = v["default"]
        self._rDict[k] = default

    if theDict is not None:
        for k, v in theDict.items():
            self[k] = v  # calls __setitem__()
addNewKey(theKey, theDefault=None) ¤

Add a new key to this spike.

Returns: (bool) True if the new key was added, False if the key already exists.

Source code in sanpy/bAnalysisResults.py
def addNewKey(self, theKey, theDefault=None):
    """
    Add a new key to this spike.

    Returns: (bool) True if the new key was added, False if the key already exists.
    """
    if theDefault is None:
        # theType = 'float'
        theDefault = float("nan")

    # check if key exists
    keyExists = theKey in self._rDict.keys()
    addedKey = False
    if keyExists:
        # key exists, don't modify
        # logger.warning(f'The key "{theKey}" already exists and has value "{self._rDict[theKey]}"')
        pass
    else:
        self._rDict[theKey] = theDefault
        addedKey = True

    #
    return addedKey
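
A short sketch of addNewKey(); "userScore" is a hypothetical key, not one of the built-in analysis results:

from sanpy.bAnalysisResults import analysisResult

oneSpike = analysisResult()

# first call creates the key with the given default and returns True
print(oneSpike.addNewKey("userScore", theDefault=0.0))  # True

# second call finds the key already exists and returns False
print(oneSpike.addNewKey("userScore"))  # False
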
asDict() ¤

Returns underlying dictionary

Source code in sanpy/bAnalysisResults.py
def asDict(self):
    """
    Returns underlying dictionary
    """
    return self._rDict

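Because asDict() exposes the plain key/value dict, a single spike can be serialized with the NumpyEncoder shown above (assuming the default values are JSON-serializable or numpy types). A sketch:

import json
from sanpy.bAnalysisResults import analysisResult, NumpyEncoder

oneSpike = analysisResult()
spikeDict = oneSpike.asDict()   # the underlying key/value dict
print(json.dumps(spikeDict, cls=NumpyEncoder, indent=4))
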
analysisResultList ¤

Class encapsulating a list of analysis results.

Each row is an analysisResultDict for one spike.

These are the keys in the bAnalysis spike dict and the columns in output reports.

Source code in sanpy/bAnalysisResults.py
class analysisResultList:
    """Class encapsulating a list of analysis results.

    Each row is an analysisResultDict for one spike.

    These are keys in bAnalysis_ spike dict and columns in output reports
    """

    def __init__(self):
        # one copy for entire list

        # TODO: put xxx in a function getAnalysisResltDict()
        self._dDict = analysisResultDict

        # list of analysisResultDict
        self._myList = []

        self._iterIdx = -1

    def setFromListDict(self, listOfDict: List[dict]):
        """Set analysis results from a list of dict.

        Used when loading sanpy.bAnalysis from h5 file.

        When we create self (during spike detection) we have a list of analysisResult objects.
        When we save/load we have a list of dicts.

        This assumes we re-create self every time we run spike detection.
        """

        # do not do this, we are an analysisResultList as a list of analysisResult
        # self._myList = listOfDict

        self._myList = []
        for oneDict in listOfDict:
            oneAnalysisResult = analysisResult(theDict=oneDict)
            self._myList.append(oneAnalysisResult)

    def analysisDate(self):
        if len(self) > 0:
            return self._myList[0]["analysisDate"]
        else:
            return None

    def analysisTime(self):
        if len(self) > 0:
            return self._myList[0]["analysisTime"]
        else:
            return None

    def _old_save(self, saveBase):
        savePath = saveBase + "-analysis.json"

        analysisList = self.asList()

        # print(analysisList[0].print())
        print(self._myList[0])

        with open(savePath, "w") as f:
            json.dump(analysisList, f, cls=NumpyEncoder, indent=4)

    def _old_load(self, loadBase):
        loadPath = loadBase + "-analysis.json"

        if not os.path.isfile(loadPath):
            logger.error(f"Did not find file: {loadPath}")
            return

        with open(loadPath, "r") as f:
            self._myList = json.load(f)

    def appendDefault(self):
        """Append a spike to analysis.

        Used in bAnalysis spike detection.
        """
        oneResult = analysisResult()
        self._myList.append(oneResult)

    def appendAnalysis(self, analysisResultList):
        for analysisResult in analysisResultList:
            # analysisResult is for one spike
            self._myList.append(analysisResult)

    def addAnalysisResult(self, theKey, theDefault=None):
        # go through list and add to each [i] dict
        for spike in self:
            spike.addNewKey(theKey, theDefault=theDefault)

    def asList(self):
        """
        Return underlying list.
        """
        # return [spike.asDict() for spike in self._myList]
        return [x.asDict() for x in self._myList]

    def asDataFrame(self):
        """
        Note: underlying _myList is a list of analysisResult
        """
        return pd.DataFrame(self.asList())

    def __getitem__(self, key):
        """
        Allow [] indexing with self[int].
        """
        try:
            # return self._dDict[key]['currentValue']
            return self._myList[key]
        except IndexError as e:
            logger.error(f"{e}")
            # logger.error(f'possible keys are: {self._myList.keys()}')

    def __len__(self):
        """Allow len() with len(this)"""
        return len(self._myList)

    def __iter__(self):
        """Allow iteration with "for item in self"
        """
        self._iterIdx = -1  # reset iteration state
        return self

    def __next__(self):
        """Allow iteration with "for item in self"
        """
        self._iterIdx += 1
        if self._iterIdx >= len(self._myList):
            self._iterIdx = -1  # reset to initial value
            raise StopIteration
        else:
            return self._myList[self._iterIdx]

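A sketch of typical use during spike detection; the per-spike keys assigned below ("spikeNumber", "userScore") are illustrative:

from sanpy.bAnalysisResults import analysisResultList

spikes = analysisResultList()

# append one default-initialized analysisResult per detected spike
for spikeIdx in range(3):
    spikes.appendDefault()
    spikes[spikeIdx]["spikeNumber"] = spikeIdx  # __getitem__ then __setitem__

print(len(spikes))  # 3, via __len__()

# add a new column to every spike
spikes.addAnalysisResult("userScore", theDefault=0.0)

# iterate spikes via __iter__ / __next__
for spike in spikes:
    print(spike["spikeNumber"], spike["userScore"])
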
Functions¤

__getitem__(key) ¤

Allow [] indexing with self[int].

Source code in sanpy/bAnalysisResults.py
def __getitem__(self, key):
    """
    Allow [] indexing with self[int].
    """
    try:
        # return self._dDict[key]['currentValue']
        return self._myList[key]
    except IndexError as e:
        logger.error(f"{e}")
__iter__() ¤

Allow iteration with "for item in self"

Source code in sanpy/bAnalysisResults.py
def __iter__(self):
    """Allow iteration with "for item in self"
    """
    self._iterIdx = -1  # reset iteration state
    return self
__len__() ¤

Allow len() with len(this)

Source code in sanpy/bAnalysisResults.py
def __len__(self):
    """Allow len() with len(this)"""
    return len(self._myList)
__next__() ¤

Allow iteration with "for item in self"

Source code in sanpy/bAnalysisResults.py
def __next__(self):
    """Allow iteration with "for item in self"
    """
    self._iterIdx += 1
    if self._iterIdx >= len(self._myList):
        self._iterIdx = -1  # reset to initial value
        raise StopIteration
    else:
        return self._myList[self._iterIdx]
appendDefault() ¤

Append a spike to analysis.

Used in bAnalysis spike detection.

Source code in sanpy/bAnalysisResults.py
def appendDefault(self):
    """Append a spike to analysis.

    Used in bAnalysis spike detection.
    """
    oneResult = analysisResult()
    self._myList.append(oneResult)
asDataFrame() ¤

Note: underlying _myList is a list of analysisResult

Source code in sanpy/bAnalysisResults.py
def asDataFrame(self):
    """
    Note: underlying _myList is a list of analysisResult
    """
    return pd.DataFrame(self.asList())
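
For example, the per-spike results can be inspected as a pandas DataFrame, one row per spike and one column per result key:

from sanpy.bAnalysisResults import analysisResultList

spikes = analysisResultList()
spikes.appendDefault()
spikes.appendDefault()

df = spikes.asDataFrame()
print(df.shape)                  # (2, number of result keys)
print(df.columns.tolist()[0:5])  # a few of the column names
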
asList() ¤

Return underlying list.

Source code in sanpy/bAnalysisResults.py
def asList(self):
    """
    Return underlying list.
    """
    # return [spike.asDict() for spike in self._myList]
    return [x.asDict() for x in self._myList]
setFromListDict(listOfDict) ¤

Set analysis results from a list of dict.

Used when loading sanpy.bAnalysis from an h5 file.

When we create self (during spike detection) we have a list of analysisResult objects. When we save/load we have a list of dicts.

This assumes we re-create self every time we run spike detection.

Source code in sanpy/bAnalysisResults.py
def setFromListDict(self, listOfDict: List[dict]):
    """Set analysis results from a list of dict.

    Used when loading sanpy.bAnalysis from h5 file.

    When we create self (during spike detection) we have a list of analysisResult objects.
    When we save/load we have a list of dicts.

    This assumes we re-create self every time we run spike detection.
    """

    # do not do this, we are an analysisResultList as a list of analysisResult
    # self._myList = listOfDict

    self._myList = []
    for oneDict in listOfDict:
        oneAnalysisResult = analysisResult(theDict=oneDict)
        self._myList.append(oneAnalysisResult)

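A sketch of the save/load round trip this supports: asList() flattens the results to plain dicts (e.g. for the h5 file), and setFromListDict() rebuilds the analysisResult wrappers on load:

from sanpy.bAnalysisResults import analysisResultList

# "save": flatten to a list of plain dicts
spikes = analysisResultList()
spikes.appendDefault()
spikes.appendDefault()
listOfDict = spikes.asList()

# "load": rebuild a fresh analysisResultList from those dicts
loadedSpikes = analysisResultList()
loadedSpikes.setFromListDict(listOfDict)
print(len(loadedSpikes))  # 2
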
Functions¤

printDocs() ¤

Print out human-readable analysis result definitions and convert them to a markdown table.

Requires: pip install tabulate

See: bDetection.printDocs()

Source code in sanpy/bAnalysisResults.py
def printDocs():
    """Print out human readable detection parameters and convert to markdown table.

    Requires:
        pip install tabulate

    See: bDetection.printDocs()
    """
    import pandas as pd
    from datetime import datetime

    logger.info("Ensure there are no errors")

    dictList = []
    for k, v in analysisResultDict.items():
        # iterating on getDefaultDict() to ensure all code above has valid k/v pairs
        # lineStr = k + '\t'
        oneDict = {
            "Name": k,
        }
        for k2 in getDefaultDict().keys():
            # print(f'  {k}: {k2}: {v[k2]}')
            # lineStr += f'{v[k2]}' + '\t'
            oneDict[k2] = v[k2]
        #
        # print(lineStr)

        dictList.append(oneDict)

        # check that k is in headerDefaultDict
        for k3 in v:
            if k3 not in getDefaultDict().keys():
                logger.error(f'Found extra key "{k3}" in analysisResultDict["{k}"]')

    #
    df = pd.DataFrame(dictList)

    if 1:
        # to html for mkdocs md file
        # htmlStr = df.to_markdown()
        htmlStr = df.to_html()
        myDate = datetime.today().strftime("%Y-%m-%d")
        print(f"Generated {myDate} with sanpy.analysisVersion {sanpy.analysisVersion}")
        print(htmlStr)

    if 0:
        path = "/Users/cudmore/Desktop/sanpy-analysis-results.csv"
        print("saving:", path)
        df.to_csv(path, index=False)
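
Usage is a single call from a script or notebook; the active code path prints an html table (tabulate is only needed if the to_markdown() variant is used). A sketch:

import sanpy.bAnalysisResults

# prints the generation date and a table describing each analysis result key
sanpy.bAnalysisResults.printDocs()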