
Neurosuite

pynapple.io.neurosuite

⚠ DEPRECATED: This will be removed in version 1.0.0. Use nwbmatic or neuroconv instead.

Class and functions for loading data processed with the Neurosuite (Klusters, Neuroscope, NDmanager)

@author: Guillaume Viejo

NeuroSuite

Bases: BaseLoader

Loader for Klusters data
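
A minimal usage sketch (the session path below is hypothetical; in older pynapple releases this loader was typically obtained through nap.load_session(path, 'neurosuite')):

from pynapple.io.neurosuite import NeuroSuite

# Hypothetical session folder processed with Klusters/Neuroscope and
# containing the pynapplenwb subfolder created by pynapple
data = NeuroSuite("/path/to/MySession")

print(data.spikes)  # nap.TsGroup with one entry per unit
print(data.epochs)  # dict of nap.IntervalSet loaded from the NWB file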

Source code in pynapple/io/neurosuite.py
class NeuroSuite(BaseLoader):
    """
    Loader for Klusters data
    """

    def __init__(self, path):
        """
        Instantiate the data class from a neurosuite folder.

        Parameters
        ----------
        path : str
            The path to the data.
        """
        path = Path(path)
        self.basename = path.name
        self.time_support = None

        super().__init__(path)

        self.load_nwb_spikes()

    def load_nwb_spikes(self):
        """
        Read the NWB file to extract the spike times.

        Returns
        -------
        bool
            True if spikes were found in the NWB file, False otherwise.
        """

        io = NWBHDF5IO(self.nwbfilepath, "r")
        nwbfile = io.read()

        if nwbfile.units is None:
            io.close()
            return False
        else:
            units = nwbfile.units.to_dataframe()
            spikes = {
                n: nap.Ts(t=units.loc[n, "spike_times"], time_units="s")
                for n in units.index
            }

            self.spikes = nap.TsGroup(
                spikes,
                time_support=self.time_support,
                time_units="s",
                group=units["group"],
            )

            if not np.all(units["location"] == ""):
                self.spikes.set_info(location=units["location"])

            io.close()
            return True

    def load_lfp(
        self,
        filename=None,
        channel=None,
        extension=".eeg",
        frequency=1250.0,
        precision="int16",
        bytes_size=2,
    ):
        """
        Load the LFP.

        Parameters
        ----------
        filename : str, optional
            The filename of the lfp file.
            Useful if multiple binary files are present in the data directory.
        channel : int or list of int, optional
            The channel(s) to load. If None, return a memory map of the whole
            file to avoid memory errors.
        extension : str, optional
            The file extension (.eeg, .dat, .lfp). Make sure the frequency matches.
        frequency : float, optional
            Sampling frequency. Default 1250 Hz for the eeg file.
        precision : str, optional
            The precision of the binary file.
        bytes_size : int, optional
            Bytes per sample in the lfp file.

        Raises
        ------
        RuntimeError
            If the lfp/eeg/dat file cannot be found.

        Returns
        -------
        Tsd or TsdFrame
            The lfp in a time series format
        """
        if filename is not None:
            filepath = self.path / filename
        else:
            eegfile = list(self.path.glob(f"*{extension}"))

            if not len(eegfile):
                raise RuntimeError(
                    "Path {} contains no {} files;".format(self.path, extension)
                )

            filepath = eegfile[0]

        self.load_neurosuite_xml(self.path)

        n_channels = int(self.nChannels)

        # Infer the number of samples from the file size
        with open(filepath, "rb") as f:
            startoffile = f.seek(0, 0)
            endoffile = f.seek(0, 2)
        n_samples = int((endoffile - startoffile) / n_channels / bytes_size)
        duration = n_samples / frequency
        fp = np.memmap(filepath, np.dtype(precision), "r", shape=(n_samples, n_channels))
        timestep = np.arange(0, n_samples) / frequency

        time_support = nap.IntervalSet(start=0, end=duration, time_units="s")

        if channel is None:
            return nap.TsdFrame(
                t=timestep, d=fp, time_units="s", time_support=time_support
            )
        elif type(channel) is int:
            return nap.Tsd(
                t=timestep, d=fp[:, channel], time_units="s", time_support=time_support
            )
        elif type(channel) is list:
            return nap.TsdFrame(
                t=timestep,
                d=fp[:, channel],
                time_units="s",
                time_support=time_support,
                columns=channel,
            )

    def read_neuroscope_intervals(self, name=None, path2file=None):
        """
        Read a Neuroscope .evt file in which odd rows mark the beginning
        of the intervals and even rows mark their ends.
        If the epoch is already present in the nwb, just provide the name.
        Otherwise the events are loaded from the data directory.
        If only the path is provided, the name is taken from the file.

        Parameters
        ----------
        name: str, optional
            Name of the epoch in the nwb file, e.g. "rem", or the desired name
            under which to save the data in the nwb.

        path2file: str, optional
            Path of the file you want to load.

        Returns
        -------
        IntervalSet
            Contains two columns corresponding to the start and end of the intervals.

        """
        if name is not None and path2file is None:
            path2file = self.path / (self.basename + "." + name + ".evt")
        if path2file is not None:
            try:
                tmp = np.genfromtxt(path2file)[:, 0]
                df = tmp.reshape(len(tmp) // 2, 2)
            except ValueError:
                raise ValueError("Could not parse {}; specify a valid name".format(path2file))
            isets = nap.IntervalSet(df[:, 0], df[:, 1], time_units="ms")
            if name is None:
                name = str(path2file).split(".")[-2]
                print("*** saving file in the nwb as", name)
        else:
            raise ValueError("specify a valid path")
        return isets

    def write_neuroscope_intervals(self, extension, isets, name):
        """Write events to load with neuroscope (e.g. ripples start and ends)

        Parameters
        ----------
        extension : str
            The extension of the file (e.g. basename.evt.py.rip)
        isets : IntervalSet
            The IntervalSet to write
        name : str
            The name of the events (e.g. Ripples)
        """
        start = isets.as_units("ms")["start"].values
        ends = isets.as_units("ms")["end"].values

        datatowrite = np.vstack((start, ends)).T.flatten()

        n = len(isets)

        texttowrite = np.vstack(
            (
                (np.repeat(np.array([name + " start"]), n)),
                (np.repeat(np.array([name + " end"]), n)),
            )
        ).T.flatten()

        evt_file = self.path / (self.basename + extension)

        with open(evt_file, "w") as f:
            for t, label in zip(datatowrite, texttowrite):
                f.write("{:1.6f}".format(t) + "\t" + label + "\n")

        return

    def load_mean_waveforms(self, epoch=None, waveform_window=None, spike_count=1000):
        """
        Load the mean waveforms from a dat file.

        Parameters
        ----------
        epoch : IntervalSet, optional
            Restrict spikes to an epoch. Default is None.
        waveform_window : IntervalSet, optional
            Limit waveform extraction before and after spike time.
            Default is nap.IntervalSet(start=-0.5, end=1, time_units='ms').
        spike_count : int, optional
            Number of spikes used per neuron for the calculation of waveforms.
            Default is 1000.

        Returns
        -------
        dict
            The mean waveform for each neuron.
        pandas.Series
            The channel with the maximum waveform for each neuron.

        """
        if not isinstance(waveform_window, nap.IntervalSet):
            waveform_window = nap.IntervalSet(start=-0.5, end=1, time_units="ms")

        spikes = self.spikes
        if not self.path.exists():  # check if path exists
            print("The path " + str(self.path) + " doesn't exist; Exiting ...")
            sys.exit()

        # Load XML INFO
        self.load_neurosuite_xml(self.path)
        n_channels = self.nChannels
        fs = self.fs_dat
        group_to_channel = self.group_to_channel
        group = spikes.get_info("group")

        # Check if there is an epoch, restrict spike times to epoch
        if epoch is not None:
            if type(epoch) is not nap.IntervalSet:
                print("Epoch must be an IntervalSet")
                sys.exit()
            else:
                print("Restricting spikes to epoch")
                spikes = spikes.restrict(epoch)
                epstart = int(epoch.as_units("s")["start"].values[0] * fs)
                epend = int(epoch.as_units("s")["end"].values[-1] * fs)

        # Find the dat file; n_samples collected in the entire recording is
        # needed to map the whole file to memory
        file = next(
            fn for fn in self.path.glob("*.dat") if not fn.name.startswith(".")
        )
        with open(file, "rb") as f:
            startoffile = f.seek(0, 0)
            endoffile = f.seek(0, 2)
        bytes_size = 2
        n_samples = int((endoffile - startoffile) / n_channels / bytes_size)
        # map to memory all samples for all channels, channels are numbered according to neuroscope number
        fp = np.memmap(file, np.int16, "r", shape=(n_samples, n_channels))

        # convert spike times to spikes in sample number
        sample_spikes = {
            neuron: (spikes[neuron].as_units("s").index.values * fs).astype("int")
            for neuron in spikes
        }

        # prep for waveforms
        overlap = int(
            waveform_window.tot_length(time_units="s") * fs
        )  # one spike's worth of overlap between windows, in samples
        waveform_window = abs(np.array(waveform_window.as_units("s"))[0] * fs).astype(
            int
        )  # convert time to sample number
        neuron_waveforms = {
            n: np.zeros([np.sum(waveform_window), len(group_to_channel[group[n]])])
            for n in sample_spikes
        }

        # divide dat file into batches that slightly overlap for faster loading
        batch_size = 3000000
        windows = np.arange(0, int(endoffile / n_channels / bytes_size), batch_size)
        if epoch is not None:
            print("Restricting dat file to epoch")
            windows = windows[(windows >= epstart) & (windows <= epend)]
        batches = []
        # make overlapping batches from the beginning to the end of the recording
        for i in windows:
            if i == windows[-1]:  # the last batch cannot overlap with the next one
                batches.append([i, n_samples])
            else:
                batches.append([i, i + batch_size + overlap])
        batches = [np.int32(batch) for batch in batches]

        sample_counted_spikes = {}
        for index, neuron in enumerate(sample_spikes):
            if len(sample_spikes[neuron]) >= spike_count:
                sample_counted_spikes[neuron] = np.array(
                    np.random.choice(list(sample_spikes[neuron]), spike_count)
                )
            elif len(sample_spikes[neuron]) < spike_count:
                print(
                    "Not enough spikes in neuron " + str(index) + "... using all spikes"
                )
                sample_counted_spikes[neuron] = sample_spikes[neuron]

        # Make one array containing all selected spike times of all neurons - will be used to check for spikes before loading dat file
        spike_check = np.array(
            [
                int(spike_time)
                for neuron in sample_counted_spikes
                for spike_time in sample_counted_spikes[neuron]
            ]
        )

        for index, timestep in enumerate(batches):
            print(
                f"Extracting waveforms from dat file: window {index+1} / {len(windows)}",
                end="\r",
            )

            in_batch = spike_check[
                (timestep[0] < spike_check) & (timestep[1] > spike_check)
            ]
            if len(in_batch) == 0:
                continue  # no spikes for any neuron in this batch; skip it

            # Load dat file for timestep
            tmp = pd.DataFrame(
                data=fp[timestep[0] : timestep[1], :],
                columns=np.arange(n_channels),
                index=range(timestep[0], timestep[1]),
            )  # load dat file

            # Check if any spikes are present
            for neuron in sample_counted_spikes:
                neurontmp = sample_counted_spikes[neuron]
                tmp2 = neurontmp[(timestep[0] < neurontmp) & (timestep[1] > neurontmp)]
                if len(tmp2) == 0:
                    continue  # skip neuron if it has no spikes in this batch
                tmpn = tmp[
                    group_to_channel[group[neuron]]
                ]  # restrict dat file to the channel group of the neuron

                for time in tmp2:  # add each spike waveform to neuron_waveform
                    spikewindow = tmpn.loc[
                        time - waveform_window[0] : time + waveform_window[1] - 1
                    ]  # waveform for this spike time
                    try:
                        neuron_waveforms[neuron] += spikewindow.values
                    except (
                        Exception
                    ):  # ignore if full waveform is not present in this batch
                        pass

        meanwf = {
            n: pd.DataFrame(
                data=np.array(neuron_waveforms[n]) / len(sample_counted_spikes[n]),  # mean over spikes actually used
                columns=np.arange(len(group_to_channel[group[n]])),
                index=np.array(np.arange(-waveform_window[0], waveform_window[1])) / fs,
            )
            for n in sample_counted_spikes
        }

        # find the max channel for each neuron
        maxch = pd.Series(
            data=[meanwf[n][meanwf[n].loc[0].idxmin()].name for n in meanwf],
            index=spikes.keys(),
        )

        return meanwf, maxch

load_data

load_data()

Load NWB data saved with pynapple in the pynapplenwb folder

load_data() takes no parameters; it reads the NWB file found in the pynapplenwb folder of the session given at construction.
Source code in pynapple/io/loader.py
def load_data(self):
    """
    Load NWB data saved with pynapple in the pynapplenwb folder.
    """

    io = NWBHDF5IO(self.nwbfilepath, "r")
    nwbfile = io.read()

    position = {}
    acq_keys = nwbfile.acquisition.keys()
    if "CompassDirection" in acq_keys:
        compass = nwbfile.acquisition["CompassDirection"]
        for k in compass.spatial_series.keys():
            position[k] = pd.Series(
                index=compass.get_spatial_series(k).timestamps[:],
                data=compass.get_spatial_series(k).data[:],
            )
    if "Position" in acq_keys:
        tracking = nwbfile.acquisition["Position"]
        for k in tracking.spatial_series.keys():
            position[k] = pd.Series(
                index=tracking.get_spatial_series(k).timestamps[:],
                data=tracking.get_spatial_series(k).data[:],
            )
    if len(position):
        position = pd.DataFrame.from_dict(position)

        # retrieving the time support of the position if stored in the epochs
        time_support = None
        if "position_time_support" in nwbfile.intervals.keys():
            epochs = nwbfile.intervals["position_time_support"].to_dataframe()
            time_support = nap.IntervalSet(
                start=epochs["start_time"], end=epochs["stop_time"], time_units="s"
            )

        self.position = nap.TsdFrame(
            position, time_units="s", time_support=time_support
        )

    if nwbfile.epochs is not None:
        epochs = nwbfile.epochs.to_dataframe()
        # NWB stores epoch labels as lists of tags; take the first tag as the label
        epochs["label"] = [epochs.loc[i, "tags"][0] for i in epochs.index]
        epochs = epochs.drop(labels="tags", axis=1)
        epochs = epochs.rename(columns={"start_time": "start", "stop_time": "end"})
        self.epochs = self._make_epochs(epochs)

        self.time_support = self._join_epochs(epochs, "s")

    io.close()

    return
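
For instance, after the loader is constructed (see the class example above), the attributes populated by load_data can be combined; the epoch label here is hypothetical:

# 'wake' is a hypothetical epoch label stored in the session's NWB file
wake_ep = data.epochs["wake"]
position_during_wake = data.position.restrict(wake_ep)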

save_nwb_intervals

save_nwb_intervals(iset, name, description='')

Add epochs to the NWB file (e.g. ripples epochs). See pynwb.epoch.TimeIntervals.

Parameters:

    iset : IntervalSet (required)
        The intervalSet to save.
    name : str (required)
        The name in the nwb file.
    description : str (default: '')
        Description of the epochs.
Source code in pynapple/io/loader.py
def save_nwb_intervals(self, iset, name, description=""):
    """
    Add epochs to the NWB file (e.g. ripples epochs)
    See pynwb.epoch.TimeIntervals

    Parameters
    ----------
    iset : IntervalSet
        The intervalSet to save
    name : str
        The name in the nwb file
    """
    io = NWBHDF5IO(self.nwbfilepath, "r+")
    nwbfile = io.read()

    epochs = iset.as_units("s")
    time_intervals = TimeIntervals(name=name, description=description)
    for i in epochs.index:
        time_intervals.add_interval(
            start_time=epochs.loc[i, "start"],
            stop_time=epochs.loc[i, "end"],
            tags=str(i),
        )

    nwbfile.add_time_intervals(time_intervals)
    io.write(nwbfile)
    io.close()

    return
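
A short sketch of saving detected epochs, assuming data is the NeuroSuite instance from the class example and using made-up ripple times:

import pynapple as nap

ripple_ep = nap.IntervalSet(start=[10.0, 25.5], end=[10.2, 25.8], time_units="s")
data.save_nwb_intervals(ripple_ep, name="ripples", description="detected ripple epochs")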

save_nwb_timeseries

save_nwb_timeseries(tsd, name, description='')

Save a time series in the NWB file (e.g. ripples time) together with its time support. See pynwb.base.TimeSeries.

Parameters:

    tsd : TsdFrame (required)
        The time series to save.
    name : str (required)
        The name in the nwb file.
    description : str (default: '')
        Description of the time series.
Source code in pynapple/io/loader.py
def save_nwb_timeseries(self, tsd, name, description=""):
    """
    Save a time series in the NWB file (e.g. ripples time) together with its time support.
    See pynwb.base.TimeSeries


    Parameters
    ----------
    tsd : TsdFrame
        The time series to save.
    name : str
        The name in the nwb file.
    description : str, optional
        Description of the time series.
    """
    io = NWBHDF5IO(self.nwbfilepath, "r+")
    nwbfile = io.read()

    ts = TimeSeries(
        name=name,
        unit="s",
        data=tsd.values,
        timestamps=tsd.as_units("s").index.values,
    )

    time_support = TimeIntervals(
        name=name + "_timesupport", description="The time support of the object"
    )

    epochs = tsd.time_support.as_units("s")
    for i in epochs.index:
        time_support.add_interval(
            start_time=epochs.loc[i, "start"],
            stop_time=epochs.loc[i, "end"],
            tags=str(i),
        )
    nwbfile.add_time_intervals(time_support)
    nwbfile.add_acquisition(ts)
    io.write(nwbfile)
    io.close()

    return
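
A sketch with a made-up signal; the name is arbitrary and data is the NeuroSuite instance from the class example:

import numpy as np
import pynapple as nap

# Made-up 1-column signal sampled at 10 Hz for 100 s
t = np.arange(0, 100, 0.1)
tsd = nap.TsdFrame(t=t, d=np.random.rand(len(t), 1), time_units="s")
data.save_nwb_timeseries(tsd, name="ripple_power")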

load_nwb_intervals

load_nwb_intervals(name)

Load epochs from the NWB file (e.g. 'ripples')

Parameters:

    name : str (required)
        The name in the nwb file.
Source code in pynapple/io/loader.py
def load_nwb_intervals(self, name):
    """
    Load epochs from the NWB file (e.g. 'ripples')

    Parameters
    ----------
    name : str
        The name in the nwb file
    """
    io = NWBHDF5IO(self.nwbfilepath, "r")
    nwbfile = io.read()

    if name in nwbfile.intervals.keys():
        epochs = nwbfile.intervals[name].to_dataframe()
        isets = nap.IntervalSet(
            start=epochs["start_time"], end=epochs["stop_time"], time_units="s"
        )
        io.close()
        return isets
    else:
        io.close()
    return
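
Reading back the epochs saved in the save_nwb_intervals example (the method returns None if the name is absent from the file):

ripple_ep = data.load_nwb_intervals("ripples")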

load_nwb_timeseries

load_nwb_timeseries(name)

Load timestamps in the NWB file (e.g. ripples time)

Parameters:

    name : str (required)
        The name in the nwb file.

Returns:

    Tsd
        The loaded time series.

Source code in pynapple/io/loader.py
def load_nwb_timeseries(self, name):
    """
    Load timestamps in the NWB file (e.g. ripples time)

    Parameters
    ----------
    name : str
        The name in the nwb file.

    Returns
    -------
    Tsd
        The loaded time series.
    """
    io = NWBHDF5IO(self.nwbfilepath, "r")
    nwbfile = io.read()

    ts = nwbfile.acquisition[name]

    time_support = self.load_nwb_intervals(name + "_timesupport")

    tsd = nap.Tsd(
        t=ts.timestamps[:], d=ts.data[:], time_units="s", time_support=time_support
    )

    io.close()

    return tsd
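
Reading back the series saved in the save_nwb_timeseries example; the matching "<name>_timesupport" intervals are loaded alongside and used as time support:

ripple_power = data.load_nwb_timeseries("ripple_power")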

__init__

__init__(path)

Instantiate the data class from a neurosuite folder.

Parameters:

    path : str (required)
        The path to the data.

load_nwb_spikes

load_nwb_spikes()

Read the NWB file to extract the spike times.

load_nwb_spikes() takes no parameters.

Returns:

    bool
        True if spikes were found in the NWB file, False otherwise.


load_lfp

load_lfp(
    filename=None,
    channel=None,
    extension=".eeg",
    frequency=1250.0,
    precision="int16",
    bytes_size=2,
)

Load the LFP.

Parameters:

    filename : str (default: None)
        The filename of the lfp file. Useful if multiple binary files are
        present in the data directory.
    channel : int or list of int (default: None)
        The channel(s) to load. If None, return a memory map of the whole file
        to avoid memory errors.
    extension : str (default: '.eeg')
        The file extension (.eeg, .dat, .lfp). Make sure the frequency matches.
    frequency : float (default: 1250.0)
        Sampling frequency. Default 1250 Hz for the eeg file.
    precision : str (default: 'int16')
        The precision of the binary file.
    bytes_size : int (default: 2)
        Bytes per sample in the lfp file.

Raises:

    RuntimeError
        If the lfp/eeg/dat file cannot be found.

Returns:

    Tsd or TsdFrame
        The lfp in a time series format.
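
Two hedged examples, assuming data is a NeuroSuite instance; the 20 kHz rate for the .dat file is an assumption and must match the actual recording:

# Single channel from the .eeg file at the default 1250 Hz
lfp = data.load_lfp(channel=3)

# Subset of channels from the wideband .dat file (assumed 20 kHz)
raw = data.load_lfp(extension=".dat", frequency=20000.0, channel=[0, 1, 2])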


read_neuroscope_intervals

read_neuroscope_intervals(name=None, path2file=None)

Reads a Neuroscope .evt file in which odd rows mark the beginning of the intervals and even rows mark their ends. If the epoch is already present in the nwb, just provide the name. Otherwise the events are loaded from the data directory. If only the path is provided, the name is taken from the file.

Parameters:

    name : str (default: None)
        Name of the epoch in the nwb file, e.g. "rem", or the desired name
        under which to save the data in the nwb.
    path2file : str (default: None)
        Path of the file you want to load.

Returns:

    IntervalSet
        Contains two columns corresponding to the start and end of the intervals.
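
For example, assuming a file named <basename>.rem.evt exists in the session folder:

rem_ep = data.read_neuroscope_intervals(name="rem")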


write_neuroscope_intervals

write_neuroscope_intervals(extension, isets, name)

Write events to load with neuroscope (e.g. ripples start and ends)

Parameters:

    extension : str (required)
        The extension of the file (e.g. basename.evt.py.rip).
    isets : IntervalSet (required)
        The IntervalSet to write.
    name : str (required)
        The name of the events (e.g. Ripples).
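
A sketch writing hypothetical ripple epochs so they can be displayed in Neuroscope, assuming data is a NeuroSuite instance:

import pynapple as nap

ripple_ep = nap.IntervalSet(start=[10.0, 25.5], end=[10.2, 25.8], time_units="s")
data.write_neuroscope_intervals(".evt.py.rip", ripple_ep, "Ripples")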

load_mean_waveforms

load_mean_waveforms(
    epoch=None, waveform_window=None, spike_count=1000
)

Load the mean waveforms from a dat file.

Parameters:

    epoch : IntervalSet (default: None)
        Restrict spikes to an epoch.
    waveform_window : IntervalSet (default: nap.IntervalSet(start=-0.5, end=1, time_units='ms'))
        Limit waveform extraction before and after spike time.
    spike_count : int (default: 1000)
        Number of spikes used per neuron for the calculation of waveforms.

Returns:

    dict
        The mean waveform for each neuron.
    pandas.Series
        The channel with the maximum waveform for each neuron.
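
A hedged usage sketch; the epoch boundaries are made up and unit 0 is assumed to exist:

import pynapple as nap

wake_ep = nap.IntervalSet(start=0, end=600, time_units="s")  # made-up epoch
meanwf, maxch = data.load_mean_waveforms(epoch=wake_ep, spike_count=500)

wf0 = meanwf[0]  # pandas.DataFrame, time (s) x channels, for unit 0
ch0 = maxch[0]   # channel carrying the largest trough for unit 0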
