netCDF4
-Version 1.6.0
+Version 1.6.1
Introduction
@@ -582,7 +601,7 @@Creating/Opening/Closing a netCDF
Here's an example:
->>> from netCDF4 import Dataset
+>>> from netCDF4 import Dataset
>>> rootgrp = Dataset("test.nc", "w", format="NETCDF4")
>>> print(rootgrp.data_model)
NETCDF4
@@ -611,7 +630,7 @@ Groups in a netCDF file
NETCDF4
formatted files support Groups, if you try to create a Group
in a netCDF 3 file you will get an error message.
->>> rootgrp = Dataset("test.nc", "a")
+>>> rootgrp = Dataset("test.nc", "a")
>>> fcstgrp = rootgrp.createGroup("forecasts")
>>> analgrp = rootgrp.createGroup("analyses")
>>> print(rootgrp.groups)
@@ -635,7 +654,7 @@ Groups in a netCDF file
that group. To simplify the creation of nested groups, you can
use a unix-like path as an argument to Dataset.createGroup
.
->>> fcstgrp1 = rootgrp.createGroup("/forecasts/model1")
+>>> fcstgrp1 = rootgrp.createGroup("/forecasts/model1")
>>> fcstgrp2 = rootgrp.createGroup("/forecasts/model2")
@@ -649,7 +668,7 @@ Groups in a netCDF file
to walk the directory tree. Note that printing the Dataset
or Group
object yields summary information about its contents.
->>> def walktree(top):
+>>> def walktree(top):
... yield top.groups.values()
... for value in top.groups.values():
... yield from walktree(value)
@@ -699,7 +718,7 @@ Dimensions in a netCDF file
dimension is a new netCDF 4 feature, in netCDF 3 files there may be only
one, and it must be the first (leftmost) dimension of the variable.
->>> level = rootgrp.createDimension("level", None)
+>>> level = rootgrp.createDimension("level", None)
>>> time = rootgrp.createDimension("time", None)
>>> lat = rootgrp.createDimension("lat", 73)
>>> lon = rootgrp.createDimension("lon", 144)
@@ -707,7 +726,7 @@ Dimensions in a netCDF file
All of the Dimension
instances are stored in a python dictionary.
->>> print(rootgrp.dimensions)
+>>> print(rootgrp.dimensions)
{'level': <class 'netCDF4._netCDF4.Dimension'> (unlimited): name = 'level', size = 0, 'time': <class 'netCDF4._netCDF4.Dimension'> (unlimited): name = 'time', size = 0, 'lat': <class 'netCDF4._netCDF4.Dimension'>: name = 'lat', size = 73, 'lon': <class 'netCDF4._netCDF4.Dimension'>: name = 'lon', size = 144}
@@ -716,7 +735,7 @@ Dimensions in a netCDF file
Dimension.isunlimited
method of a Dimension
instance
can be used to determine if the dimension is unlimited, or appendable.
->>> print(len(lon))
+>>> print(len(lon))
144
>>> print(lon.isunlimited())
False
@@ -728,7 +747,7 @@ Dimensions in a netCDF file
provides useful summary info, including the name and length of the dimension,
and whether it is unlimited.
->>> for dimobj in rootgrp.dimensions.values():
+>>> for dimobj in rootgrp.dimensions.values():
... print(dimobj)
<class 'netCDF4._netCDF4.Dimension'> (unlimited): name = 'level', size = 0
<class 'netCDF4._netCDF4.Dimension'> (unlimited): name = 'time', size = 0
@@ -773,7 +792,7 @@ Variables in a netCDF file
method returns an instance of the Variable
class whose methods can be
used later to access and set variable data and attributes.
->>> times = rootgrp.createVariable("time","f8",("time",))
+>>> times = rootgrp.createVariable("time","f8",("time",))
>>> levels = rootgrp.createVariable("level","i4",("level",))
>>> latitudes = rootgrp.createVariable("lat","f4",("lat",))
>>> longitudes = rootgrp.createVariable("lon","f4",("lon",))
@@ -785,7 +804,7 @@ Variables in a netCDF file
To get summary info on a Variable
instance in an interactive session,
just print it.
->>> print(temp)
+>>> print(temp)
<class 'netCDF4._netCDF4.Variable'>
float32 temp(time, level, lat, lon)
units: K
@@ -796,7 +815,7 @@ Variables in a netCDF file
You can use a path to create a Variable inside a hierarchy of groups.
->>> ftemp = rootgrp.createVariable("/forecasts/model1/temp","f4",("time","level","lat","lon",))
+>>> ftemp = rootgrp.createVariable("/forecasts/model1/temp","f4",("time","level","lat","lon",))
If the intermediate groups do not yet exist, they will be created.
@@ -804,7 +823,7 @@ Variables in a netCDF file
You can also query a Dataset
or Group
instance directly to obtain Group
or
Variable
instances using paths.
->>> print(rootgrp["/forecasts/model1"]) # a Group instance
+>>> print(rootgrp["/forecasts/model1"]) # a Group instance
<class 'netCDF4._netCDF4.Group'>
group /forecasts/model1:
dimensions(sizes):
@@ -822,7 +841,7 @@ Variables in a netCDF file
All of the variables in the Dataset
or Group
are stored in a
Python dictionary, in the same way as the dimensions:
->>> print(rootgrp.variables)
+>>> print(rootgrp.variables)
{'time': <class 'netCDF4._netCDF4.Variable'>
float64 time(time)
unlimited dimensions: time
@@ -865,7 +884,7 @@ Attributes in a netCDF file
variables. Attributes can be strings, numbers or sequences. Returning to
our example,
->>> import time
+>>> import time
>>> rootgrp.description = "bogus example script"
>>> rootgrp.history = "Created " + time.ctime(time.time())
>>> rootgrp.source = "netCDF4 python module tutorial"
@@ -883,7 +902,7 @@ Attributes in a netCDF file
built-in dir
Python function will return a bunch of private methods
and attributes that cannot (or should not) be modified by the user.
->>> for name in rootgrp.ncattrs():
+>>> for name in rootgrp.ncattrs():
... print("Global attr {} = {}".format(name, getattr(rootgrp, name)))
Global attr description = bogus example script
Global attr history = Created Mon Jul 8 14:19:41 2019
@@ -894,7 +913,7 @@ Attributes in a netCDF file
instance provides all the netCDF attribute name/value pairs in a python
dictionary:
->>> print(rootgrp.__dict__)
+>>> print(rootgrp.__dict__)
{'description': 'bogus example script', 'history': 'Created Mon Jul 8 14:19:41 2019', 'source': 'netCDF4 python module tutorial'}
@@ -907,7 +926,7 @@ Writing data
Now that you have a netCDF Variable
instance, how do you put data
into it? You can just treat it like an array and assign data to a slice.
->>> import numpy as np
+>>> import numpy as np
>>> lats = np.arange(-90,91,2.5)
>>> lons = np.arange(-180,180,2.5)
>>> latitudes[:] = lats
@@ -927,7 +946,7 @@ Writing data
objects with unlimited dimensions will grow along those dimensions if you
assign data outside the currently defined range of indices.
->>> # append along two unlimited dimensions by assigning to slice.
+>>> # append along two unlimited dimensions by assigning to slice.
>>> nlats = len(rootgrp.dimensions["lat"])
>>> nlons = len(rootgrp.dimensions["lon"])
>>> print("temp shape before adding data = {}".format(temp.shape))
@@ -947,7 +966,7 @@ Writing data
along the level
dimension of the variable temp
, even though no
data has yet been assigned to levels.
->>> # now, assign data to levels dimension variable.
+>>> # now, assign data to levels dimension variable.
>>> levels[:] = [1000.,850.,700.,500.,300.,250.,200.,150.,100.,50.]
@@ -960,7 +979,7 @@ Writing data
allowed, and these indices work independently along each dimension (similar
to the way vector subscripts work in fortran). This means that
->>> temp[0, 0, [0,1,2,3], [0,1,2,3]].shape
+>>> temp[0, 0, [0,1,2,3], [0,1,2,3]].shape
(4, 4)
@@ -978,14 +997,14 @@ Writing data
For example,
->>> tempdat = temp[::2, [1,3,6], lats>0, lons>0]
+>>> tempdat = temp[::2, [1,3,6], lats>0, lons>0]
will extract time indices 0,2 and 4, pressure levels
850, 500 and 200 hPa, all Northern Hemisphere latitudes and Eastern
Hemisphere longitudes, resulting in a numpy array of shape (3, 3, 36, 71).
->>> print("shape of fancy temp slice = {}".format(tempdat.shape))
+>>> print("shape of fancy temp slice = {}".format(tempdat.shape))
shape of fancy temp slice = (3, 3, 36, 71)
@@ -1018,7 +1037,7 @@ Dealing with time coordinates
provided by cftime to do just that.
Here's an example of how they can be used:
->>> # fill in times.
+>>> # fill in times.
>>> from datetime import datetime, timedelta
>>> from cftime import num2date, date2num
>>> dates = [datetime(2001,3,1)+n*timedelta(hours=12) for n in range(temp.shape[0])]
@@ -1058,7 +1077,7 @@ Reading data from a multi
NETCDF4_CLASSIC
format (NETCDF4
formatted multi-file
datasets are not supported).
->>> for nf in range(10):
+>>> for nf in range(10):
... with Dataset("mftest%s.nc" % nf, "w", format="NETCDF4_CLASSIC") as f:
... _ = f.createDimension("x",None)
... x = f.createVariable("x","i",("x",))
@@ -1067,7 +1086,7 @@ Reading data from a multi
Now read all the files back in at once with MFDataset
->>> from netCDF4 import MFDataset
+>>> from netCDF4 import MFDataset
>>> f = MFDataset("mftest*nc")
>>> print(f.variables["x"][:])
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
@@ -1134,22 +1153,22 @@ Efficient compression of netC
In our example, try replacing the line
->>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",))
+>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",))
with
->>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",),compression='zlib')
+>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",),compression='zlib')
and then
->>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",),compression='zlib',least_significant_digit=3)
+>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",),compression='zlib',least_significant_digit=3)
or with netcdf-c >= 4.9.0
->>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",),compression='zlib',significant_digits=4)
+>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",),compression='zlib',significant_digits=4)
and see how much smaller the resulting files are.
@@ -1170,7 +1189,7 @@ Beyond ho
Since there is no native complex data type in netcdf, compound types are handy
for storing numpy complex arrays. Here's an example:
->>> f = Dataset("complex.nc","w")
+>>> f = Dataset("complex.nc","w")
>>> size = 3 # length of 1-d complex array
>>> # create sample complex data.
>>> datac = np.exp(1j*(1.+np.linspace(0, np.pi, size)))
@@ -1206,7 +1225,7 @@ Beyond ho
in a Python dictionary, just like variables and dimensions. As always, printing
objects gives useful summary information in an interactive session:
->>> print(f)
+>>> print(f)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
dimensions(sizes): x_dim(3)
@@ -1231,7 +1250,7 @@ Variable-length (vlen) data types
data type, use the Dataset.createVLType
method of a Dataset
or Group
instance.
->>> f = Dataset("tst_vlen.nc","w")
+>>> f = Dataset("tst_vlen.nc","w")
>>> vlen_t = f.createVLType(np.int32, "phony_vlen")
@@ -1241,7 +1260,7 @@ Variable-length (vlen) data types
but compound data types cannot.
A new variable can then be created using this datatype.
->>> x = f.createDimension("x",3)
+>>> x = f.createDimension("x",3)
>>> y = f.createDimension("y",4)
>>> vlvar = f.createVariable("phony_vlen_var", vlen_t, ("y","x"))
@@ -1254,7 +1273,7 @@ Variable-length (vlen) data types
In this case, they contain 1-D numpy int32
arrays of random length between
1 and 10.
->>> import random
+>>> import random
>>> random.seed(54321)
>>> data = np.empty(len(y)*len(x),object)
>>> for n in range(len(y)*len(x)):
@@ -1294,7 +1313,7 @@ Variable-length (vlen) data types
with fixed length greater than 1) when calling the
Dataset.createVariable
method.
->>> z = f.createDimension("z",10)
+>>> z = f.createDimension("z",10)
>>> strvar = f.createVariable("strvar", str, "z")
@@ -1302,7 +1321,7 @@ Variable-length (vlen) data types
random lengths between 2 and 12 characters, and the data in the object
array is assigned to the vlen string variable.
->>> chars = "1234567890aabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+>>> chars = "1234567890aabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
>>> data = np.empty(10,"O")
>>> for n in range(10):
... stringlen = random.randint(2,12)
@@ -1341,7 +1360,7 @@ Enum data type
values and their names are used to define an Enum data type using
Dataset.createEnumType
.
->>> nc = Dataset('clouds.nc','w')
+>>> nc = Dataset('clouds.nc','w')
>>> # python dict with allowed values and their names.
>>> enum_dict = {'Altocumulus': 7, 'Missing': 255,
... 'Stratus': 2, 'Clear': 0,
@@ -1359,7 +1378,7 @@ Enum data type
is made to write an integer value not associated with one of the
specified names.
->>> time = nc.createDimension('time',None)
+>>> time = nc.createDimension('time',None)
>>> # create a 1d variable of type 'cloud_type'.
>>> # The fill_value is set to the 'Missing' named value.
>>> cloud_var = nc.createVariable('primary_cloud',cloud_type,'time',
@@ -1396,7 +1415,7 @@ Parallel IO
available. To use parallel IO, your program must be running in an MPI
environment using mpi4py.
->>> from mpi4py import MPI
+>>> from mpi4py import MPI
>>> import numpy as np
>>> from netCDF4 import Dataset
>>> rank = MPI.COMM_WORLD.rank # The process ID (integer 0-3 for 4-process run)
@@ -1408,7 +1427,7 @@ Parallel IO
when a new dataset is created or an existing dataset is opened,
use the parallel
keyword to enable parallel access.
->>> nc = Dataset('parallel_test.nc','w',parallel=True)
+>>> nc = Dataset('parallel_test.nc','w',parallel=True)
The optional comm
keyword may be used to specify a particular
@@ -1416,7 +1435,7 @@
Parallel IO
can now write to the file independently. In this example the process rank is
written to a different variable index on each task
->>> d = nc.createDimension('dim',4)
+>>> d = nc.createDimension('dim',4)
>>> v = nc.createVariable('var', np.int64, 'dim')
>>> v[rank] = rank
>>> nc.close()
@@ -1483,7 +1502,7 @@ Dealing with strings
stringtochar
is used to convert the numpy string array to an array of
characters with one more dimension. For example,
->>> from netCDF4 import stringtochar
+>>> from netCDF4 import stringtochar
>>> nc = Dataset('stringtest.nc','w',format='NETCDF4_CLASSIC')
>>> _ = nc.createDimension('nchars',3)
>>> _ = nc.createDimension('nstrings',None)
@@ -1516,7 +1535,7 @@ Dealing with strings
character array dtype under the hood when creating the netcdf compound type.
Here's an example:
->>> nc = Dataset('compoundstring_example.nc','w')
+>>> nc = Dataset('compoundstring_example.nc','w')
>>> dtype = np.dtype([('observation', 'f4'),
... ('station_name','S10')])
>>> station_data_t = nc.createCompoundType(dtype,'station_data')
@@ -1561,7 +1580,7 @@ In-memory (diskless) Datasets
object representing the Dataset. Below are examples illustrating both
approaches.
->>> # create a diskless (in-memory) Dataset,
+>>> # create a diskless (in-memory) Dataset,
>>> # and persist the file to disk when it is closed.
>>> nc = Dataset('diskless_example.nc','w',diskless=True,persist=True)
>>> d = nc.createDimension('x',None)
@@ -1623,7 +1642,7 @@ In-memory (diskless) Datasets
the parallel IO example, which is in examples/mpi_example.py
.
Unit tests are in the test
directory.
-contact: Jeffrey Whitaker jeffrey.s.whitaker@noaa.gov
+contact: Jeffrey Whitaker jeffrey.s.whitaker@noaa.gov
copyright: 2008 by Jeffrey Whitaker.
@@ -1636,7 +1655,7 @@ In-memory (diskless) Datasets
View Source
- # init for netCDF4. package
+ # init for netCDF4. package
# Docstring comes from extension module _netCDF4.
from ._netCDF4 import *
# Need explicit imports for names beginning with underscores
@@ -1650,12 +1669,13 @@ In-memory (diskless) Datasets
__has_bzip2_support__, __has_blosc_support__, __has_szip_support__)
import os
__all__ =\
-['Dataset','Variable','Dimension','Group','MFDataset','MFTime','CompoundType','VLType','date2num','num2date','date2index','stringtochar','chartostring','stringtoarr','getlibversion','EnumType','get_chunk_cache','set_chunk_cache']
+['Dataset','Variable','Dimension','Group','MFDataset','MFTime','CompoundType','VLType','date2num','num2date','date2index','stringtochar','chartostring','stringtoarr','getlibversion','EnumType','get_chunk_cache','set_chunk_cache','set_alignment','get_alignment']
# if HDF5_PLUGIN_PATH not set, point to package path if plugins live there
+pluginpath = os.path.join(__path__[0],'plugins')
if 'HDF5_PLUGIN_PATH' not in os.environ and\
- (os.path.exists(os.path.join(__path__[0],'lib__nczhdf5filters.so')) or\
- os.path.exists(os.path.join(__path__[0],'lib__nczhdf5filters.dylib'))):
- os.environ['HDF5_PLUGIN_PATH']=__path__[0]
+ (os.path.exists(os.path.join(pluginpath,'lib__nczhdf5filters.so')) or\
+ os.path.exists(os.path.join(pluginpath,'lib__nczhdf5filters.dylib'))):
+ os.environ['HDF5_PLUGIN_PATH']=pluginpath
@@ -1670,7 +1690,7 @@ In-memory (diskless) Datasets
Dataset:
-
+
A netCDF Dataset
is a collection of dimensions, groups, variables and
attributes. Together they describe the meaning of data and relations among
data fields stored in a netCDF file. See Dataset.__init__
for more
@@ -1748,7 +1768,7 @@
In-memory (diskless) Datasets
Dataset()
-
+
__init__(self, filename, mode="r", clobber=True, diskless=False,
persist=False, keepweakref=False, memory=None, encoding=None,
parallel=False, comm=None, info=None, format='NETCDF4')
@@ -1854,7 +1874,7 @@ In-memory (diskless) Datasets
filepath(unknown):
-
+
filepath(self,encoding=None)
Get the file system path (or the opendap URL) which was used to
@@ -1873,7 +1893,7 @@
In-memory (diskless) Datasets
close(unknown):
-
+
close(self)
Close the Dataset.
@@ -1889,7 +1909,7 @@ In-memory (diskless) Datasets
isopen(unknown):
-
+
isopen(self)
Is the Dataset open or closed?
@@ -1905,7 +1925,7 @@ In-memory (diskless) Datasets
sync(unknown):
-
+
sync(self)
Writes all buffered data in the Dataset
to the disk file.
@@ -1921,7 +1941,7 @@ In-memory (diskless) Datasets
set_fill_on(unknown):
-
+
set_fill_on(self)
Sets the fill mode for a Dataset
open for writing to on
.
@@ -1945,7 +1965,7 @@ In-memory (diskless) Datasets
set_fill_off(unknown):
-
+
set_fill_off(self)
Sets the fill mode for a Dataset
open for writing to off
.
@@ -1965,7 +1985,7 @@ In-memory (diskless) Datasets
createDimension(unknown):
-
+
createDimension(self, dimname, size=None)
Creates a new dimension with the given dimname
and size
.
@@ -1989,7 +2009,7 @@ In-memory (diskless) Datasets
renameDimension(unknown):
-
+
renameDimension(self, oldname, newname)
rename a Dimension
named oldname
to newname
.
@@ -2005,7 +2025,7 @@ In-memory (diskless) Datasets
createCompoundType(unknown):
-
+
createCompoundType(self, datatype, datatype_name)
Creates a new compound data type named datatype_name
from the numpy
@@ -2030,7 +2050,7 @@
In-memory (diskless) Datasets
createVLType(unknown):
-
+
createVLType(self, datatype, datatype_name)
Creates a new VLEN data type named datatype_name
from a numpy
@@ -2050,7 +2070,7 @@
In-memory (diskless) Datasets
createEnumType(unknown):
-
+
createEnumType(self, datatype, datatype_name, enum_dict)
Creates a new Enum data type named datatype_name
from a numpy
@@ -2071,7 +2091,7 @@
In-memory (diskless) Datasets
createVariable(unknown):
-
+
createVariable(self, varname, datatype, dimensions=(), compression=None, zlib=False,
complevel=4, shuffle=True, fletcher32=False, contiguous=False, chunksizes=None,
szip_coding='nn', szip_pixels_per_block=8, blosc_shuffle=1,
@@ -2166,7 +2186,7 @@ In-memory (diskless) Datasets
The optional keyword fill_value
can be used to override the default
netCDF _FillValue
(the value that the variable gets filled with before
-any data is written to it, defaults given in the dict netCDF4.default_fillvals
).
+any data is written to it, defaults given in the dict netCDF4.default_fillvals
).
If fill_value is set to False
, then the variable is not pre-filled.
If the optional keyword parameters least_significant_digit
or significant_digits
are
@@ -2234,7 +2254,7 @@
In-memory (diskless) Datasets
renameVariable(unknown):
-
+
renameVariable(self, oldname, newname)
rename a Variable
named oldname
to newname
@@ -2250,7 +2270,7 @@ In-memory (diskless) Datasets
createGroup(unknown):
-
+
createGroup(self, groupname)
Creates a new Group
with the given groupname
.
@@ -2276,7 +2296,7 @@ In-memory (diskless) Datasets
ncattrs(unknown):
-
+
ncattrs(self)
return netCDF global attribute names for this Dataset
or Group
in a list.
@@ -2292,7 +2312,7 @@ In-memory (diskless) Datasets
setncattr(unknown):
-
+
setncattr(self,name,value)
set a netCDF dataset or group attribute using name,value pair.
@@ -2310,7 +2330,7 @@
In-memory (diskless) Datasets
setncattr_string(unknown):
-
+
setncattr_string(self,name,value)
set a netCDF dataset or group string attribute using name,value pair.
@@ -2328,7 +2348,7 @@
In-memory (diskless) Datasets
setncatts(unknown):
-
+
setncatts(self,attdict)
set a bunch of netCDF dataset or group attributes at once using a python dictionary.
@@ -2347,7 +2367,7 @@
In-memory (diskless) Datasets
getncattr(unknown):
-
+
getncattr(self,name)
retrieve a netCDF dataset or group attribute.
@@ -2368,7 +2388,7 @@
In-memory (diskless) Datasets
delncattr(unknown):
-
+
delncattr(self,name,value)
delete a netCDF dataset or group attribute. Use if you need to delete a
@@ -2386,7 +2406,7 @@
In-memory (diskless) Datasets
renameAttribute(unknown):
-
+
renameAttribute(self, oldname, newname)
rename a Dataset
or Group
attribute named oldname
to newname
.
@@ -2402,7 +2422,7 @@ In-memory (diskless) Datasets
renameGroup(unknown):
-
+
renameGroup(self, oldname, newname)
rename a Group
named oldname
to newname
(requires netcdf >= 4.3.1).
@@ -2418,7 +2438,7 @@ In-memory (diskless) Datasets
set_auto_chartostring(unknown):
-
+
set_auto_chartostring(self, True_or_False)
Call Variable.set_auto_chartostring
for all variables contained in this Dataset
or
@@ -2443,7 +2463,7 @@
In-memory (diskless) Datasets
set_auto_maskandscale(unknown):
-
+
set_auto_maskandscale(self, True_or_False)
Call Variable.set_auto_maskandscale
for all variables contained in this Dataset
or
@@ -2466,7 +2486,7 @@
In-memory (diskless) Datasets
set_auto_mask(unknown):
-
+
set_auto_mask(self, True_or_False)
Call Variable.set_auto_mask
for all variables contained in this Dataset
or
@@ -2490,7 +2510,7 @@
In-memory (diskless) Datasets
set_auto_scale(unknown):
-
+
set_auto_scale(self, True_or_False)
Call Variable.set_auto_scale
for all variables contained in this Dataset
or
@@ -2513,7 +2533,7 @@
In-memory (diskless) Datasets
set_always_mask(unknown):
-
+
set_always_mask(self, True_or_False)
Call Variable.set_always_mask
for all variables contained in
@@ -2541,7 +2561,7 @@
In-memory (diskless) Datasets
set_ncstring_attrs(unknown):
-
+
set_ncstring_attrs(self, True_or_False)
Call Variable.set_ncstring_attrs
for all variables contained in
@@ -2566,7 +2586,7 @@
In-memory (diskless) Datasets
get_variables_by_attributes(unknown):
-
+
get_variables_by_attributes(self, **kwargs)
Returns a list of variables that match specific conditions.
@@ -2574,7 +2594,7 @@ In-memory (diskless) Datasets
Can pass in key=value parameters and variables are returned that
contain all of the matches. For example,
->>> # Get variables with x-axis attribute.
+>>> # Get variables with x-axis attribute.
>>> vs = nc.get_variables_by_attributes(axis='X')
>>> # Get variables with matching "standard_name" attribute
>>> vs = nc.get_variables_by_attributes(standard_name='northward_sea_water_velocity')
@@ -2585,7 +2605,7 @@ In-memory (diskless) Datasets
the attribute value. None is given as the attribute value when the
attribute does not exist on the variable. For example,
->>> # Get Axis variables
+>>> # Get Axis variables
>>> vs = nc.get_variables_by_attributes(axis=lambda v: v in ['X', 'Y', 'Z', 'T'])
>>> # Get variables that don't have an "axis" attribute
>>> vs = nc.get_variables_by_attributes(axis=lambda v: v is None)
@@ -2604,7 +2624,7 @@ In-memory (diskless) Datasets
fromcdl(unknown):
-
+
-
+
+
+
+
+
+
+ has_blosc_filter(self)
+returns True if blosc compression filter is available
+
+
+
+
+
+
+
+
+ has_zstd_filter(self)
+returns True if zstd compression filter is available
+
+
+
+
+
+
+
+
+ has_bzip2_filter(self)
+returns True if bzip2 compression filter is available
+
+
+
+
+
+
+
+
+ has_szip_filter(self)
+returns True if szip compression filter is available
+
+
+
+
string name of Group instance
@@ -2664,109 +2745,121 @@ In-memory (diskless) Datasets
+
+
+
#  
- disk_format = <attribute 'disk_format' of 'netCDF4._netCDF4.Dataset' objects>
+ disk_format
+
+
+
#  
- file_format = <attribute 'file_format' of 'netCDF4._netCDF4.Dataset' objects>
+ file_format
+
+
+
+
+
#  
- keepweakref = <attribute 'keepweakref' of 'netCDF4._netCDF4.Dataset' objects>
+ keepweakref
+
@@ -2779,7 +2872,7 @@ In-memory (diskless) Datasets
Variable:
-
+
A netCDF Variable
is used to read and write netCDF data. They are
analogous to numpy array objects. See Variable.__init__
for more
details.
@@ -2863,7 +2956,7 @@ In-memory (diskless) Datasets
Variable()
-
+
__init__(self, group, name, datatype, dimensions=(), compression=None, zlib=False,
complevel=4, shuffle=True, szip_coding='nn', szip_pixels_per_block=8,
blosc_shuffle=1, fletcher32=False, contiguous=False,
@@ -2984,7 +3077,7 @@ In-memory (diskless) Datasets
value that the variable gets filled with before any data is written to it)
is replaced with this value. If fill_value is set to False
, then
the variable is not pre-filled. The default netCDF fill values can be found
-in the dictionary netCDF4.default_fillvals
.
+in the dictionary netCDF4.default_fillvals
.
chunk_cache
: If specified, sets the chunk cache size for this variable.
Persists as long as Dataset is open. Use set_var_chunk_cache
to
@@ -3005,7 +3098,7 @@
In-memory (diskless) Datasets
group(unknown):
-
+
group(self)
return the group that this Variable
is a member of.
@@ -3021,7 +3114,7 @@ In-memory (diskless) Datasets
ncattrs(unknown):
-
+
ncattrs(self)
return netCDF attribute names for this Variable
in a list.
@@ -3037,7 +3130,7 @@ In-memory (diskless) Datasets
setncattr(unknown):
-
+
setncattr(self,name,value)
set a netCDF variable attribute using name,value pair. Use if you need to set a
@@ -3055,7 +3148,7 @@
In-memory (diskless) Datasets
setncattr_string(unknown):
-
+
setncattr_string(self,name,value)
set a netCDF variable string attribute using name,value pair.
@@ -3074,7 +3167,7 @@
In-memory (diskless) Datasets
setncatts(unknown):
-
+
setncatts(self,attdict)
set a bunch of netCDF variable attributes at once using a python dictionary.
@@ -3093,7 +3186,7 @@
In-memory (diskless) Datasets
getncattr(unknown):
-
+
getncattr(self,name)
retrieve a netCDF variable attribute. Use if you need to set a
@@ -3114,7 +3207,7 @@
In-memory (diskless) Datasets
delncattr(unknown):
-
+
delncattr(self,name,value)
delete a netCDF variable attribute. Use if you need to delete a
@@ -3132,7 +3225,7 @@
In-memory (diskless) Datasets
filters(unknown):
-
+
filters(self)
return dictionary containing HDF5 filter parameters.
@@ -3148,7 +3241,7 @@ In-memory (diskless) Datasets
quantization(unknown):
-
+
quantization(self)
return number of significant digits and the algorithm used in quantization.
@@ -3165,7 +3258,7 @@
In-memory (diskless) Datasets
endian(unknown):
-
+
endian(self)
return endian-ness (little,big,native
) of variable (as stored in HDF5 file).
@@ -3181,7 +3274,7 @@ In-memory (diskless) Datasets
chunking(unknown):
-
+
chunking(self)
return variable chunking information. If the dataset is
@@ -3200,7 +3293,7 @@
In-memory (diskless) Datasets
get_var_chunk_cache(unknown):
-
+
get_var_chunk_cache(self)
return variable chunk cache information in a tuple (size,nelems,preemption).
@@ -3218,7 +3311,7 @@
In-memory (diskless) Datasets
set_var_chunk_cache(unknown):
-
+
set_var_chunk_cache(self,size=None,nelems=None,preemption=None)
change variable chunk cache settings.
@@ -3236,7 +3329,7 @@
In-memory (diskless) Datasets
renameAttribute(unknown):
-
+
renameAttribute(self, oldname, newname)
rename a Variable
attribute named oldname
to newname
.
@@ -3252,7 +3345,7 @@ In-memory (diskless) Datasets
assignValue(unknown):
-
+
assignValue(self, val)
assign a value to a scalar variable. Provided for compatibility with
@@ -3269,7 +3362,7 @@
In-memory (diskless) Datasets
getValue(unknown):
-
+
getValue(self)
get the value of a scalar variable. Provided for compatibility with
@@ -3286,7 +3379,7 @@
In-memory (diskless) Datasets
set_auto_chartostring(unknown):
-
+
set_auto_chartostring(self,chartostring)
turn on or off automatic conversion of character variable data to and
@@ -3317,7 +3410,7 @@
In-memory (diskless) Datasets
use_nc_get_vars(unknown):
-
+
use_nc_get_vars(self,_use_get_vars)
enable the use of netcdf library routine nc_get_vars
@@ -3337,7 +3430,7 @@
In-memory (diskless) Datasets
set_auto_maskandscale(unknown):
-
+
set_auto_maskandscale(self,maskandscale)
turn on or off automatic conversion of variable data to and
@@ -3401,7 +3494,7 @@
In-memory (diskless) Datasets
set_auto_scale(unknown):
-
+
set_auto_scale(self,scale)
turn on or off automatic packing/unpacking of variable
@@ -3450,7 +3543,7 @@
In-memory (diskless) Datasets
set_auto_mask(unknown):
-
+
set_auto_mask(self,mask)
turn on or off automatic conversion of variable data to and
@@ -3485,7 +3578,7 @@
In-memory (diskless) Datasets
set_always_mask(unknown):
-
+
set_always_mask(self,always_mask)
turn on or off conversion of data without missing values to regular
@@ -3508,7 +3601,7 @@
In-memory (diskless) Datasets
set_ncstring_attrs(unknown):
-
+
set_ncstring_attrs(self,ncstring_attrs)
turn on or off creating NC_STRING string attributes.
@@ -3530,7 +3623,7 @@ In-memory (diskless) Datasets
set_collective(unknown):
-
+
set_collective(self,True_or_False)
turn on or off collective parallel IO access. Ignored if file is not
@@ -3547,7 +3640,7 @@
In-memory (diskless) Datasets
get_dims(unknown):
-
+
get_dims(self)
return a tuple of Dimension
instances associated with this
@@ -3559,9 +3652,10 @@
In-memory (diskless) Datasets
+
string name of Variable instance
@@ -3570,9 +3664,10 @@ In-memory (diskless) Datasets
+
numpy data type (for primitive data types) or
VLType/CompoundType/EnumType instance
(for compound, vlen or enum data types)
@@ -3583,9 +3678,10 @@ In-memory (diskless) Datasets
+
find current sizes of all variable dimensions
@@ -3594,9 +3690,10 @@ In-memory (diskless) Datasets
+
Return the number of stored elements.
@@ -3605,9 +3702,10 @@ In-memory (diskless) Datasets
+
get variables's dimension names
@@ -3616,55 +3714,61 @@ In-memory (diskless) Datasets
+
+
+
+
#  
- always_mask = <attribute 'always_mask' of 'netCDF4._netCDF4.Variable' objects>
+ always_mask
+
#  
- chartostring = <attribute 'chartostring' of 'netCDF4._netCDF4.Variable' objects>
+ chartostring
+
@@ -3677,7 +3781,7 @@ In-memory (diskless) Datasets
Dimension:
-
+
A netCDF Dimension
is used to describe the coordinates of a Variable
.
See Dimension.__init__
for more details.
@@ -3703,7 +3807,7 @@ In-memory (diskless) Datasets
Dimension()
-
+
__init__(self, group, name, size=None)
Dimension
constructor.
@@ -3729,7 +3833,7 @@ In-memory (diskless) Datasets
group(unknown):
-
+
group(self)
return the group that this Dimension
is a member of.
@@ -3745,7 +3849,7 @@ In-memory (diskless) Datasets
isunlimited(unknown):
-
+
isunlimited(self)
returns True
if the Dimension
instance is unlimited, False
otherwise.
@@ -3756,9 +3860,10 @@ In-memory (diskless) Datasets
+
string name of Dimension instance
@@ -3767,9 +3872,10 @@ In-memory (diskless) Datasets
+
current size of Dimension (calls len
on Dimension instance)
@@ -3785,7 +3891,7 @@ In-memory (diskless) Datasets
Group(netCDF4.Dataset):
-
+
Groups define a hierarchical namespace within a netCDF file. They are
analogous to directories in a unix filesystem. Each Group
behaves like
a Dataset
within a Dataset, and can contain its own variables,
@@ -3809,7 +3915,7 @@
In-memory (diskless) Datasets
Group()
-
+
__init__(self, parent, name)
Group
constructor.
@@ -3833,7 +3939,7 @@ In-memory (diskless) Datasets
close(unknown):
-
+
close(self)
overrides Dataset
close method which does not apply to Group
@@ -3876,6 +3982,10 @@
Inherited Members
get_variables_by_attributes
fromcdl
tocdl
+ has_blosc_filter
+ has_zstd_filter
+ has_bzip2_filter
+ has_szip_filter
name
groups
dimensions
@@ -3903,7 +4013,7 @@ Inherited Members
MFDataset(netCDF4.Dataset):
-
+
Class for reading multi-file netCDF Datasets, making variables
spanning multiple files appear as if they were in one file.
Datasets must be in NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET
@@ -3913,7 +4023,7 @@ Inherited Members
Example usage (See MFDataset.__init__
for more details):
->>> import numpy as np
+>>> import numpy as np
>>> # create a series of netCDF files with a variable sharing
>>> # the same unlimited dimension.
>>> for nf in range(10):
@@ -3940,7 +4050,7 @@ Inherited Members
MFDataset(files, check=False, aggdim=None, exclude=[], master_file=None)
-
+
__init__(self, files, check=False, aggdim=None, exclude=[],
master_file=None)
@@ -3985,7 +4095,7 @@ Inherited Members
ncattrs(self):
-
+
ncattrs(self)
return the netcdf attribute names from the master file.
@@ -4001,7 +4111,7 @@ Inherited Members
close(self):
-
+
close(self)
close all the open files.
@@ -4042,6 +4152,10 @@ Inherited Members
get_variables_by_attributes
fromcdl
tocdl
+ has_blosc_filter
+ has_zstd_filter
+ has_bzip2_filter
+ has_szip_filter
name
groups
dimensions
@@ -4069,13 +4183,13 @@ Inherited Members
MFTime(netCDF4._netCDF4._Variable):
-
+
Class providing an interface to a MFDataset time Variable by imposing a unique common
time unit and/or calendar to all files.
Example usage (See MFTime.__init__
for more details):
->>> import numpy as np
+