diff --git a/docs/conf.py b/docs/conf.py
index 3f65b21b9a3..6141df3dfe6 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -392,3 +392,5 @@
# we are using non local images for badges. These will change so we dont
# want to store them locally.
suppress_warnings = ['image.nonlocal_uri']
+
+numfig = True
diff --git a/docs/dataset/dataset_design.rst b/docs/dataset/dataset_design.rst
new file mode 100644
index 00000000000..809de9b65a8
--- /dev/null
+++ b/docs/dataset/dataset_design.rst
@@ -0,0 +1,75 @@
+.. highlight:: python
+
+==============
+Dataset Design
+==============
+
+.. _sec:design_introduction:
+
+Introduction
+============
+
+.. _datasetdiagram:
+.. figure:: figures/datasetdiagram.svg
+ :align: center
+ :width: 100%
+
+ Basic workflow
+
+This document aims to explain the design and working of the QCoDeS DataSet.
+In :numref:`datasetdiagram` we sketch the basic design of the dataset.
+The dataset implementation is organised in 3 layers shown vertically in
+:numref:`datasetdiagram`. Each of the layers implements functionality for
+reading and writing to the dataset. The layers are organised hierarchically
+with the top most one implementing a high level interface and the lowest
+layer implementing the communication with the database. This is done in order
+to balance two competing requirements. On one hand the dataset should
+be easy to use, enabling simple functionality for performing
+standard measurements with a minimum of typing. On the other hand the dataset
+should enable users to perform any measurement that they may find useful.
+It should not force the user into a specific measurement pattern that may be
+suboptimal for more advanced use cases. Specifically it should be possible to
+formulate any experiment as python code using standard language constructs
+(for and while loops among others) with a minimal effort.
+
+The legacy QCoDeS dataset ``qcodes.data`` and loop ``qcodes.Loop`` is
+primarily oriented towards ease of use for the standard use case but makes
+it challenging to formulate more complicated experiments without significant
+work reformatting the experiments in a counterintuitive way.
+
+
+The QCoDeS dataset currently implements two
+interfaces directly targeting end users. It is not expected that the user
+of QCoDeS will need to interface directly with the lowest layer communicating
+with the database.
+
+The ``dataset`` layer defined in the :ref:`dataset-spec` provides the most
+flexible user facing layer, but requires users
+to manually register `ParamSpecs`. The dataset implements two functions for
+inserting one or more rows of data into the dataset and immediately writes it
+to disk. It is, however, the user's responsibility to ensure good performance
+by writing to disk at suitable intervals.
+
+The measurement context manager layer provides additional support for flushing
+data to disk at selected intervals for better performance without manual
+intervention. It also provides easy registration of ParamSpecs on
+the basis of QCoDeS parameters or custom parameters.
+
+But importantly it does not:
+
+* Automatically infer the relationship between dependent and independent
+ parameters. The user must supply this metadata for correct plotting.
+* Automatically register parameters.
+* Enforce any structure on the measured data (1D, on a grid, etc.).
+  This may make plotting more difficult as any structure will have to
+  be inferred from the data.
+
+It is envisioned that a future layer is added on top of the existing layers
+to automatically register parameters and save data at the cost of being
+able to write the measurement routine as pure python functions.
+
+We note that the dataset currently exclusively supports storing data in an
+SQLite database. This is not an intrinsic limitation of the dataset and
+measurement layer. It is possible that at a future stage support for writing
+to a different backend will be added.
+
diff --git a/docs/dataset/figures/bad_trees.pdf b/docs/dataset/figures/bad_trees.pdf
new file mode 100644
index 00000000000..b92a54cdb5f
Binary files /dev/null and b/docs/dataset/figures/bad_trees.pdf differ
diff --git a/docs/dataset/figures/bad_trees.svg b/docs/dataset/figures/bad_trees.svg
new file mode 100644
index 00000000000..274efc947ce
--- /dev/null
+++ b/docs/dataset/figures/bad_trees.svg
@@ -0,0 +1,173 @@
+
+
diff --git a/docs/dataset/figures/bad_trees_remedied.pdf b/docs/dataset/figures/bad_trees_remedied.pdf
new file mode 100644
index 00000000000..522216425b7
Binary files /dev/null and b/docs/dataset/figures/bad_trees_remedied.pdf differ
diff --git a/docs/dataset/figures/bad_trees_remedied.svg b/docs/dataset/figures/bad_trees_remedied.svg
new file mode 100644
index 00000000000..58587680c7e
--- /dev/null
+++ b/docs/dataset/figures/bad_trees_remedied.svg
@@ -0,0 +1,262 @@
+
+
diff --git a/docs/dataset/figures/datasetdiagram.puml b/docs/dataset/figures/datasetdiagram.puml
new file mode 100644
index 00000000000..84530cb9a43
--- /dev/null
+++ b/docs/dataset/figures/datasetdiagram.puml
@@ -0,0 +1,39 @@
+@startuml
+
+package "measurements.py" {
+ [DataSaver]
+ [Runner]
+ [Measurement]
+}
+package "data_set.py" {
+ [DataSet]
+}
+package "sqlite_base.py" {
+ [sqlite functions]
+}
+
+package "experiment_container.py" {
+ [Experiment]
+}
+
+package "param_spec.py" {
+ [ParamSpec]
+}
+
+database "SQLite" {
+ [experiment.db]
+}
+
+[Measurement] -> [Runner] : Calling 'run' creates:
+[Measurement] --> [ParamSpec] : Registers instances of:
+[Runner] --> [DataSet] : '~__enter~__' creates:\n'~__exit~__' flushes:
+[Runner] -> [DataSaver] : '~__enter~__' returns:
+[Runner] --> [Experiment] : Creates DataSet with ref to:
+[DataSaver] --> [DataSet] : Stores data via:
+[DataSet] -> [ParamSpec] : Holds instances of:
+[DataSet] --> [sqlite functions] : Inserts data into DB
+[Experiment] --> [sqlite functions] : Creates experiments in DB
+[sqlite functions] --> [experiment.db] : SQL calls
+
+
+@enduml
diff --git a/docs/dataset/figures/datasetdiagram.svg b/docs/dataset/figures/datasetdiagram.svg
new file mode 100644
index 00000000000..cf20179c321
--- /dev/null
+++ b/docs/dataset/figures/datasetdiagram.svg
@@ -0,0 +1,52 @@
+
\ No newline at end of file
diff --git a/docs/dataset/figures/dependencies_01.pdf b/docs/dataset/figures/dependencies_01.pdf
new file mode 100644
index 00000000000..6feed632783
Binary files /dev/null and b/docs/dataset/figures/dependencies_01.pdf differ
diff --git a/docs/dataset/figures/dependencies_01.svg b/docs/dataset/figures/dependencies_01.svg
new file mode 100644
index 00000000000..0ec76e5e4d6
--- /dev/null
+++ b/docs/dataset/figures/dependencies_01.svg
@@ -0,0 +1,238 @@
+
+
diff --git a/docs/dataset/figures/dependencies_02.pdf b/docs/dataset/figures/dependencies_02.pdf
new file mode 100644
index 00000000000..a597dc46d6c
Binary files /dev/null and b/docs/dataset/figures/dependencies_02.pdf differ
diff --git a/docs/dataset/figures/dependencies_02.svg b/docs/dataset/figures/dependencies_02.svg
new file mode 100644
index 00000000000..50b6a88039f
--- /dev/null
+++ b/docs/dataset/figures/dependencies_02.svg
@@ -0,0 +1,233 @@
+
+
diff --git a/docs/dataset/figures/example1_conductance.pdf b/docs/dataset/figures/example1_conductance.pdf
new file mode 100644
index 00000000000..0a27b1123b4
Binary files /dev/null and b/docs/dataset/figures/example1_conductance.pdf differ
diff --git a/docs/dataset/figures/example1_conductance.svg b/docs/dataset/figures/example1_conductance.svg
new file mode 100644
index 00000000000..46689ccb9cb
--- /dev/null
+++ b/docs/dataset/figures/example1_conductance.svg
@@ -0,0 +1,209 @@
+
+
diff --git a/docs/dataset/figures/example2_compensation_A.pdf b/docs/dataset/figures/example2_compensation_A.pdf
new file mode 100644
index 00000000000..9800b9c10f0
Binary files /dev/null and b/docs/dataset/figures/example2_compensation_A.pdf differ
diff --git a/docs/dataset/figures/example2_compensation_A.svg b/docs/dataset/figures/example2_compensation_A.svg
new file mode 100644
index 00000000000..0c53c62a230
--- /dev/null
+++ b/docs/dataset/figures/example2_compensation_A.svg
@@ -0,0 +1,209 @@
+
+
diff --git a/docs/dataset/figures/example2_compensation_B.pdf b/docs/dataset/figures/example2_compensation_B.pdf
new file mode 100644
index 00000000000..e43b5fc10c6
Binary files /dev/null and b/docs/dataset/figures/example2_compensation_B.pdf differ
diff --git a/docs/dataset/figures/example2_compensation_B.svg b/docs/dataset/figures/example2_compensation_B.svg
new file mode 100644
index 00000000000..4765c53d721
--- /dev/null
+++ b/docs/dataset/figures/example2_compensation_B.svg
@@ -0,0 +1,218 @@
+
+
diff --git a/docs/dataset/figures/good_trees_complex.pdf b/docs/dataset/figures/good_trees_complex.pdf
new file mode 100644
index 00000000000..36b19fffd7b
Binary files /dev/null and b/docs/dataset/figures/good_trees_complex.pdf differ
diff --git a/docs/dataset/figures/good_trees_complex.svg b/docs/dataset/figures/good_trees_complex.svg
new file mode 100644
index 00000000000..b54a9440507
--- /dev/null
+++ b/docs/dataset/figures/good_trees_complex.svg
@@ -0,0 +1,276 @@
+
+
diff --git a/docs/dataset/figures/sqlite_layout.pdf b/docs/dataset/figures/sqlite_layout.pdf
new file mode 100644
index 00000000000..311a54ba524
Binary files /dev/null and b/docs/dataset/figures/sqlite_layout.pdf differ
diff --git a/docs/dataset/figures/sqlite_layout.xlsx b/docs/dataset/figures/sqlite_layout.xlsx
new file mode 100644
index 00000000000..5ac46294a1d
Binary files /dev/null and b/docs/dataset/figures/sqlite_layout.xlsx differ
diff --git a/docs/dataset/index.rst b/docs/dataset/index.rst
new file mode 100644
index 00000000000..d25892f4e26
--- /dev/null
+++ b/docs/dataset/index.rst
@@ -0,0 +1,11 @@
+.. _dataset:
+
+DataSet
+=======
+
+.. toctree::
+ :maxdepth: 2
+
+ spec
+ dataset_design
+ interdependentparams
diff --git a/docs/dataset/interdependentparams.rst b/docs/dataset/interdependentparams.rst
new file mode 100644
index 00000000000..43c43f45659
--- /dev/null
+++ b/docs/dataset/interdependentparams.rst
@@ -0,0 +1,444 @@
+.. highlight:: python
+
+=========================
+Interdependent Parameters
+=========================
+
+.. _sec:introduction:
+
+Introduction
+============
+
+At the heart of a measurement lies the concept of dependent and
+independent variables. A physics experiment consists in its core of
+varying something and observing how something else changes depending on
+that first varied thing. For the QCoDeS dataset to be a faithful
+representation of actual physics experiments, the dataset must preserve
+this notion of dependencies. In this small note, we present some
+thoughts on this subject and present the current state of the dataset.
+
+.. _sec:sett-gener-stage:
+
+Setting the general stage
+=========================
+
+In the general case, an experiment looks as follows. We seek to study
+how :math:`B` depends on :math:`A`. Unfortunately, we can neither set
+:math:`A` nor measure :math:`B`. What we *can* do, however, is to vary
+:math:`n` parameters :math:`x_1,x_2,\ldots,x_n` (:math:`\boldsymbol{x}`
+for brevity) and make the assumption that :math:`A=A(\boldsymbol{x})`.
+Similarly, we have access to measure :math:`m` other parameters,
+:math:`y_1,y_2,\ldots,y_m` (:math:`\boldsymbol{y}` for brevity) and
+assume that :math:`B=B(\boldsymbol{y})`. It generally holds that each
+:math:`y_i` depends on :math:`\boldsymbol{x}`, although many such
+dependencies may be trivial [1]_. Given :math:`\boldsymbol{x}` and
+:math:`\boldsymbol{y}` (i.e. a laboratory) it is by no means an easy
+exercise to find a relation :math:`A(B)` for which the above assumptions
+hold. That search is indeed the whole exercise of experimental physics,
+but as far as QCoDeS and the dataset is concerned, we must take for
+granted that :math:`A` and :math:`B` exist and satisfy the assumptions.
+
+.. _sec:good-scient-pract:
+
+Good scientific practice and measurement intentions
+===================================================
+
+In this section, we assume :math:`A` and :math:`B` to be scalars. We
+treat the general case in the next section.
+
+In a measurement of :math:`B` versus :math:`A`, it seems tempting to
+simply only write down the values of :math:`A` and :math:`B`, declare
+that :math:`A` is abscissa for :math:`B`, and make a nice plot.
+Responsible scientific conduct principles however urge us to write down
+*everything we did*, which in terms of data saving amounts to also
+storing :math:`\boldsymbol{x}` and :math:`\boldsymbol{y}`. At the same
+time, we would like the dataset to reflect the *intention* of
+measurement, meaning what the measurement is supposed to be about,
+namely that it measures :math:`B` versus :math:`A`. Currently, this is
+handled by the dataset by declaring that :math:`B` *depends on*
+:math:`A` whereas :math:`A` is *inferred from* :math:`\boldsymbol{x}`
+and :math:`B` is *inferred from* :math:`\boldsymbol{y}`. In code, we set
+up the measurement like
+
+::
+
+ meas = Measurement()
+ meas.register_parameter(x1)
+ meas.register_parameter(x2)
+ meas.register_parameter(x3) # and so on
+ meas.register_parameter(y1)
+ meas.register_parameter(y2)
+ meas.register_parameter(y3) # etc
+ meas.register_parameter(A, inferred_from=(x1, x2, x3))
+ meas.register_parameter(B, depends_on=(A,),
+ inferred_from=(y1, y2, y3))
+
+This is shown graphically in :numref:`fig_gen`.
+
+.. _fig_gen:
+.. figure:: figures/dependencies_01.svg
+ :alt: A drawing of the general setting
+ :align: center
+ :width: 60.0%
+
+ A drawing of the general setting
+
+The default plotter included in the dataset will understand the
+dependencies and plot :math:`B` versus :math:`A`.
+
+.. _sec:higher-dimension:
+
+Higher dimension
+================
+
+In the previous section, :math:`A` was assumed to be a scalar. In the
+general case, the true independent variables :math:`\boldsymbol{x}` can
+be grouped together in :math:`k` different variables,
+:math:`A_1,\ldots,A_k` that represent the intention of the measurement.
+An example would be a heatmap plotting a demodulated signal as a
+function of two gate voltage axes. To describe a measurement of
+:math:`B` as :math:`A_1` and :math:`A_2` are varied, we set up the
+measurement like
+
+::
+
+ meas = Measurement()
+ meas.register_parameter(x1)
+ meas.register_parameter(x2) # and so on
+ meas.register_parameter(y1)
+ meas.register_parameter(y2) # etc
+ meas.register_parameter(A1, inferred_from=(x1, x2))
+ meas.register_parameter(A2, inferred_from=(x1, x2))
+ meas.register_parameter(B, depends_on=(A1, A2),
+ inferred_from=(y1, y2))
+
+Graphically:
+
+.. _fig_dep_02:
+.. figure:: figures/dependencies_02.svg
+ :alt: A heatmap
+ :align: center
+ :width: 60.0%
+
+ A heatmap
+
+It may of course very well be that e.g. :math:`A_1=x_1` in which case
+there is no point in having an inferred parameter for :math:`A_1`.
+
+.. _sec:that-really-necess:
+
+Is that really necessary?
+=========================
+
+It should be clear that the ``inferred_from`` notion is a kind of
+metadata. It describes a relation between the raw values that the
+experimentalist can control and the desired outcome of an experiment. It
+is **not** required by the dataset to have any inferred variables, but
+we stress that it is unscientific to throw away raw measurement data.
+Whatever raw values are recorded should thus be saved along with the
+“interesting” parameter values, and the ``inferred_from`` tagging is
+simply a way of declaring what is derived from where.
+
+In a perfect world, an auxiliary laboratory notebook contains all the
+information needed to *exactly* reproduce the experiment, and the
+dataset needs only store the numerical values of parameters and nothing
+else. In a sort of pragmatic recognition of how actual laboratories
+usually work, we have decided to put some metadata directly into the
+dataset. Specifically, we want the dataset to be able to hold
+information about
+
+- What the experimenter wishes to study as a function of what
+ (expressed via ``depends_on``).
+
+- What corresponds to a raw machine setting/reading (expressed via
+ ``inferred_from``).
+
+As complexity of the experiments grow, the second notion can be
+difficult to uphold. It is offered as a help to ensure good scientific
+practice.
+
+It is important to note that the dataset can freely be used without
+*any* declarations of dependencies of either sort.
+
+.. _sec:plotting:
+
+Plotting
+========
+
+Besides being optional metadata describing the correct interpretation of
+measurement data, the direct dependencies (expressed via ``depends_on``)
+are used to generate the *default plot*. We estimate that for the vast
+majority of measurements to be stored in the dataset, the
+experimentalist will want to be able to plot the data as they are coming
+in and also have the ability to quickly bring up a plot of a particular
+measurement without specifying more than the id of said measurement.
+This necessitates the declaration, in the dataset itself, of what should
+be plotted against what. The direct dependencies can thus be understood
+in the following way: :math:`A` depends on :math:`B` and :math:`C` means
+that the default plot is of :math:`A` with :math:`B` on one axis and
+:math:`C` on the other.
+
+Although visual plotting is not tractable for an arbitrary amount of
+axes, we promote the principle of having a default plot to be a logical
+principle about which dependencies we allow: only those resulting in a
+meaningful (perhaps :math:`N`-dimensional) default plot are allowed.
+
+.. _sec:all-possible-trees:
+
+All possible trees
+==================
+
+Now that we have established a language for describing connections
+between parameters, and also described our aim in terms of plotting and
+metadata, let us review what the dataset does and does not allow.
+
+It follows from the consideration of section :ref:`sec:plotting` that
+the dataset allows for a *single* layer of direct dependencies. The
+trees shown in :numref:`bad_trees` are therefore
+all invalid and can **not** be stored in the dataset.
+
+.. _bad_trees:
+.. figure:: figures/bad_trees.svg
+ :alt: Not acceptable direct dependencies
+ :align: center
+ :width: 75.0%
+
+ Not acceptable direct dependencies
+
+A few words explaining why are in order.
+
+#. Circular dependence. There is no way of telling what is varied and
+ what is measured.
+
+#. Independent parameters not independent. Although :math:`A` clearly
+ sits on top of the tree, the two independent variables are not
+ independent. It is not clear whether :math:`C` is being varied or
+ measured. It is ambiguous whether this describes one plot of
+ :math:`A` with :math:`B` and :math:`C` as axes or two plots, one of
+ :math:`A` versus :math:`B` and another of :math:`C` versus :math:`B`
+ or even both situations at once.
+
+#. Similarly to situation 2, :math:`C` is ill-defined.
+
+#. :math:`B` is ill-defined, and it is not clear what :math:`A` should
+ be plotted against.
+
+It is perhaps instructive to see how the above trees could be remedied.
+In :numref:`bad_trees_remedied` we show
+all possible valid reconfigurations that neither invert any arrows nor
+leave any parameters completely decoupled [2]_. The fact that each tree
+of :numref:`bad_trees` has several valid
+reconfigurations exactly illustrates the ambiguity of those trees [3]_.
+
+In column **c** of
+:numref:`bad_trees_remedied` we see two
+somewhat new graphs. In **2c**, we allow two variables to depend on a
+third one. There is no ambiguity here, two plots will result from this
+measurement: :math:`A` versus :math:`B` and :math:`C` versus :math:`B`.
+Similarly, in **3c** we’ll get :math:`A` versus :math:`B` and :math:`C`
+versus :math:`D`. The total number of trees and plots per dataset is
+treated in the next section.
+
+.. _bad_trees_remedied:
+.. figure:: figures/bad_trees_remedied.svg
+ :alt: Acceptable recastings of the dependencies of :numref:`bad_trees`. The pathological tree 1 is omitted.
+ :align: center
+ :width: 85.0%
+
+ Acceptable recastings of the dependencies of
+ :numref:`bad_trees`. The pathological tree 1 is
+ omitted.
+
+.. _sec:number-trees-per:
+
+Number of trees per dataset
+===========================
+
+The dataset can hold an arbitrary number of “top-level” parameters,
+meaning parameters with arrows only going out of them, parameters on
+which nothing depends. At each step of the experiment, all parameters
+that such a top-level parameter points to must be assigned values, if
+the top-level parameter gets assigned a value. Otherwise, they may be
+omitted. What this means in practice is illustrated in
+:numref:`good_trees_complex`.
+
+.. _good_trees_complex:
+.. figure:: figures/good_trees_complex.svg
+ :alt: A more complex sweep example. The blue rectangles represent the results table in the database.
+ :align: center
+ :width: 85.0%
+
+ A more complex sweep example. The blue rectangles represent the
+ results table in the database.
+
+We may say that this dataset de facto contains two trees, one
+:math:`A-B-D` tree and one :math:`C-B` tree [4]_ . One dataset can hold
+as many such trees as desired. In code,
+:numref:`good_trees_complex` might take the following form:
+
+::
+
+ meas = Measurement()
+ meas.register_parameter(D)
+ meas.register_parameter(B)
+ meas.register_parameter(A, depends_on=(B, D))
+ meas.register_parameter(C, depends_on=(B,))
+
+ with meas.run() as datasaver:
+ for b_val in b_vals:
+ for d_val in d_vals:
+ B.set(b_val)
+ D.set(d_val)
+ a_val = A.get()
+ datasaver.add_result((A, a_val),
+ (B, b_val),
+ (D, d_val))
+ c_val = C.get()
+ datasaver.add_result((C, c_val),
+ (B, b_val))
+
+.. _sec:few-examples:
+
+A few examples
+==============
+
+Finally, to offer some intuition for the dataset’s dependency structure,
+we cast a few real-life examples of measurements into tree diagrams.
+
+.. _sec:cond-meas:
+
+Conductance measurement
+-----------------------
+
+In a conductance measurement measuring conductance as a function of gate
+voltage, a gate voltage, :math:`V_\text{gate}`, is swept while a lock-in
+amplifier drives the DUT at a certain frequency with a drive amplitude
+:math:`V_\text{drive}`. The drive induces a current which oscillates at
+the drive frequency. An I-V converter converts that oscillating current
+back into an oscillating voltage (with a certain gain factor,
+:math:`G_{IV}`, with units :math:`A/V`), and that voltage is fed back
+into the lock-in. Assuming no phase shift, the lock-in amplifier’s
+:math:`X` reading is then related to the conductance, :math:`g`,
+according to
+
+.. math:: g = \frac{X}{V_\text{drive}G_{IV}}.
+
+The corresponding parameter tree is shown in
+:numref:`example1_conductance`, where :math:`A` is :math:`g`,
+:math:`B` is :math:`V_\text{gate}`, and :math:`C` is :math:`X`. One
+could of course argue that :math:`V_\text{drive}` and :math:`G_{IV}`
+should also be parameters that :math:`g` is inferred from. We suggest
+the following rule: anything that is known beforehand to remain constant
+throughout the *entire* run can be omitted from the dataset and written
+down elsewhere [5]_. The converse also holds: anything that *does*
+change during a run really *should* be saved along.
+
+.. _example1_conductance:
+.. figure:: figures/example1_conductance.svg
+ :alt: Conductance measurement.
+ :align: center
+ :width: 40.0%
+
+ Conductance measurement.
+
+.. _sec:comp-sweep:
+
+Compensatory sweeping
+---------------------
+
+An interesting example that potentially does *not* fit so nicely into
+our scheme is offered by compensatory sweeping. A voltage, :math:`V_1`
+is swept and a quantity :math:`S` is measured. Since sweeping
+:math:`V_1` has some undesired effect on the physical system, a
+compensatory change of another voltage, :math:`V_2` is performed at the
+same time. :math:`V_2` changes with :math:`V_1` according to
+
+.. math:: V_2 = \alpha V_1 + \beta.
+
+Since both :math:`\alpha` and :math:`\beta` might change during the run
+via some feedback mechanism, we have four parameters apart from :math:`S`
+to sort out.
+
+There are two ways to go about this.
+
+.. _sec:decoupling:
+
+Decoupling
+~~~~~~~~~~
+
+If the experimentalist really insists that the interesting plot for this
+measurement is that of :math:`S` versus :math:`V_1` and the compensation
+is just some unfortunate but necessary circumstance, then the unusual
+tree of :numref:`example2_compensation_A` is the correct
+representation.
+
+.. _example2_compensation_A:
+.. figure:: figures/example2_compensation_A.svg
+ :alt: Sweeping a voltage with compensation in the background. :math:`A` is :math:`V_1`, :math:`B` is :math:`S`, :math:`D` is :math:`V_2`, :math:`C` is :math:`\alpha`, and :math:`E` is :math:`\beta`.
+ :align: center
+ :width: 30.0%
+
+ Sweeping a voltage with compensation in the background. :math:`A` is
+ :math:`V_1`, :math:`B` is :math:`S`, :math:`D` is :math:`V_2`,
+ :math:`C` is :math:`\alpha`, and :math:`E` is :math:`\beta`.
+
+The tree of :numref:`example2_compensation_A` does fit into the
+scheme of :numref:`fig_gen`, the scheme we promised to
+represent the most general setting. There are now two possibilities.
+Either *we* were initially wrong and no dependencies save for those
+specifying the default plot can be defined for this measurement. Else
+*the experimentalist* is wrong, and has an untrue representation of the
+experiment in mind. We explore that idea below in :ref:`sec:restructuring`.
+
+.. _sec:restructuring:
+
+Restructuring
+~~~~~~~~~~~~~
+
+If the space spanned by :math:`V_1` and :math:`V_2` has a meaningful
+physical interpretation [6]_, it might make more sense to define a new
+parameter, :math:`V_3` that represents the path swept along in that
+space. After all, this is what is *physically* happening,
+:math:`S` is measured as a function of :math:`V_3`. Then the tree of
+:numref:`example2_compensation_B` emerges.
+
+.. _example2_compensation_B:
+.. figure:: figures/example2_compensation_B.svg
+ :alt: Sweeping along a path in voltage space. :math:`A` is :math:`V_1`, :math:`B` is :math:`S`, :math:`D` is :math:`V_2`, :math:`C` is :math:`\alpha`, :math:`E` is :math:`\beta`, and :math:`F` is :math:`V_3`.
+ :align: center
+ :width: 30.0%
+
+ Sweeping along a path in voltage space. :math:`A` is :math:`V_1`,
+ :math:`B` is :math:`S`, :math:`D` is :math:`V_2`, :math:`C` is
+ :math:`\alpha`, :math:`E` is :math:`\beta`, and :math:`F` is
+ :math:`V_3`.
+
+.. [1]
+ That is to say, for many :math:`(i, j)`, it holds that
+ :math:`\frac{\partial y_i}{\partial x_j}=0`.
+
+.. [2]
+ We repeat that the dataset can hold an arbitrary amount of decoupled
+ parameters. For illustrative and combinatorial reasons (there’d
+ simply be too many trees!), we omit decoupled parameters here.
+
+.. [3]
+ Note that the ambiguity could be resolved by enforcing particular
+ rules of interpretation. Here we have made the *design choice* of
+ disallowing ambiguity in the first instance.
+
+.. [4]
+ We note for completeness that the values for, say,
+ :math:`b_1, b_2, \ldots` need not be different at each step. Perhaps
+ :math:`B` represents a voltage that is kept constant as :math:`D` is
+ varied.
+
+.. [5]
+ E.g. the station snapshot (in which case it is actually still in the
+ dataset but not in the results table) or even a laboratory logbook
+ describing the equipment.
+
+.. [6]
+ Say, for instance, :math:`V_1` and :math:`V_2` are drain and gate
+ voltages and the experimentalist wants to sweep along a certain path
+ inside a skewed Coulomb diamond.
diff --git a/specs/DataSet.rst b/docs/dataset/spec.rst
similarity index 88%
rename from specs/DataSet.rst
rename to docs/dataset/spec.rst
index 2227ac91014..a9654c0d7c0 100644
--- a/specs/DataSet.rst
+++ b/docs/dataset/spec.rst
@@ -1,3 +1,5 @@
+.. _dataset-spec:
+
=====================
DataSet Specification
=====================
@@ -27,13 +29,13 @@ Metadata
Parameter
A logically-single value input to or produced by a measurement.
A parameter need not be a scalar, but can be an array or a tuple or an array of tuples, etc.
- A DataSet parameter corresponds conceptually to a QCoDeS parameter, but does not have to be defined by or associated with a QCoDeS Parameter .
+ A DataSet parameter corresponds conceptually to a QCoDeS parameter, but does not have to be defined by or associated with a QCoDeS Parameter .
Roughly, a parameter represents a column in a table of experimental data.
-
+
Result
A result is the collection of parameter values associated to a single measurement in an experiment.
Roughly, a result corresponds to a row in a table of experimental data.
-
+
DataSet
A DataSet is a QCoDeS object that stores the results of an experiment.
Roughly, a DataSet corresponds to a table of experimental data, along with metadata that describes the data.
@@ -58,7 +60,7 @@ Basics
Creation
------------
-#. It should be possible to create a DataSet without knowing the final item count of the various values it stores.
+#. It should be possible to create a DataSet without knowing the final item count of the various values it stores.
In particular, the number of loop iterations for a sweep should not be required to create the DataSet.
#. The list of parameters in each result to be stored in a DataSet may be specified at creation time.
This includes the name, role (set-point or output), and type of each parameter.
@@ -66,13 +68,13 @@ Creation
#. It should be possible to add a new parameter to an in-progress DataSet.
#. It should be possible to define a result parameter that is independent of any QCoDeSParameter or Instrument.
#. A QCoDeS Parameter should provide sufficient information to define a result parameter.
-#. A DataSet should allow storage of relatively arbitrary metadata describing the run that
+#. A DataSet should allow storage of relatively arbitrary metadata describing the run that
generated the results and the parameters included in the results.
- Essentially, DataSet metadata should be a string-keyed dictionary at the top,
+ Essentially, DataSet metadata should be a string-keyed dictionary at the top,
and should allow storage of any JSON-encodable data.
#. The DataSet identifier should be automatically stored in the DataSet's metadata under the "id" tag.
-
+
Writing
----------
@@ -115,23 +117,23 @@ ParamSpec
A ParamSpec object specifies a single parameter in a DataSet.
-ParamSpec(name, type, metadata=)
- Creates a parameter specification with the given name and type.
+``ParamSpec(name, type, metadata=)``
+ Creates a parameter specification with the given name and type.
The type should be a NumPy dtype object.
-
+
If metadata is provided, it is included in the overall metadata of the DataSet.
The metadata can be any JSON-able object.
-
-ParamSpec.name
+
+``ParamSpec.name``
The name of this parameter.
-
-ParamSpec.type
+
+``ParamSpec.type``
The dtype of this parameter.
-
-ParamSpec.metadata
+
+``ParamSpec.metadata``
The metadata of this parameter.
This should be an empty dictionary as a default.
-
+
Either the QCoDeS Parameter class should inherit from ParamSpec, or the Parameter class should provide
a simple way to get a ParamSpec for the Parameter.
@@ -141,71 +143,71 @@ DataSet
Construction
------------
-DataSet(name)
+``DataSet(name)``
Creates a DataSet with no parameters.
The name should be a short string that will be part of the DataSet's identifier.
-DataSet(name, specs)
+``DataSet(name, specs)``
Creates a DataSet for the provided list of parameter specifications.
The name should be a short string that will be part of the DataSet's identifier.
Each item in the list should be a ParamSpec object.
-
-DataSet(name, specs, values)
+
+``DataSet(name, specs, values)``
Creates a DataSet for the provided list of parameter specifications and values.
The name should be a short string that will be part of the DataSet's identifier.
Each item in the specs list should be a ParamSpec object.
- Each item in the values list should be a NumPy array or a Python list of values for the corresponding ParamSpec.
- There should be exactly one item in the values list for every item in the specs list.
- All of the arrays/lists in the values list should have the same length.
- The values list may intermix NumPy arrays and Python lists.
+ Each item in the values list should be a NumPy array or a Python list of values for the corresponding ParamSpec.
+ There should be exactly one item in the values list for every item in the specs list.
+ All of the arrays/lists in the values list should have the same length.
+ The values list may intermix NumPy arrays and Python lists.
-DataSet.add_parameter(spec)
+``DataSet.add_parameter(spec)``
Adds a parameter to the DataSet.
The spec should be a ParamSpec object.
If the DataSet is not empty, then existing results will have the type-appropriate null value for the new parameter.
-
+
It is an error to add parameters to a completed DataSet.
-DataSet.add_parameters(specs)
+``DataSet.add_parameters(specs)``
Adds a list of parameters to the DataSet.
Each item in the list should be a ParamSpec object.
If the DataSet is not empty, then existing results will have the type-appropriate null value for the new parameters.
-
+
It is an error to add parameters to a completed DataSet.
-DataSet.add_metadata(tag=, metadata=)
+``DataSet.add_metadata(tag=, metadata=)``
Adds metadata to the DataSet.
The metadata is stored under the provided tag.
- If there is already metadata under the provided tag, the new metadata replaces the old metadata.
+ If there is already metadata under the provided tag, the new metadata replaces the old metadata.
The metadata can be any JSON-able object.
Writing
-------
-DataSet.add_result(**kwargs)
+``DataSet.add_result(**kwargs)``
Adds a result to the DataSet.
Keyword parameters should have the name of a parameter as the keyword and the value to associate as the value.
If there is only one positional parameter and it is a dictionary, then it is interpreted as a map from parameter name to parameter value.
Returns the zero-based index in the DataSet that the result was stored at; that is, it returns the length of the DataSet before the addition.
-
+
It is an error to provide a value for a key or keyword that is not the name of a parameter in this DataSet.
-
+
It is an error to add a result to a completed DataSet.
-DataSet.add_results(args)
+``DataSet.add_results(args)``
Adds a sequence of results to the DataSet.
The single argument should be a sequence of dictionaries, where each dictionary provides the values for all of the parameters in that result.
See the add_result method for a description of such a dictionary.
The order of dictionaries in the sequence will be the same as the order in which they are added to the DataSet.
-
+
Returns the zero-based index in the DataSet that the first result was stored at; that is, it returns the length of the DataSet before the addition.
-
+
It is an error to provide a value for a key or keyword that is not the name of a parameter in this DataSet.
-
+
It is an error to add results to a completed DataSet.
-DataSet.modify_result(index, **kwargs)
+``DataSet.modify_result(index, **kwargs)``
Modifies a result in the DataSet.
The index should be the zero-based index of the result to be modified.
Keyword parameters should have the name of a parameter as the keyword and the updated value to associate as the value.
@@ -215,96 +217,96 @@ DataSet.modify_result(index, **kwargs)
To remove a parameter from a result, map it to None.
It is an error to modify a result at an index less than zero or beyond the end of the DataSet.
-
+
It is an error to provide a value for a key or keyword that is not the name of a parameter in this DataSet.
-
+
It is an error to modify a result in a completed DataSet.
-DataSet.modify_results(start_index, updates)
+``DataSet.modify_results(start_index, updates)``
Modifies a sequence of results in the DataSet.
The start_index should be the zero-based index of the first result of the sequence to be modified.
- The updates argument should be a sequence of dictionaries, where each dictionary provides modified values for parameters
+ The updates argument should be a sequence of dictionaries, where each dictionary provides modified values for parameters
as a map from parameter name to parameter value.
See the modify_result method for a description of such a dictionary.
The order of dictionaries in the sequence will be the same as the order in which they are applied to the DataSet.
-
+
  Any parameters that were specified in an original result that do not appear in the corresponding modification are left unchanged.
To remove a parameter from a result, map it to None.
It is an error to modify a result at an index less than zero or beyond the end of the DataSet.
-
+
It is an error to provide a value for a key or keyword that is not the name of a parameter in this DataSet.
-
+
It is an error to modify results in a completed DataSet.
-DataSet.add_parameter_values(spec, values)
- Adds a parameter to the DataSet and associates result values with the new parameter.
- The values must be a NumPy array or a Python list, with each element holding a single result value that matches the parameter's data type.
- If the DataSet is not empty, then the count of provided values must equal the current count of results in the DataSet, or an error will result.
-
+``DataSet.add_parameter_values(spec, values)``
+ Adds a parameter to the DataSet and associates result values with the new parameter.
+ The values must be a NumPy array or a Python list, with each element holding a single result value that matches the parameter's data type.
+ If the DataSet is not empty, then the count of provided values must equal the current count of results in the DataSet, or an error will result.
+
It is an error to add parameters to a completed DataSet.
-
-DataSet.mark_complete()
+
+``DataSet.mark_complete()``
Marks the DataSet as completed.
Access
------
-DataSet.id
+``DataSet.id``
Returns the unique identifying string for this DataSet.
This string will include the date and time that the DataSet was created and the name supplied to the constructor,
as well as additional content to ensure uniqueness.
-DataSet.length
- This attribute holds the current number of results in the DataSet.
+``DataSet.length``
+ This attribute holds the current number of results in the DataSet.
-DataSet.is_empty
+``DataSet.is_empty``
This attribute will be true if the DataSet is empty (has no results), or false if at least one result has been added to the DataSet.
It is equivalent to testing if the length is zero.
-DataSet.is_marked_complete
+``DataSet.is_marked_complete``
This attribute will be true if the DataSet has been marked as complete or false if it is in progress.
-DataSet.get_data(*params, start=, end=)
+``DataSet.get_data(*params, start=, end=)``
Returns the values stored in the DataSet for the specified parameters.
The values are returned as a list of parallel NumPy arrays, one array per parameter.
The data type of each array is based on the data type provided when the DataSet was created.
-
+
The parameter list may contain a mix of string parameter names, QCoDeS Parameter objects, and ParamSpec objects.
-
- If provided, the start and end parameters select a range of results by result count (index).
+
+ If provided, the start and end parameters select a range of results by result count (index).
Start defaults to 0, and end defaults to the current length.
-
+
  If the range is empty -- that is, if the end is less than or equal to the start, or if start is after the current end of the DataSet --
then a list of empty arrays is returned.
-DataSet.get_parameters()
+``DataSet.get_parameters()``
Returns a list of ParamSpec objects that describe the parameters stored in this DataSet.
-DataSet.get_metadata(tag=)
+``DataSet.get_metadata(tag=)``
Returns metadata for this DataSet.
-
+
If a tag string is provided, only metadata stored under that tag is returned.
Otherwise, all metadata is returned.
-
+
Subscribing
----------------
-DataSet.subscribe(callback, min_wait=, min_count=, state=)
+``DataSet.subscribe(callback, min_wait=, min_count=, state=)``
Subscribes the provided callback function to result additions to the DataSet.
As results are added to the DataSet, the subscriber is notified by having the callback invoked.
-
+
- min_wait is the minimum amount of time between notifications for this subscription, in milliseconds. The default is 100.
- min_count is the minimum number of results for which a notification should be sent. The default is 1.
-
+
When the callback is invoked, it is passed the DataSet itself, the current length of the DataSet, and the state object provided when subscribing.
If no state object was provided, then the callback gets passed None as the fourth parameter.
-
+
The callback is invoked when the DataSet is completed, regardless of the values of min_wait and min_count.
-
+
This method returns an opaque subscription identifier.
-DataSet.unsubscribe(subid)
+``DataSet.unsubscribe(subid)``
Removes the indicated subscription.
The subid must be the same object that was returned from a DataSet.subscribe call.
@@ -318,7 +320,7 @@ The existing QCoDeS storage subsystem should be modified so that some object has
- A write_dataset method that takes a DataSet object and writes it to the appropriate storage location in an appropriate format.
- A read_dataset method that reads from the appropriate location, either with a specified format or inferring the format, and returns
a DataSet object.
-
+
Metadata
========
@@ -329,17 +331,17 @@ parameters
This tag contains a dictionary from the string name of each parameter to information about that parameter.
Thus, if DataSet ds has a parameter named "foo", there will be a key "foo" in the dictionary returned from ds.get_metadata("parameters").
The value associated with this key will be a string-keyed dictionary.
-
+
parameters/__param__/spec
This path contains a string-keyed dictionary with (at least) the following two keys:
The "type" key is associated with the NumPy dtype for the values of this parameter.
The "metadata" key is associated with the metadata that was passed to the ParamSpec constructor that defines this parameter, or an empty dictionary if no metadata was set.
-
+
Utilities
=========
There are many utility routines that may be defined outside of the DataSet class that may be useful.
-We collect several of them here, with the note that these functions will not be part of the DataSet class
+We collect several of them here, with the note that these functions will not be part of the DataSet class
and will not be required by the DataSet class.
dataframe_from_dataset(dataset)
@@ -352,4 +354,3 @@ Open Issues
 This is convenient for adding data analysis results after the experiment has ended, but could potentially lead to accidentally mixing data from different experimental runs.
 It is already possible to modify metadata after the DataSet has been marked as completed, but sometimes that may not be sufficient.
-
diff --git a/docs/index.rst b/docs/index.rst
index b02b2a4d667..656b9b0091b 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -61,6 +61,7 @@ Documentation
help
user/index
community/index
+ dataset/index
api/index
api/generated/qcodes.instrument_drivers
roadmap
diff --git a/qcodes/dataset/experiment_container.py b/qcodes/dataset/experiment_container.py
index db0a0cc9a21..50caec3d549 100644
--- a/qcodes/dataset/experiment_container.py
+++ b/qcodes/dataset/experiment_container.py
@@ -126,7 +126,7 @@ def __repr__(self) -> str:
return "\n".join(out)
-# pulbic api
+# public api
def experiments()->List[Experiment]:
"""
@@ -204,7 +204,7 @@ def load_experiment_by_name(name: str,
the requested experiment
Raises:
- ValueErorr if the name is not unique and sample name is None.
+ ValueError if the name is not unique and sample name is None.
"""
e = Experiment(DB)
if sample:
diff --git a/qcodes/dataset/sqlite_base.py b/qcodes/dataset/sqlite_base.py
index fd218c9ae96..a9481b442ba 100644
--- a/qcodes/dataset/sqlite_base.py
+++ b/qcodes/dataset/sqlite_base.py
@@ -1010,7 +1010,6 @@ def get_paramspec(conn: sqlite3.Connection,
c = conn.execute(sql)
resp = many(c, 'layout_id', 'run_id', 'parameter', 'label', 'unit',
'inferred_from')
-
(layout_id, _, _, label, unit, inferred_from_string) = resp
if inferred_from_string: