diff --git a/.github/scripts/check-commit.sh b/.github/scripts/check-commit.sh index 8043c12fe..8fa9c24ce 100755 --- a/.github/scripts/check-commit.sh +++ b/.github/scripts/check-commit.sh @@ -92,7 +92,7 @@ dir="software" branches=("master" "dev") search -submodules=("DRAMSim2" "axe" "barstools" "dsptools" "rocket-dsp-utils" "torture" "fixedpoint" "cde") +submodules=("DRAMSim2" "axe" "dsptools" "rocket-dsp-utils" "torture" "fixedpoint" "cde") dir="tools" branches=("master" "dev" "main") search diff --git a/.gitmodules b/.gitmodules index 6ebbad59b..764a43819 100644 --- a/.gitmodules +++ b/.gitmodules @@ -127,9 +127,6 @@ [submodule "tools/axe"] path = tools/axe url = https://github.com/CTSRD-CHERI/axe.git -[submodule "tools/barstools"] - path = tools/barstools - url = https://github.com/ucb-bar/barstools.git [submodule "tools/cde"] path = tools/cde url = https://github.com/chipsalliance/cde.git diff --git a/build.sbt b/build.sbt index d929b3ca2..03bfeaab5 100644 --- a/build.sbt +++ b/build.sbt @@ -158,7 +158,7 @@ lazy val testchipip = (project in file("generators/testchipip")) .settings(commonSettings) lazy val chipyard = (project in file("generators/chipyard")) - .dependsOn(testchipip, rocketchip, boom, hwacha, rocketchip_blocks, rocketchip_inclusive_cache, iocell, + .dependsOn(testchipip, rocketchip, boom, hwacha, rocketchip_blocks, rocketchip_inclusive_cache, sha3, // On separate line to allow for cleaner tutorial-setup patches dsptools, rocket_dsp_utils, gemmini, icenet, tracegen, cva6, nvdla, sodor, ibex, fft_generator, @@ -256,13 +256,10 @@ lazy val rocc_acc_utils = (project in file("generators/rocc-acc-utils")) .settings(libraryDependencies ++= rocketLibDeps.value) .settings(commonSettings) -lazy val iocell = Project(id = "iocell", base = file("./tools/barstools/") / "iocell") - .settings(chiselSettings) - .settings(commonSettings) - -lazy val tapeout = (project in file("./tools/barstools/")) +lazy val tapeout = (project in file("./tools/tapeout/")) .settings(chiselSettings) .settings(commonSettings) + .settings(libraryDependencies ++= Seq("com.typesafe.play" %% "play-json" % "2.9.2")) lazy val fixedpoint = (project in file("./tools/fixedpoint/")) .settings(chiselSettings) diff --git a/common.mk b/common.mk index a2e31b514..ea5811acd 100644 --- a/common.mk +++ b/common.mk @@ -91,9 +91,9 @@ VLOG_EXT = sv v CHIPYARD_SOURCE_DIRS = $(addprefix $(base_dir)/,generators sims/firesim/sim fpga/fpga-shells fpga/src) CHIPYARD_SCALA_SOURCES = $(call lookup_srcs_by_multiple_type,$(CHIPYARD_SOURCE_DIRS),$(SCALA_EXT)) CHIPYARD_VLOG_SOURCES = $(call lookup_srcs_by_multiple_type,$(CHIPYARD_SOURCE_DIRS),$(VLOG_EXT)) -BARSTOOLS_SOURCE_DIRS = $(addprefix $(base_dir)/,tools/barstools) -BARSTOOLS_SCALA_SOURCES = $(call lookup_srcs_by_multiple_type,$(BARSTOOLS_SOURCE_DIRS),$(SCALA_EXT)) -BARSTOOLS_VLOG_SOURCES = $(call lookup_srcs_by_multiple_type,$(BARSTOOLS_SOURCE_DIRS),$(VLOG_EXT)) +TAPEOUT_SOURCE_DIRS = $(addprefix $(base_dir)/,tools/tapeout) +TAPEOUT_SCALA_SOURCES = $(call lookup_srcs_by_multiple_type,$(TAPEOUT_SOURCE_DIRS),$(SCALA_EXT)) +TAPEOUT_VLOG_SOURCES = $(call lookup_srcs_by_multiple_type,$(TAPEOUT_SOURCE_DIRS),$(VLOG_EXT)) # This assumes no SBT meta-build sources SBT_SOURCE_DIRS = $(addprefix $(base_dir)/,generators sims/firesim/sim tools) SBT_SOURCES = $(call lookup_srcs,$(SBT_SOURCE_DIRS),sbt) $(base_dir)/build.sbt $(base_dir)/project/plugins.sbt $(base_dir)/project/build.properties @@ -127,7 +127,7 @@ $(CHIPYARD_CLASSPATH_TARGETS) &: $(CHIPYARD_SCALA_SOURCES) $(SCALA_BUILDTOOL_DEP $(call 
run_sbt_assembly,$(SBT_PROJECT),$(CHIPYARD_CLASSPATH)) # order only dependency between sbt runs needed to avoid concurrent sbt runs -$(TAPEOUT_CLASSPATH_TARGETS) &: $(BARSTOOLS_SCALA_SOURCES) $(SCALA_BUILDTOOL_DEPS) $(BARSTOOLS_VLOG_SOURCES) | $(CHIPYARD_CLASSPATH_TARGETS) +$(TAPEOUT_CLASSPATH_TARGETS) &: $(TAPEOUT_SCALA_SOURCES) $(SCALA_BUILDTOOL_DEPS) $(TAPEOUT_VLOG_SOURCES) | $(CHIPYARD_CLASSPATH_TARGETS) mkdir -p $(dir $@) $(call run_sbt_assembly,tapeout,$(TAPEOUT_CLASSPATH)) @@ -165,7 +165,7 @@ define sfc_extra_low_transforms_anno_contents [ { "class": "firrtl.stage.RunFirrtlTransformAnnotation", - "transform": "barstools.tapeout.transforms.ExtraLowTransforms" + "transform": "tapeout.transforms.ExtraLowTransforms" } ] endef @@ -232,7 +232,7 @@ $(FINAL_ANNO_FILE): $(EXTRA_ANNO_FILE) $(SFC_EXTRA_ANNO_FILE) $(SFC_LEVEL) $(SFC_MFC_TARGETS) &: private TMP_DIR := $(shell mktemp -d -t cy-XXXXXXXX) $(SFC_MFC_TARGETS) &: $(TAPEOUT_CLASSPATH_TARGETS) $(FIRRTL_FILE) $(FINAL_ANNO_FILE) $(SFC_LEVEL) $(EXTRA_FIRRTL_OPTIONS) $(MFC_LOWERING_OPTIONS) rm -rf $(GEN_COLLATERAL_DIR) - $(call run_jar_scala_main,$(TAPEOUT_CLASSPATH),barstools.tapeout.transforms.GenerateModelStageMain,\ + $(call run_jar_scala_main,$(TAPEOUT_CLASSPATH),tapeout.transforms.GenerateModelStageMain,\ --no-dedup \ --output-file $(SFC_FIRRTL_BASENAME) \ --output-annotation-file $(SFC_ANNO_FILE) \ @@ -297,12 +297,12 @@ $(TOP_SMEMS_CONF) $(MODEL_SMEMS_CONF) &: $(MFC_SMEMS_CONF) $(MFC_MODEL_HRCHY_JS # This file is for simulation only. VLSI flows should replace this file with one containing hard SRAMs TOP_MACROCOMPILER_MODE ?= --mode synflops $(TOP_SMEMS_FILE) $(TOP_SMEMS_FIR) &: $(TAPEOUT_CLASSPATH_TARGETS) $(TOP_SMEMS_CONF) - $(call run_jar_scala_main,$(TAPEOUT_CLASSPATH),barstools.macros.MacroCompiler,-n $(TOP_SMEMS_CONF) -v $(TOP_SMEMS_FILE) -f $(TOP_SMEMS_FIR) $(TOP_MACROCOMPILER_MODE)) + $(call run_jar_scala_main,$(TAPEOUT_CLASSPATH),tapeout.macros.MacroCompiler,-n $(TOP_SMEMS_CONF) -v $(TOP_SMEMS_FILE) -f $(TOP_SMEMS_FIR) $(TOP_MACROCOMPILER_MODE)) touch $(TOP_SMEMS_FILE) $(TOP_SMEMS_FIR) MODEL_MACROCOMPILER_MODE = --mode synflops $(MODEL_SMEMS_FILE) $(MODEL_SMEMS_FIR) &: $(TAPEOUT_CLASSPATH_TARGETS) $(MODEL_SMEMS_CONF) - $(call run_jar_scala_main,$(TAPEOUT_CLASSPATH),barstools.macros.MacroCompiler, -n $(MODEL_SMEMS_CONF) -v $(MODEL_SMEMS_FILE) -f $(MODEL_SMEMS_FIR) $(MODEL_MACROCOMPILER_MODE)) + $(call run_jar_scala_main,$(TAPEOUT_CLASSPATH),tapeout.macros.MacroCompiler, -n $(MODEL_SMEMS_CONF) -v $(MODEL_SMEMS_FILE) -f $(MODEL_SMEMS_FIR) $(MODEL_MACROCOMPILER_MODE)) touch $(MODEL_SMEMS_FILE) $(MODEL_SMEMS_FIR) ######################################################################################## diff --git a/docs/Advanced-Concepts/Resources.rst b/docs/Advanced-Concepts/Resources.rst index fbfaad60a..99125194e 100644 --- a/docs/Advanced-Concepts/Resources.rst +++ b/docs/Advanced-Concepts/Resources.rst @@ -33,7 +33,3 @@ For example: lazy val myAwesomeAccel = (project in file("generators/myAwesomeAccelFolder")) .dependsOn(rocketchip) .settings(commonSettings) - - lazy val tapeout = conditionalDependsOn(project in file("./tools/barstools/tapeout/")) - .dependsOn(myAwesomeAccel) - .settings(commonSettings) diff --git a/docs/Chipyard-Basics/Chipyard-Components.rst b/docs/Chipyard-Basics/Chipyard-Components.rst index 669bfdfc5..7fdbe26da 100644 --- a/docs/Chipyard-Basics/Chipyard-Components.rst +++ b/docs/Chipyard-Basics/Chipyard-Components.rst @@ -79,9 +79,9 @@ Tools FIRRTL enables digital circuits manipulation between Chisel 
elaboration and Verilog generation. See :ref:`Tools/FIRRTL:FIRRTL` for more information. -**Barstools** +**Tapeout-Tools (formerly Barstools)** A collection of common FIRRTL transformations used to manipulate a digital circuit without changing the generator source RTL. - See :ref:`Tools/Barstools:Barstools` for more information. + See :ref:`Tools/Tapeout-Tools:Tapeout-Tools` for more information. **Dsptools** A Chisel library for writing custom signal processing hardware, as well as integrating custom signal processing hardware into an SoC (especially a Rocket-based SoC). diff --git a/docs/Customization/Custom-Chisel.rst b/docs/Customization/Custom-Chisel.rst index a3b4ef72c..19a7bde8c 100644 --- a/docs/Customization/Custom-Chisel.rst +++ b/docs/Customization/Custom-Chisel.rst @@ -59,7 +59,7 @@ should look something like this: .. code-block:: scala lazy val chipyard = (project in file("generators/chipyard")) - .dependsOn(testchipip, rocketchip, boom, hwacha, rocketchip_blocks, rocketchip_inclusive_cache, iocell, + .dependsOn(testchipip, rocketchip, boom, hwacha, rocketchip_blocks, rocketchip_inclusive_cache, sha3, dsptools, `rocket-dsp-utils`, gemmini, icenet, tracegen, cva6, nvdla, sodor, ibex, fft_generator, yourproject, // <- added to the middle of the list for simplicity diff --git a/docs/Customization/Firrtl-Transforms.rst b/docs/Customization/Firrtl-Transforms.rst index 1113bde63..7de7aadbd 100644 --- a/docs/Customization/Firrtl-Transforms.rst +++ b/docs/Customization/Firrtl-Transforms.rst @@ -22,18 +22,18 @@ Where to add transforms In Chipyard, the FIRRTL compiler is called multiple times to create a "Top" file that contains the DUT and a "Model" file containing the test harness, which instantiates the DUT. The "Model" file does not contain the DUT's module definition or any of its submodules. -This is done by the ``tapeout`` SBT project (located in ``tools/barstools/tapeout``) which calls ``GenerateModelStageMain`` (a function that wraps the multiple FIRRTL compiler calls and extra transforms). +This is done by the ``tapeout`` SBT project (located in ``tools/tapeout``), which calls ``GenerateModelStageMain`` (a function that wraps the multiple FIRRTL compiler calls and extra transforms). .. literalinclude:: ../../common.mk :language: make :start-after: DOC include start: FirrtlCompiler :end-before: DOC include end: FirrtlCompiler -If you look inside of the `tools/barstools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala `__ file, +If you look inside the ``tools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala`` file, you can see that FIRRTL is invoked for "Model". Currently, the FIRRTL compiler is agnostic to the ``TOP`` and ``MODEL`` differentiation, and the user is responsible for providing annotations that will inform the compiler where (``TOP`` vs ``MODEL``) to perform the custom FIRRTL transformations. -For more information on Barstools, please visit the :ref:`Tools/Barstools:Barstools` section. +For more information on the Tapeout sub-project, please visit the :ref:`Tools/Tapeout-Tools:Tapeout-Tools` section. Examples of transforms ---------------------- diff --git a/docs/Customization/Incorporating-Verilog-Blocks.rst b/docs/Customization/Incorporating-Verilog-Blocks.rst index fde44411f..9d7ceb814 100644 --- a/docs/Customization/Incorporating-Verilog-Blocks.rst +++ b/docs/Customization/Incorporating-Verilog-Blocks.rst @@ -33,19 +33,6 @@ different directory from Chisel (Scala) sources.
vsrc/ YourFile.v -In addition to the steps outlined in the previous section on adding a -project to the ``build.sbt`` at the top level, it is also necessary to -add any projects that contain Verilog IP as dependencies to the -``tapeout`` project. This ensures that the Verilog sources are visible -to the downstream FIRRTL passes that provide utilities for integrating -Verilog files into the build process, which are part of the -``tapeout`` package in ``barstools/tapeout``. - -.. code-block:: scala - - lazy val tapeout = conditionalDependsOn(project in file("./tools/barstools/tapeout/")) - .dependsOn(chisel_testers, example, yourproject) - .settings(commonSettings) For this concrete GCD example, we will be using a ``GCDMMIOBlackBox`` Verilog module that is defined in the ``chipyard`` project. The Scala diff --git a/docs/Tools/Barstools.rst b/docs/Tools/Tapeout-Tools.rst similarity index 95% rename from docs/Tools/Barstools.rst rename to docs/Tools/Tapeout-Tools.rst index fa4176c45..b522c46d0 100644 --- a/docs/Tools/Barstools.rst +++ b/docs/Tools/Tapeout-Tools.rst @@ -1,7 +1,7 @@ -Barstools +Tapeout-Tools =============================== -Barstools is a collection of useful FIRRTL transformations and compilers to help the build process. +Tapeout-Tools is a collection of useful FIRRTL transformations and compilers that help the build process. Included in the tools are a MacroCompiler (used to map Chisel memory constructs to vendor SRAMs), FIRRTL transforms (to separate harness and top-level SoC files), and more. Mapping technology SRAMs (MacroCompiler) @@ -23,16 +23,16 @@ An external module reference is a FIRRTL construct that enables a design to refe A list of unique SRAM configurations is output to a ``.conf`` file by FIRRTL, which is used to map technology SRAMs. Without this transform, FIRRTL will map all ``SeqMem`` s to flip-flop arrays with equivalent behavior, which may lead to a design that is difficult to route. -The ``.conf`` file is consumed by a tool called MacroCompiler, which is part of the :ref:`Tools/Barstools:Barstools` scala package. +The ``.conf`` file is consumed by a tool called MacroCompiler, which is part of the :ref:`Tools/Tapeout-Tools:Tapeout-Tools` Scala package. MacroCompiler is also passed an ``.mdf`` file that describes the available list of technology SRAMs or the capabilities of the SRAM compiler, if one is provided by the foundry. -Typically a foundry SRAM compiler will be able to generate a set of different SRAMs collateral based on some requirements on size, aspect ratio, etc. (see :ref:`Tools/Barstools:SRAM MDF Fields`). +Typically, a foundry SRAM compiler can generate a set of different SRAM collateral based on requirements such as size, aspect ratio, etc. (see :ref:`Tools/Tapeout-Tools:SRAM MDF Fields`). Using a user-customizable cost function, MacroCompiler will select the SRAMs that are the best fit for each dimensionality in the ``.conf`` file. This may include over-provisioning (e.g. using a 64x1024 SRAM for a requested 60x1024, if the latter is not available) or arraying. Arraying can be done in both width and depth, as well as to solve masking constraints. For example, a 128x2048 array could be composed of four 64x1024 arrays, with two macros in parallel to create two 128x1024 virtual SRAMs which are combinationally muxed to add depth. If this macro requires byte-granularity write masking, but no technology SRAMs support masking, then the tool may choose to use thirty-two 8x1024 arrays in a similar configuration.
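As a concrete illustration, this patch adds a ``ConfReader`` under ``tools/tapeout/src/main/scala/macrolib`` (shown later in this diff) that parses the ``.conf`` format. A minimal sketch of driving it directly, using a made-up conf line for a 64x1024 memory with one byte-maskable read-write port:

.. code-block:: scala

    import mdf.macrolib.ConfReader

    // Hypothetical line in the style emitted by FIRRTL's ReplSeqMem pass
    val line = "name mem_ext depth 1024 width 64 ports mrw mask_gran 8"

    ConfReader.readSingleLineSafe(line) match {
      // Prints "mem_ext: 64x1024, family 1rw" for the line above
      case Right(sram) => println(s"${sram.name}: ${sram.width}x${sram.depth}, family ${sram.family}")
      case Left(err)   => println(s"could not parse conf line: $err")
    }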
You may wish to create a cache of your available SRAM macros either manually or via a script. A reference script for creating a JSON of your SRAM macros is in the `asap7 technology library folder `__. -For information on writing ``.mdf`` files, look at `MDF on github `__ and a brief description in :ref:`Tools/Barstools:SRAM MDF Fields` section. +For information on writing ``.mdf`` files, look at `MDF on github `__ and the brief description in the :ref:`Tools/Tapeout-Tools:SRAM MDF Fields` section. The output of MacroCompiler is a Verilog file containing modules that wrap the technology SRAMs into the specified interface names from the ``.conf``. If the technology supports an SRAM compiler, then MacroCompiler will also emit HammerIR that can be passed to Hammer to run the compiler itself and generate design collateral. @@ -105,7 +105,7 @@ This is necessary to facilitate post-synthesis and post-place-and-route simulati Simulations, after your design goes through a VLSI flow, will use the Verilog netlist generated from the flow and will need an untouched test harness to drive it. Separating these components into separate files makes this straightforward. Without the separation, the file that includes the test harness would also redefine the DUT, which is often disallowed by simulation tools. -To do this, there is a FIRRTL ``App`` in :ref:`Tools/Barstools:Barstools` called ``GenerateTopAndHarness``, which runs the appropriate transforms to elaborate the modules separately. +To do this, there is a FIRRTL ``App`` in :ref:`Tools/Tapeout-Tools:Tapeout-Tools` called ``GenerateTopAndHarness``, which runs the appropriate transforms to elaborate the modules separately. This also renames modules in the test harness so that any modules that are instantiated in both the test harness and the chip are uniquified. .. Note:: For VLSI projects, this ``App`` is run instead of the normal FIRRTL ``App`` to elaborate Verilog. @@ -133,5 +133,5 @@ This, unfortunately, breaks the process-agnostic RTL abstraction, so it is recom The simplest way to do this is to have a config fragment that, when included, instantiates the IO cells and connects them in the test harness. When simulating chip-specific designs, it is important to include the IO cells. The IO cell behavioral models will often assert if they are connected incorrectly, which is a useful runtime check. -They also keep the IO interface at the chip and test harness boundary (see :ref:`Tools/Barstools:Separating the Top module from the TestHarness module`) consistent after synthesis and place-and-route, +They also keep the IO interface at the chip and test harness boundary (see :ref:`Tools/Tapeout-Tools:Separating the Top module from the TestHarness module`) consistent after synthesis and place-and-route, which allows the RTL simulation test harness to be reused. diff --git a/docs/Tools/index.rst b/docs/Tools/index.rst index f8d0be95a..491bd60cd 100644 --- a/docs/Tools/index.rst +++ b/docs/Tools/index.rst @@ -12,4 +12,4 @@ The following pages will introduce them, and how we can use them in order to gen FIRRTL Treadle Dsptools - Barstools + Tapeout-Tools diff --git a/docs/VLSI/Basic-Flow.rst b/docs/VLSI/Basic-Flow.rst index 10a7af2f6..de1acc74f 100644 --- a/docs/VLSI/Basic-Flow.rst +++ b/docs/VLSI/Basic-Flow.rst @@ -56,7 +56,7 @@ We will do so by calling ``make buildfile`` with appropriate Chipyard configurat As in the rest of the Chipyard flows, we specify our SoC configuration using the ``CONFIG`` make variable.
However, unlike the rest of the Chipyard flows, in the case of physical design we may want to work in a hierarchical fashion and therefore focus on a single module. Hence, we can also specify a ``VLSI_TOP`` make variable with the name of a specific Verilog module (which should also match the name of the equivalent Chisel module) that we would like to work on. -The makefile will automatically call tools such as Barstools and the MacroCompiler (:ref:`Tools/Barstools:barstools`) in order to make the generated Verilog more VLSI friendly. +The makefile will automatically call tools such as Tapeout-Tools and the MacroCompiler (:ref:`Tools/Tapeout-Tools:Tapeout-Tools`) in order to make the generated Verilog more VLSI-friendly. By default, the MacroCompiler will attempt to map memories into the SRAM options within the Hammer technology plugin. However, if you are working with a new process technology and prefer to work with flip-flop arrays, you can configure the MacroCompiler using the ``TOP_MACROCOMPILER_MODE`` make variable. For example, if your technology plugin does not have an SRAM compiler ready, you can use the ``TOP_MACROCOMPILER_MODE='--mode synflops'`` option (note that synthesizing a design with only flip-flops is very slow and will often fail to meet constraints). We call the ``make buildfile`` command while also specifying the name of the process technology we are working with (the same ``tech_name`` for the configuration files and plugin name) and the configuration files we created. Note that in the ASAP7 tutorial (:ref:`tutorial`) these configuration files are merged into a single file called ``example-asap7.yml``. diff --git a/docs/VLSI/Building-A-Chip.rst b/docs/VLSI/Building-A-Chip.rst index adc798021..c635bc5f8 100644 --- a/docs/VLSI/Building-A-Chip.rst +++ b/docs/VLSI/Building-A-Chip.rst @@ -10,7 +10,7 @@ Transforming the RTL -------------------- Building a chip requires specializing the generic Verilog emitted by FIRRTL to adhere to the constraints imposed by the technology used for fabrication. -This includes mapping Chisel memories to available technology macros such as SRAMs, mapping the input and output of your chip to connect to technology IO cells, see :ref:`Tools/Barstools:Barstools`. +This includes mapping Chisel memories to available technology macros such as SRAMs, and mapping the inputs and outputs of your chip to technology IO cells; see :ref:`Tools/Tapeout-Tools:Tapeout-Tools`. In addition to these required transformations, it may also be beneficial to transform the RTL to make it more amenable to hierarchical physical design. This often includes modifying the logical hierarchy to match the physical hierarchy through grouping components together or flattening components into a single larger module.
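Since this patch moves the IO-cell helpers from ``barstools.iocell.chisel`` into ``chipyard.iocell`` (the new ``IOCell.scala`` appears below), downstream ``ChipTop``-style code only needs its import updated. A minimal usage sketch, with a hypothetical ``Core`` module for illustration:

.. code-block:: scala

    import chisel3._
    import chipyard.iocell._ // was: import barstools.iocell.chisel._

    // Hypothetical inner design with one digital output
    class Core extends RawModule {
      val out = IO(Output(Bool()))
      out := true.B
    }

    class PaddedTop extends RawModule {
      val core = Module(new Core)
      // Creates the pad-side port "out" plus a GenericDigitalOutIOCell wired between core and pad
      val (pad, iocells) = IOCell.generateIOFromSignal(core.out, "out")
    }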
diff --git a/generators/chipyard/src/main/resources/vsrc/Analog.v b/generators/chipyard/src/main/resources/vsrc/Analog.v new file mode 100644 index 000000000..0a9abf03d --- /dev/null +++ b/generators/chipyard/src/main/resources/vsrc/Analog.v @@ -0,0 +1,11 @@ +// See LICENSE for license details + +`timescale 1ns/1ps + +module AnalogConst #(CONST, WIDTH) ( + output [WIDTH-1:0] io +); + + assign io = CONST; + +endmodule diff --git a/generators/chipyard/src/main/scala/ChipTop.scala b/generators/chipyard/src/main/scala/ChipTop.scala index d80d71af6..518afa7d1 100644 --- a/generators/chipyard/src/main/scala/ChipTop.scala +++ b/generators/chipyard/src/main/scala/ChipTop.scala @@ -10,7 +10,7 @@ import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp, LazyRawModuleI import freechips.rocketchip.util.{DontTouch} import chipyard.iobinders._ -import barstools.iocell.chisel._ +import chipyard.iocell._ case object BuildSystem extends Field[Parameters => LazyModule]((p: Parameters) => new DigitalTop()(p)) diff --git a/generators/chipyard/src/main/scala/clocking/ClockBinders.scala b/generators/chipyard/src/main/scala/clocking/ClockBinders.scala index fdb2ec9e5..d075fcbbe 100644 --- a/generators/chipyard/src/main/scala/clocking/ClockBinders.scala +++ b/generators/chipyard/src/main/scala/clocking/ClockBinders.scala @@ -7,7 +7,7 @@ import freechips.rocketchip.prci._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.subsystem._ import freechips.rocketchip.tilelink._ -import barstools.iocell.chisel._ +import chipyard.iocell._ // This uses the FakePLL, which uses a ClockAtFreq Verilog blackbox to generate // the requested clocks. This also adds TileLink ClockDivider and ClockSelector diff --git a/generators/chipyard/src/main/scala/example/CustomChipTop.scala b/generators/chipyard/src/main/scala/example/CustomChipTop.scala index eb0565ce1..5958d1b27 100644 --- a/generators/chipyard/src/main/scala/example/CustomChipTop.scala +++ b/generators/chipyard/src/main/scala/example/CustomChipTop.scala @@ -6,7 +6,7 @@ import chipyard.iobinders._ import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy.{InModuleBody} import freechips.rocketchip.subsystem.{PBUS, HasTileLinkLocations} -import barstools.iocell.chisel._ +import chipyard.iocell._ import chipyard._ import chipyard.harness.{BuildTop} import sifive.blocks.devices.uart._ diff --git a/generators/chipyard/src/main/scala/example/FlatChipTop.scala b/generators/chipyard/src/main/scala/example/FlatChipTop.scala index 6b249286a..d8f659f40 100644 --- a/generators/chipyard/src/main/scala/example/FlatChipTop.scala +++ b/generators/chipyard/src/main/scala/example/FlatChipTop.scala @@ -13,7 +13,7 @@ import chipyard.{BuildSystem, DigitalTop} import chipyard.harness.{BuildTop} import chipyard.clocking._ import chipyard.iobinders._ -import barstools.iocell.chisel._ +import chipyard.iocell._ import testchipip.serdes.{SerialTLKey} class WithFlatChipTop extends Config((site, here, up) => { diff --git a/generators/chipyard/src/main/scala/harness/HarnessBinders.scala b/generators/chipyard/src/main/scala/harness/HarnessBinders.scala index f357f357c..9bf97b184 100644 --- a/generators/chipyard/src/main/scala/harness/HarnessBinders.scala +++ b/generators/chipyard/src/main/scala/harness/HarnessBinders.scala @@ -12,7 +12,7 @@ import freechips.rocketchip.subsystem._ import freechips.rocketchip.util._ import freechips.rocketchip.jtag.{JTAGIO} import freechips.rocketchip.devices.debug.{SimJTAG} -import barstools.iocell.chisel._ +import 
chipyard.iocell._ import testchipip.dram.{SimDRAM} import testchipip.tsi.{SimTSI, SerialRAM, TSI, TSIIO} import testchipip.soc.{TestchipSimDTM} diff --git a/generators/chipyard/src/main/scala/iobinders/IOBinders.scala b/generators/chipyard/src/main/scala/iobinders/IOBinders.scala index b4f116d25..e00f43a4a 100644 --- a/generators/chipyard/src/main/scala/iobinders/IOBinders.scala +++ b/generators/chipyard/src/main/scala/iobinders/IOBinders.scala @@ -27,7 +27,7 @@ import sifive.blocks.devices.spi._ import sifive.blocks.devices.i2c._ import tracegen.{TraceGenSystemModuleImp} -import barstools.iocell.chisel._ +import chipyard.iocell._ import testchipip.serdes.{CanHavePeripheryTLSerial, SerialTLKey} import testchipip.spi.{SPIChipIO} diff --git a/generators/chipyard/src/main/scala/iocell/Analog.scala b/generators/chipyard/src/main/scala/iocell/Analog.scala new file mode 100644 index 000000000..78e1cfe76 --- /dev/null +++ b/generators/chipyard/src/main/scala/iocell/Analog.scala @@ -0,0 +1,18 @@ +// See LICENSE for license details + +package chipyard.iocell + +import chisel3._ +import chisel3.util.{HasBlackBoxResource} +import chisel3.experimental.{Analog, IntParam} + +class AnalogConst(value: Int, width: Int = 1) + extends BlackBox(Map("CONST" -> IntParam(value), "WIDTH" -> IntParam(width))) + with HasBlackBoxResource { + val io = IO(new Bundle { val io = Analog(width.W) }) + addResource("/vsrc/Analog.v") +} + +object AnalogConst { + def apply(value: Int, width: Int = 1) = Module(new AnalogConst(value, width)).io.io +} diff --git a/generators/chipyard/src/main/scala/iocell/IOCell.scala b/generators/chipyard/src/main/scala/iocell/IOCell.scala new file mode 100644 index 000000000..5f0129b87 --- /dev/null +++ b/generators/chipyard/src/main/scala/iocell/IOCell.scala @@ -0,0 +1,338 @@ +// See LICENSE for license details + +package chipyard.iocell + +import chisel3._ +import chisel3.util.{Cat, HasBlackBoxInline} +import chisel3.reflect.DataMirror +import chisel3.experimental.{Analog, BaseModule} + +// The following four IO cell bundle types are bare-minimum functional connections +// for modeling 4 different IO cell scenarios. The intention is that the user +// would create wrapper modules that extend these interfaces with additional +// control signals. These are loosely similar to the sifive-blocks PinCtrl bundles +// (https://github.com/sifive/sifive-blocks/blob/master/src/main/scala/devices/pinctrl/PinCtrl.scala), +// but we want to avoid a dependency on external libraries.
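+//
+// For illustration only (a hypothetical sketch, not part of this file's API):
+// a chip-specific wrapper might extend one of these bundles with extra pad
+// controls, for example
+//
+//   class PullUpInIOCellBundle extends DigitalInIOCellBundle {
+//     val pue = Input(Bool()) // hypothetical pull-up enable for the pad
+//   }
+//
+// with a matching DigitalInIOCell implementation exposing it as its io.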
+ +/** The base IO bundle for an analog signal (typically something with no digital buffers inside) + * pad: off-chip (external) connection + * core: internal connection + */ +class AnalogIOCellBundle extends Bundle { + val pad = Analog(1.W) // Pad/bump signal (off-chip) + val core = Analog(1.W) // core signal (on-chip) +} + +/** The base IO bundle for a signal with runtime-controllable direction + * pad: off-chip (external) connection + * i: input to chip logic (output from IO cell) + * ie: enable signal for i + * o: output from chip logic (input to IO cell) + * oe: enable signal for o + */ +class DigitalGPIOCellBundle extends Bundle { + val pad = Analog(1.W) + val i = Output(Bool()) + val ie = Input(Bool()) + val o = Input(Bool()) + val oe = Input(Bool()) +} + +/** The base IO bundle for a digital output signal + * pad: off-chip (external) connection + * o: output from chip logic (input to IO cell) + * oe: enable signal for o + */ +class DigitalOutIOCellBundle extends Bundle { + val pad = Output(Bool()) + val o = Input(Bool()) + val oe = Input(Bool()) +} + +/** The base IO bundle for a digital input signal + * pad: off-chip (external) connection + * i: input to chip logic (output from IO cell) + * ie: enable signal for i + */ +class DigitalInIOCellBundle extends Bundle { + val pad = Input(Bool()) + val i = Output(Bool()) + val ie = Input(Bool()) +} + +trait IOCell extends BaseModule { + var iocell_name: Option[String] = None + + /** Set IOCell name + * @param s Proposed name for the IOCell + * + * @return An inherited IOCell with given the proposed name + */ + def suggestName(s: String): this.type = { + iocell_name = Some(s) + super.suggestName(s) + } +} + +trait AnalogIOCell extends IOCell { + val io: AnalogIOCellBundle +} + +trait DigitalGPIOCell extends IOCell { + val io: DigitalGPIOCellBundle +} + +trait DigitalInIOCell extends IOCell { + val io: DigitalInIOCellBundle +} + +trait DigitalOutIOCell extends IOCell { + val io: DigitalOutIOCellBundle +} + +// The following Generic IO cell black boxes have verilog models that mimic a very simple +// implementation of an IO cell. For building a real chip, it is important to implement +// and use similar classes which wrap the foundry-specific IO cells. + +abstract class GenericIOCell extends BlackBox with HasBlackBoxInline { + val impl: String + val moduleName = this.getClass.getSimpleName + setInline(s"$moduleName.v", impl); +} + +class GenericAnalogIOCell extends GenericIOCell with AnalogIOCell { + val io = IO(new AnalogIOCellBundle) + lazy val impl = s""" +`timescale 1ns/1ps +module GenericAnalogIOCell( + inout pad, + inout core +); + + assign core = 1'bz; + assign pad = core; + +endmodule""" +} + +class GenericDigitalGPIOCell extends GenericIOCell with DigitalGPIOCell { + val io = IO(new DigitalGPIOCellBundle) + lazy val impl = s""" +`timescale 1ns/1ps +module GenericDigitalGPIOCell( + inout pad, + output i, + input ie, + input o, + input oe +); + + assign pad = oe ? o : 1'bz; + assign i = ie ? pad : 1'b0; + +endmodule""" +} + +class GenericDigitalInIOCell extends GenericIOCell with DigitalInIOCell { + val io = IO(new DigitalInIOCellBundle) + lazy val impl = s""" +`timescale 1ns/1ps +module GenericDigitalInIOCell( + input pad, + output i, + input ie +); + + assign i = ie ? 
pad : 1'b0; + +endmodule""" +} + +class GenericDigitalOutIOCell extends GenericIOCell with DigitalOutIOCell { + val io = IO(new DigitalOutIOCellBundle) + lazy val impl = s""" +`timescale 1ns/1ps +module GenericDigitalOutIOCell( + output pad, + input o, + input oe +); + + assign pad = oe ? o : 1'bz; + +endmodule""" +} + +trait IOCellTypeParams { + def analog(): AnalogIOCell + def gpio(): DigitalGPIOCell + def input(): DigitalInIOCell + def output(): DigitalOutIOCell +} + +case class GenericIOCellParams() extends IOCellTypeParams { + def analog() = Module(new GenericAnalogIOCell) + def gpio() = Module(new GenericDigitalGPIOCell) + def input() = Module(new GenericDigitalInIOCell) + def output() = Module(new GenericDigitalOutIOCell) +} + +object IOCell { + + /** From within a RawModule or MultiIOModule context, generate new module IOs from a given + * signal and return the new IO and a Seq containing all generated IO cells. + * @param coreSignal The signal onto which to add IO cells + * @param name An optional name or name prefix to use for naming IO cells + * @param abstractResetAsAsync When set, will coerce abstract resets to + * AsyncReset, and otherwise to Bool (sync reset) + * @return A tuple of (the generated IO data node, a Seq of all generated IO cell instances) + */ + def generateIOFromSignal[T <: Data]( + coreSignal: T, + name: String, + typeParams: IOCellTypeParams = GenericIOCellParams(), + abstractResetAsAsync: Boolean = false + ): (T, Seq[IOCell]) = { + val padSignal = IO(DataMirror.internal.chiselTypeClone[T](coreSignal)).suggestName(name) + val resetFn = if (abstractResetAsAsync) toAsyncReset else toSyncReset + val iocells = IOCell.generateFromSignal(coreSignal, padSignal, Some(s"iocell_$name"), typeParams, resetFn) + (padSignal, iocells) + } + + /** Connect two identical signals together by adding IO cells between them and return a Seq + * containing all generated IO cells. 
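+ * Both signals must already have resolved directions; for Bits signals the widths
+ * must match, and each bit is driven through its own single-bit IO cell.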
+ * @param coreSignal The core-side (internal) signal onto which to connect/add IO cells + * @param padSignal The pad-side (external) signal onto which to connect IO cells + * @param name An optional name or name prefix to use for naming IO cells + * @return A Seq of all generated IO cell instances + */ + val toSyncReset: (Reset) => Bool = _.asBool + val toAsyncReset: (Reset) => AsyncReset = _.asAsyncReset + def generateFromSignal[T <: Data, R <: Reset]( + coreSignal: T, + padSignal: T, + name: Option[String] = None, + typeParams: IOCellTypeParams = GenericIOCellParams(), + concretizeResetFn: (Reset) => R = toSyncReset + ): Seq[IOCell] = { + def genCell[T <: Data]( + castToBool: (T) => Bool, + castFromBool: (Bool) => T + )(coreSignal: T, + padSignal: T + ): Seq[IOCell] = { + DataMirror.directionOf(coreSignal) match { + case ActualDirection.Input => { + val iocell = typeParams.input() + name.foreach(n => { + iocell.suggestName(n) + }) + coreSignal := castFromBool(iocell.io.i) + iocell.io.ie := true.B + iocell.io.pad := castToBool(padSignal) + Seq(iocell) + } + case ActualDirection.Output => { + val iocell = typeParams.output() + name.foreach(n => { + iocell.suggestName(n) + }) + iocell.io.o := castToBool(coreSignal) + iocell.io.oe := true.B + padSignal := castFromBool(iocell.io.pad) + Seq(iocell) + } + case _ => throw new Exception(s"Signal does not have a direction and cannot be matched to an IOCell") + } + } + def genCellForClock = genCell[Clock](_.asUInt.asBool, _.asClock) _ + def genCellForAsyncReset = genCell[AsyncReset](_.asBool, _.asAsyncReset) _ + def genCellForAbstractReset = genCell[Reset](_.asBool, concretizeResetFn) _ + + (coreSignal, padSignal) match { + case (coreSignal: Analog, padSignal: Analog) => { + if (coreSignal.getWidth == 0) { + Seq() + } else { + require( + coreSignal.getWidth == 1, + "Analogs wider than 1 bit are not supported because we can't bit-select Analogs (https://github.com/freechipsproject/chisel3/issues/536)" + ) + val iocell = typeParams.analog() + name.foreach(n => iocell.suggestName(n)) + iocell.io.core <> coreSignal + padSignal <> iocell.io.pad + Seq(iocell) + } + } + case (coreSignal: Clock, padSignal: Clock) => genCellForClock(coreSignal, padSignal) + case (coreSignal: AsyncReset, padSignal: AsyncReset) => genCellForAsyncReset(coreSignal, padSignal) + case (coreSignal: Bits, padSignal: Bits) => { + require(padSignal.getWidth == coreSignal.getWidth, "padSignal and coreSignal must be the same width") + if (padSignal.getWidth == 0) { + // This dummy assignment will prevent invalid firrtl from being emitted + DataMirror.directionOf(coreSignal) match { + case ActualDirection.Input => coreSignal := 0.U + case _ => {} + } + Seq() + } else { + DataMirror.directionOf(coreSignal) match { + case ActualDirection.Input => { + val iocells = padSignal.asBools.zipWithIndex.map { case (sig, i) => + val iocell = typeParams.input() + // Note that we are relying on chisel deterministically naming this in the index order (which it does) + // This has the side-effect of naming index 0 with no _0 suffix, which is how chisel names other signals + // An alternative solution would be to suggestName(n + "_" + i) + name.foreach(n => { + iocell.suggestName(n) + }) + iocell.io.pad := sig + iocell.io.ie := true.B + iocell + } + // Note that the reverse here is because Cat(Seq(a,b,c,d)) yields abcd, but a is index 0 of the Seq + coreSignal := Cat(iocells.map(_.io.i).reverse) + iocells + } + case ActualDirection.Output => { + val iocells = coreSignal.asBools.zipWithIndex.map { case 
(sig, i) => + val iocell = typeParams.output() + // Note that we are relying on chisel deterministically naming this in the index order (which it does) + // This has the side-effect of naming index 0 with no _0 suffix, which is how chisel names other signals + // An alternative solution would be to suggestName(n + "_" + i) + name.foreach(n => { + iocell.suggestName(n) + }) + iocell.io.o := sig + iocell.io.oe := true.B + iocell + } + // Note that the reverse here is because Cat(Seq(a,b,c,d)) yields abcd, but a is index 0 of the Seq + padSignal := Cat(iocells.map(_.io.pad).reverse) + iocells + } + case _ => throw new Exception("Bits signal does not have a direction and cannot be matched to IOCell(s)") + } + } + } + case (coreSignal: Reset, padSignal: Reset) => genCellForAbstractReset(coreSignal, padSignal) + case (coreSignal: Vec[_], padSignal: Vec[_]) => { + require(padSignal.size == coreSignal.size, "size of Vec for padSignal and coreSignal must be the same") + coreSignal.zip(padSignal).zipWithIndex.foldLeft(Seq.empty[IOCell]) { case (total, ((core, pad), i)) => + val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + i), typeParams) + total ++ ios + } + } + case (coreSignal: Record, padSignal: Record) => { + coreSignal.elements.foldLeft(Seq.empty[IOCell]) { case (total, (eltName, core)) => + val pad = padSignal.elements(eltName) + val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + eltName), typeParams) + total ++ ios + } + } + case _ => { throw new Exception("Oops, I don't know how to handle this signal.") } + } + } + +} diff --git a/generators/firechip/src/main/scala/BridgeBinders.scala b/generators/firechip/src/main/scala/BridgeBinders.scala index 550893589..48ea9bb30 100644 --- a/generators/firechip/src/main/scala/BridgeBinders.scala +++ b/generators/firechip/src/main/scala/BridgeBinders.scala @@ -26,7 +26,7 @@ import firesim.configs.MemModelKey import tracegen.{TraceGenSystemModuleImp} import cva6.CVA6Tile -import barstools.iocell.chisel._ +import chipyard.iocell._ import chipyard.iobinders._ import chipyard._ import chipyard.harness._ diff --git a/scripts/tutorial-patches/build.sbt.patch b/scripts/tutorial-patches/build.sbt.patch index b64d7a439..613790c9e 100644 --- a/scripts/tutorial-patches/build.sbt.patch +++ b/scripts/tutorial-patches/build.sbt.patch @@ -5,7 +5,7 @@ index c3be6161..2a6d7160 100644 @@ -147,7 +147,7 @@ lazy val testchipip = (project in file("generators/testchipip")) lazy val chipyard = (project in file("generators/chipyard")) - .dependsOn(testchipip, rocketchip, boom, hwacha, rocketchip_blocks, rocketchip_inclusive_cache, iocell, + .dependsOn(testchipip, rocketchip, boom, hwacha, rocketchip_blocks, rocketchip_inclusive_cache, - sha3, // On separate line to allow for cleaner tutorial-setup patches + //sha3, // On separate line to allow for cleaner tutorial-setup patches dsptools, rocket_dsp_utils, diff --git a/tools/barstools b/tools/barstools deleted file mode 160000 index 60a1be9bf..000000000 --- a/tools/barstools +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 60a1be9bfe344fccbddd4874524accb3c9d2ade9 diff --git a/tools/tapeout/src/main/scala/macrolib/ConfReader.scala b/tools/tapeout/src/main/scala/macrolib/ConfReader.scala new file mode 100644 index 000000000..ec701d6ee --- /dev/null +++ b/tools/tapeout/src/main/scala/macrolib/ConfReader.scala @@ -0,0 +1,95 @@ +package mdf.macrolib + +object ConfReader { + import scala.util.matching.Regex._ + + type ConfPort = (String, Boolean) // prefix (e.g. 
"RW0") and true if masked + + /** Rename ports like "read" to R0, "write" to W0, and "rw" to RW0, and + * return a count of read, write, and readwrite ports. + */ + def renamePorts(ports: Seq[String]): (Seq[ConfPort], Int, Int, Int) = { + var readCount = 0 + var writeCount = 0 + var readWriteCount = 0 + ( + ports.map { + _ match { + case "read" => readCount += 1; (s"R${readCount - 1}", false) + case "write" => writeCount += 1; (s"W${writeCount - 1}", false) + case "mwrite" => writeCount += 1; (s"W${writeCount - 1}", true) + case "rw" => readWriteCount += 1; (s"RW${readWriteCount - 1}", false) + case "mrw" => readWriteCount += 1; (s"RW${readWriteCount - 1}", true) + } + }, + readCount, + writeCount, + readWriteCount + ) + } + + def generateFirrtlPort(port: ConfPort, width: Int, depth: Int, maskGran: Option[Int]): MacroPort = { + val (prefix, masked) = port + val isReadWriter = prefix.startsWith("RW") + val isReader = prefix.startsWith("R") && !isReadWriter + val isWriter = prefix.startsWith("W") + val r = if (isReadWriter) "r" else "" + val w = if (isReadWriter) "w" else "" + MacroPort( + address = PolarizedPort(s"${prefix}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${prefix}_clk", PositiveEdge)), + writeEnable = if (isReadWriter) Some(PolarizedPort(s"${prefix}_${w}mode", ActiveHigh)) else None, + output = if (isReader || isReadWriter) Some(PolarizedPort(s"${prefix}_${w}data", ActiveHigh)) else None, + input = if (isWriter || isReadWriter) Some(PolarizedPort(s"${prefix}_${r}data", ActiveHigh)) else None, + maskPort = if (masked) Some(PolarizedPort(s"${prefix}_${w}mask", ActiveHigh)) else None, + maskGran = if (masked) maskGran else None, + width = Some(width), + depth = Some(depth) + ) + } + + /** Read a conf line into a SRAMMacro, but returns an error string in Left + * instead of throwing errors if the line is malformed. + */ + def readSingleLineSafe(line: String): Either[String, SRAMMacro] = { + val pattern = """name ([^\s]+) depth (\d+) width (\d+) ports ([a-z,]+)\s?(?:mask_gran (\d+))?""".r + pattern.findFirstMatchIn(line) match { + case Some(m: Match) => { + val name: String = m.group(1) + val depth: Int = (m.group(2)).toInt + val width: Int = (m.group(3)).toInt + val ports: Seq[String] = (m.group(4)).split(",") + val (firrtlPorts, readPortCount, writePortCount, readWritePortCount) = renamePorts(ports) + val familyStr = + (if (readPortCount > 0) s"${readPortCount}r" else "") + + (if (writePortCount > 0) s"${writePortCount}w" else "") + + (if (readWritePortCount > 0) s"${readWritePortCount}rw" else "") + val maskGran: Option[Int] = Option(m.group(5)).map(_.toInt) + Right( + SRAMMacro( + name = name, + width = width, + depth = depth, + family = familyStr, + vt = "", + mux = 1, + ports = firrtlPorts.map(generateFirrtlPort(_, width, depth, maskGran)), + extraPorts = List() + ) + ) + } + case _ => Left("Input line did not match conf regex") + } + } + + /** Read a conf line into a SRAMMacro. */ + def readSingleLine(line: String): SRAMMacro = { + readSingleLineSafe(line).right.get + } + + /** Read the contents of the conf file into a seq of SRAMMacro. 
*/ + def readFromString(contents: String): Seq[SRAMMacro] = { + // Trim, remove empty lines, then pass to readSingleLine + contents.split("\n").map(_.trim).filter(_ != "").map(readSingleLine(_)) + } +} diff --git a/tools/tapeout/src/main/scala/macrolib/FillerMacroBase.scala b/tools/tapeout/src/main/scala/macrolib/FillerMacroBase.scala new file mode 100644 index 000000000..688871b5d --- /dev/null +++ b/tools/tapeout/src/main/scala/macrolib/FillerMacroBase.scala @@ -0,0 +1,61 @@ +package mdf.macrolib + +import play.api.libs.json._ +import scala.language.implicitConversions + +// Filler and metal filler +abstract class FillerMacroBase(name: String, vt: String) extends Macro { + override def toString(): String = { + s"${this.getClass.getSimpleName}(name=${name}, vt=${vt})" + } + + override def toJSON(): JsObject = { + JsObject( + Seq( + "type" -> JsString(typeStr), + "name" -> Json.toJson(name), + "vt" -> Json.toJson(vt) + ) + ) + } +} +object FillerMacroBase { + def parseJSON(json: Map[String, JsValue]): Option[FillerMacroBase] = { + val typee: String = json.get("type") match { + case Some(x: JsString) => + x.value match { + case "" => return None + case x => x + } + case _ => return None + } + val name: String = json.get("name") match { + case Some(x: JsString) => + x.value match { + case "" => return None + case x => x + } + case _ => return None + } + val vt: String = json.get("vt") match { + case Some(x: JsString) => + x.value match { + case "" => return None + case x => x + } + case _ => return None + } + typee match { + case "metal filler cell" => Some(MetalFillerMacro(name, vt)) + case "filler cell" => Some(FillerMacro(name, vt)) + case _ => None + } + } +} + +case class FillerMacro(name: String, vt: String) extends FillerMacroBase(name, vt) { + override def typeStr = "filler cell" +} +case class MetalFillerMacro(name: String, vt: String) extends FillerMacroBase(name, vt) { + override def typeStr = "metal filler cell" +} diff --git a/tools/tapeout/src/main/scala/macrolib/FlipChipMacro.scala b/tools/tapeout/src/main/scala/macrolib/FlipChipMacro.scala new file mode 100644 index 000000000..88a20aeaa --- /dev/null +++ b/tools/tapeout/src/main/scala/macrolib/FlipChipMacro.scala @@ -0,0 +1,72 @@ +package mdf.macrolib + +import play.api.libs.json._ +import scala.collection.mutable.ListBuffer +import scala.language.implicitConversions + +// Flip Chip Macro +case class FlipChipMacro( + name: String, + bumpDimensions: (Int, Int), + bumpLocations: Seq[Seq[String]]) + extends Macro { + override def toJSON(): JsObject = { + + val output = new ListBuffer[(String, JsValue)]() + output.appendAll( + Seq( + "name" -> Json.toJson(name), + "type" -> Json.toJson(typeStr), + "bump_dimensions" -> JsArray(Seq(bumpDimensions._1, bumpDimensions._2).map { JsNumber(_) }), + "bump_locations" -> JsArray(bumpLocations.map(l => JsArray(l.map(JsString)))) + ) + ) + + JsObject(output) + } + val maxIONameSize = bumpLocations.foldLeft(0) { (size, row) => + row.foldLeft(size) { (size, str) => scala.math.max(size, str.length) } + } + def visualize: String = { + val output = new StringBuffer() + for (x <- 0 until bumpDimensions._1) { + for (y <- 0 until bumpDimensions._2) { + val name = bumpLocations(x)(y).drop(1).dropRight(1) + val extra = maxIONameSize - name.length() + val leftSpace = " " * (extra / 2) + val rightSpace = " " * (extra / 2 + extra % 2) + output.append(leftSpace + name + rightSpace + "|") + } + output.append("\n") + } + output.toString() + } + + override def typeStr = "flipchip" +} + +object FlipChipMacro { 
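+  // Parses the JSON form produced by toJSON above, checking that the
+  // bump_locations grid matches bump_dimensions before constructing the macro.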
+ def parseJSON(json: Map[String, JsValue]): Option[FlipChipMacro] = { + val name: String = json.get("name") match { + case Some(x: JsString) => x.as[String] + case _ => return None + } + + val bumpDimensions: (Int, Int) = json.get("bump_dimensions") match { + case Some(JsArray(x)) if x.size == 2 => + val z = x.map(_.as[JsNumber].value.intValue) + (z(0), z(1)) + case None => return None + } + val bumpLocations: Seq[Seq[String]] = json.get("bump_locations") match { + case Some(JsArray(array)) => + array.collect { case JsArray(a2) => a2.map(_.toString).toSeq }.toSeq + case _ => return None + } + // Can't have dimensions and locations which don't match + if (bumpLocations.size != bumpDimensions._1) return None + if (bumpLocations.collect { case x if x.size != bumpDimensions._2 => x }.nonEmpty) return None + + Some(FlipChipMacro(name, bumpDimensions, bumpLocations)) + } +} diff --git a/tools/tapeout/src/main/scala/macrolib/IOMacro.scala b/tools/tapeout/src/main/scala/macrolib/IOMacro.scala new file mode 100644 index 000000000..3f8ead8c2 --- /dev/null +++ b/tools/tapeout/src/main/scala/macrolib/IOMacro.scala @@ -0,0 +1,147 @@ +package mdf.macrolib + +import play.api.libs.json._ +import scala.collection.mutable.ListBuffer +import scala.language.implicitConversions + +sealed abstract class PortType { def toJSON(): JsString = JsString(toString) } +case object Digital extends PortType { override def toString: String = "digital" } +case object Analog extends PortType { override def toString: String = "analog" } +case object Power extends PortType { override def toString: String = "power" } +case object Ground extends PortType { override def toString: String = "ground" } +case object NoConnect extends PortType { override def toString: String = "NC" } + +sealed abstract class Direction { def toJSON(): JsString = JsString(toString) } +case object Input extends Direction { override def toString: String = "input" } +case object Output extends Direction { override def toString: String = "output" } +case object InOut extends Direction { override def toString: String = "inout" } + +sealed abstract class Termination { def toJSON(): JsValue } +case object CMOS extends Termination { override def toJSON(): JsString = JsString("CMOS") } +case class Resistive(ohms: Int) extends Termination { override def toJSON(): JsNumber = JsNumber(ohms) } + +sealed abstract class TerminationType { def toJSON(): JsString } +case object Single extends TerminationType { override def toJSON(): JsString = JsString("single") } +case object Differential extends TerminationType { override def toJSON(): JsString = JsString("differential") } + +// IO macro +case class IOMacro( + name: String, + tpe: PortType, + direction: Option[Direction] = None, + termination: Option[Termination] = None, + terminationType: Option[TerminationType] = None, + terminationReference: Option[String] = None, + matching: Seq[String] = Seq.empty[String], + bbname: Option[String] = None) + extends Macro { + override def toJSON(): JsObject = { + + val output = new ListBuffer[(String, JsValue)]() + output.appendAll( + Seq( + "name" -> Json.toJson(name), + "type" -> tpe.toJSON() + ) + ) + if (direction.isDefined) output.append("direction" -> direction.get.toJSON) + if (termination.isDefined) output.append("termination" -> termination.get.toJSON) + if (terminationType.isDefined) output.append("terminationType" -> terminationType.get.toJSON) + if (terminationReference.isDefined) output.append("terminationReference" -> JsString(terminationReference.get)) + if 
(matching.nonEmpty) output.append("match" -> JsArray(matching.map(JsString))) + if (bbname.nonEmpty) output.append("blackBox" -> JsString(bbname.get)) + + JsObject(output) + } + + override def typeStr = "iomacro" +} +object IOMacro { + def parseJSON(json: Map[String, JsValue]): Option[IOMacro] = { + val name: String = json.get("name") match { + case Some(x: JsString) => x.as[String] + case _ => return None + } + val tpe: PortType = json.get("type") match { + case Some(JsString("power")) => Power + case Some(JsString("ground")) => Ground + case Some(JsString("digital")) => Digital + case Some(JsString("analog")) => Analog + case Some(JsString("NC")) => NoConnect + case _ => return None + } + val direction: Option[Direction] = json.get("direction") match { + case Some(JsString("input")) => Some(Input) + case Some(JsString("output")) => Some(Output) + case Some(JsString("inout")) => Some(InOut) + case _ => None + } + val termination: Option[Termination] = json.get("termination") match { + case Some(JsNumber(x)) => Some(Resistive(x.toInt)) + case Some(JsString("CMOS")) => Some(CMOS) + case _ => None + } + val terminationType: Option[TerminationType] = json.get("terminationType") match { + case Some(JsString("differential")) => Some(Differential) + case Some(JsString("single")) => Some(Single) + case _ => None + } + val terminationRef: Option[String] = json.get("terminationReference") match { + case Some(JsString(x)) => Some(x) + case _ if terminationType.isDefined => return None + case _ => None + } + val matching: Seq[String] = json.get("match") match { + case Some(JsArray(array)) => array.map(_.as[JsString].value).toList + case _ => Seq.empty[String] + } + val bbname: Option[String] = json.get("blackBox") match { + case Some(JsString(module)) => Some(module) + case Some(_) => return None + case _ => None + } + Some(IOMacro(name, tpe, direction, termination, terminationType, terminationRef, matching, bbname)) + } +} + +case class IOProperties(name: String, top: String, ios: Seq[IOMacro]) extends Macro { + override def toJSON(): JsObject = { + val output = new ListBuffer[(String, JsValue)]() + output.appendAll( + Seq( + "name" -> Json.toJson(name), + "top" -> Json.toJson(top), + "type" -> Json.toJson(typeStr), + "ios" -> JsArray(ios.map(_.toJSON)) + ) + ) + JsObject(output) + } + + override def typeStr = "io_properties" + +} + +object IOProperties { + def parseJSON(json: Map[String, JsValue]): Option[IOProperties] = { + val name: String = json.get("name") match { + case Some(x: JsString) => x.as[String] + case _ => return None + } + val top: String = json.get("top") match { + case Some(x: JsString) => x.as[String] + case _ => return None + } + val ios: Seq[IOMacro] = json.get("ios") match { + case Some(x: JsArray) => + x.as[List[Map[String, JsValue]]].map { a => + val b = IOMacro.parseJSON(a); + if (b == None) { + return None + } else b.get + } + case _ => List() + } + Some(IOProperties(name, top, ios)) + } +} diff --git a/tools/tapeout/src/main/scala/macrolib/MacroLib.scala b/tools/tapeout/src/main/scala/macrolib/MacroLib.scala new file mode 100644 index 000000000..569c4dacb --- /dev/null +++ b/tools/tapeout/src/main/scala/macrolib/MacroLib.scala @@ -0,0 +1,19 @@ +package mdf.macrolib + +import play.api.libs.json._ +import scala.collection.mutable.ListBuffer +import scala.language.implicitConversions + +// TODO: decide if we should always silently absorb errors + +// See macro_format.yml for the format description. 
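+// Concrete subclasses in this package include SRAMMacro, SRAMCompiler,
+// FillerMacro/MetalFillerMacro, FlipChipMacro, IOMacro, and IOProperties.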
+ +// "Base class" for macros +abstract class Macro { + def name: String + + // Type of macro is determined by subclass + def typeStr: String + + def toJSON(): JsObject +} diff --git a/tools/tapeout/src/main/scala/macrolib/SRAM.scala b/tools/tapeout/src/main/scala/macrolib/SRAM.scala new file mode 100644 index 000000000..ea51b0490 --- /dev/null +++ b/tools/tapeout/src/main/scala/macrolib/SRAM.scala @@ -0,0 +1,444 @@ +package mdf.macrolib + +import play.api.libs.json._ +import scala.collection.mutable.ListBuffer +import scala.language.implicitConversions + +// SRAM macro +case class SRAMMacro( + name: String, + width: Int, + depth: BigInt, + family: String, + ports: Seq[MacroPort], + vt: String = "", + mux: Int = 1, + extraPorts: Seq[MacroExtraPort] = List()) + extends Macro { + override def toJSON(): JsObject = { + val output = new ListBuffer[(String, JsValue)]() + output.appendAll( + Seq( + "type" -> JsString("sram"), + "name" -> Json.toJson(name), + "width" -> Json.toJson(width), + "depth" -> Json.toJson(depth.toString), + "mux" -> Json.toJson(mux), + "mask" -> Json.toJson(ports.exists(p => p.maskPort.isDefined)), + "ports" -> JsArray(ports.map { _.toJSON }) + ) + ) + if (family != "") { + output.appendAll(Seq("family" -> Json.toJson(family))) + } + if (vt != "") { + output.appendAll(Seq("vt" -> Json.toJson(vt))) + } + if (extraPorts.length > 0) { + output.appendAll(Seq("extra ports" -> JsArray(extraPorts.map { _.toJSON }))) + } + + JsObject(output) + } + + override def typeStr = "sram" +} +object SRAMMacro { + def parseJSON(json: Map[String, JsValue]): Option[SRAMMacro] = { + val name: String = json.get("name") match { + case Some(x: JsString) => x.as[String] + case _ => return None + } + val width: Int = json.get("width") match { + case Some(x: JsNumber) => x.value.intValue + case _ => return None + } + val depth: BigInt = json.get("depth") match { + case Some(x: JsString) => + try { BigInt(x.as[String]) } + catch { case _: Throwable => return None } + case _ => return None + } + val family: String = json.get("family") match { + case Some(x: JsString) => x.as[String] + case _ => "" // optional + } + val vt: String = json.get("vt") match { + case Some(x: JsString) => x.as[String] + case _ => "" // optional + } + val mux: Int = json.get("mux") match { + case Some(x: JsNumber) => x.value.intValue + case _ => 1 // default + } + val ports: Seq[MacroPort] = json.get("ports") match { + case Some(x: JsArray) => + x.as[List[Map[String, JsValue]]].map { a => + val b = MacroPort.parseJSON(a, width, depth); + if (b == None) { + return None + } else b.get + } + case _ => List() + } + if (ports.length == 0) { + // Can't have portless memories. 
+ return None + } + val extraPorts: Seq[MacroExtraPort] = json.get("extra ports") match { + case Some(x: JsArray) => + x.as[List[Map[String, JsValue]]].map { a => + val b = MacroExtraPort.parseJSON(a); + if (b == None) { + return None + } else b.get + } + case _ => List() + } + Some(SRAMMacro(name, width, depth, family, ports, vt, mux, extraPorts)) + } +} + +// SRAM compiler +case class SRAMGroup( + name: Seq[String], + family: String, + vt: Seq[String], + mux: Int, + depth: Range, + width: Range, + ports: Seq[MacroPort], + extraPorts: Seq[MacroExtraPort] = List()) { + def toJSON: JsObject = { + val output = new ListBuffer[(String, JsValue)]() + output.appendAll( + Seq( + "name" -> JsArray(name.map(Json.toJson(_))), + "vt" -> JsArray(vt.map(Json.toJson(_))), + "mux" -> Json.toJson(mux), + "depth" -> JsArray(Seq(depth.start, depth.end, depth.step).map { x => Json.toJson(x) }), + "width" -> JsArray(Seq(width.start, width.end, width.step).map { x => Json.toJson(x) }), + "ports" -> JsArray(ports.map { _.toJSON }) + ) + ) + if (family != "") { + output.appendAll(Seq("family" -> Json.toJson(family))) + } + if (extraPorts.length > 0) { + output.appendAll(Seq("extra ports" -> JsArray(extraPorts.map { _.toJSON }))) + } + JsObject(output) + } +} +object SRAMGroup { + def parseJSON(json: Map[String, JsValue]): Option[SRAMGroup] = { + val family: String = json.get("family") match { + case Some(x: JsString) => x.as[String] + case _ => "" // optional + } + val name: Seq[String] = json.get("name") match { + case Some(x: JsArray) => x.as[List[JsString]].map(_.as[String]) + case _ => return None + } + val vt: Seq[String] = json.get("vt") match { + case Some(x: JsArray) => x.as[List[JsString]].map(_.as[String]) + case _ => return None + } + val mux: Int = json.get("mux") match { + case Some(x: JsNumber) => x.value.intValue + case _ => return None + } + val depth: Range = json.get("depth") match { + case Some(x: JsArray) => + val seq = x.as[List[JsNumber]].map(_.value.intValue) + Range.inclusive(seq(0), seq(1), seq(2)) + case _ => return None + } + val width: Range = json.get("width") match { + case Some(x: JsArray) => + val seq = x.as[List[JsNumber]].map(_.value.intValue) + Range.inclusive(seq(0), seq(1), seq(2)) + case _ => return None + } + val ports: Seq[MacroPort] = json.get("ports") match { + case Some(x: JsArray) => + x.as[List[Map[String, JsValue]]].map { a => + { + val b = MacroPort.parseJSON(a, None, None); + if (b == None) { + return None + } else b.get + } + } + case _ => List() + } + if (ports.length == 0) { + // Can't have portless memories. 
+      return None
+    }
+    val extraPorts: Seq[MacroExtraPort] = json.get("extra ports") match {
+      case Some(x: JsArray) =>
+        x.as[List[Map[String, JsValue]]].map { a =>
+          {
+            val b = MacroExtraPort.parseJSON(a);
+            if (b == None) {
+              return None
+            } else b.get
+          }
+        }
+      case _ => List()
+    }
+    Some(SRAMGroup(name, family, vt, mux, depth, width, ports, extraPorts))
+  }
+}
+
+case class SRAMCompiler(
+  name:   String,
+  groups: Seq[SRAMGroup])
+    extends Macro {
+  override def toJSON(): JsObject = {
+    val output = new ListBuffer[(String, JsValue)]()
+    output.appendAll(
+      Seq(
+        "type" -> Json.toJson("sramcompiler"),
+        "name" -> Json.toJson(name),
+        "groups" -> JsArray(groups.map { _.toJSON })
+      )
+    )
+
+    JsObject(output)
+  }
+
+  override def typeStr = "sramcompiler"
+}
+object SRAMCompiler {
+  def parseJSON(json: Map[String, JsValue]): Option[SRAMCompiler] = {
+    val name: String = json.get("name") match {
+      case Some(x: JsString) => x.as[String]
+      case _ => return None
+    }
+    val groups: Seq[SRAMGroup] = json.get("groups") match {
+      case Some(x: JsArray) =>
+        x.as[List[Map[String, JsValue]]].map { a =>
+          {
+            val b = SRAMGroup.parseJSON(a);
+            if (b == None) { return None }
+            else b.get
+          }
+        }
+      case _ => List()
+    }
+    if (groups.length == 0) {
+      // Can't have a compiler with no groups.
+      return None
+    }
+    Some(SRAMCompiler(name, groups))
+  }
+}
+
+// Type of extra port
+sealed abstract class MacroExtraPortType
+case object Constant extends MacroExtraPortType
+object MacroExtraPortType {
+  implicit def toMacroExtraPortType(s: Any): Option[MacroExtraPortType] = {
+    s match {
+      case "constant" => Some(Constant)
+      case _ => None
+    }
+  }
+
+  implicit def toString(t: MacroExtraPortType): String = {
+    t match {
+      case Constant => "constant"
+      case _ => ""
+    }
+  }
+}
+
+// Extra port in SRAM
+case class MacroExtraPort(
+  name:     String,
+  width:    Int,
+  portType: MacroExtraPortType,
+  value:    BigInt) {
+  def toJSON(): JsObject = {
+    JsObject(
+      Seq(
+        "name" -> Json.toJson(name),
+        "width" -> Json.toJson(width),
+        "type" -> JsString(MacroExtraPortType.toString(portType)),
+        "value" -> JsNumber(BigDecimal(value))
+      )
+    )
+  }
+}
+object MacroExtraPort {
+  def parseJSON(json: Map[String, JsValue]): Option[MacroExtraPort] = {
+    val name = json.get("name") match {
+      case Some(x: JsString) => x.value
+      case _ => return None
+    }
+    val width = json.get("width") match {
+      case Some(x: JsNumber) => x.value.intValue
+      case _ => return None
+    }
+    val portType: MacroExtraPortType = json.get("type") match {
+      case Some(x: JsString) =>
+        MacroExtraPortType.toMacroExtraPortType(x.value) match {
+          case Some(t: MacroExtraPortType) => t
+          case _ => return None
+        }
+      case _ => return None
+    }
+    val value = json.get("value") match {
+      case Some(x: JsNumber) => x.value.toBigInt
+      case _ => return None
+    }
+    Some(MacroExtraPort(name, width, portType, value))
+  }
+}
+
+// A named port that also has polarity.
+case class PolarizedPort(name: String, polarity: PortPolarity) {
+  def toSeqMap(prefix: String): Seq[Tuple2[String, JsValue]] = {
+    Seq(
+      prefix + " port name" -> Json.toJson(name),
+      prefix + " port polarity" -> JsString(polarity)
+    )
+  }
+}
+object PolarizedPort {
+  // Parse a pair of " port name" and " port polarity" keys into a
+  // polarized port definition.
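+  // Illustrative example (not taken from an MDF spec): with prefix = "address",
+  // the JSON keys "address port name": "A" and "address port polarity": "active high"
+  // parse to PolarizedPort("A", ActiveHigh).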
+ def parseJSON(json: Map[String, JsValue], prefix: String): Option[PolarizedPort] = { + val name = json.get(prefix + " port name") match { + case Some(x: JsString) => Some(x.value) + case _ => None + } + val polarity: Option[PortPolarity] = json.get(prefix + " port polarity") match { + case Some(x: JsString) => Some(x.value) + case _ => None + } + + (name, polarity) match { + case (Some(n: String), Some(p: PortPolarity)) => Some(PolarizedPort(n, p)) + case _ => None + } + } +} + +// A SRAM memory port +case class MacroPort( + address: PolarizedPort, + clock: Option[PolarizedPort] = None, + writeEnable: Option[PolarizedPort] = None, + readEnable: Option[PolarizedPort] = None, + chipEnable: Option[PolarizedPort] = None, + output: Option[PolarizedPort] = None, + input: Option[PolarizedPort] = None, + maskPort: Option[PolarizedPort] = None, + maskGran: Option[Int] = None, + // For internal use only; these aren't port-specific. + width: Option[Int], + depth: Option[BigInt]) { + def effectiveMaskGran = maskGran.getOrElse(width.get) + + def toJSON(): JsObject = { + val keys: Seq[Tuple2[String, Option[Any]]] = Seq( + "address" -> Some(address), + "clock" -> clock, + "write enable" -> writeEnable, + "read enable" -> readEnable, + "chip enable" -> chipEnable, + "output" -> output, + "input" -> input, + "mask" -> maskPort, + "mask granularity" -> maskGran + ) + JsObject(keys.flatMap(k => { + val (key, value) = k + value match { + case Some(x: Int) => Seq(key -> JsNumber(x)) + case Some(x: PolarizedPort) => x.toSeqMap(key) + case _ => List() + } + })) + } + + // Check that all port names are unique. + private val polarizedPorts = + List(Some(address), clock, writeEnable, readEnable, chipEnable, output, input, maskPort).flatten + assert(polarizedPorts.distinct.size == polarizedPorts.size, "All port names must be unique") +} +object MacroPort { + def parseJSON(json: Map[String, JsValue]): Option[MacroPort] = parseJSON(json, None, None) + def parseJSON(json: Map[String, JsValue], width: Int, depth: BigInt): Option[MacroPort] = + parseJSON(json, Some(width), Some(depth)) + def parseJSON(json: Map[String, JsValue], width: Option[Int], depth: Option[BigInt]): Option[MacroPort] = { + val address = PolarizedPort.parseJSON(json, "address") + if (address == None) { + return None + } + + val clock = PolarizedPort.parseJSON(json, "clock") + // TODO: validate based on family (e.g. 
1rw must have a write enable, etc) + val writeEnable = PolarizedPort.parseJSON(json, "write enable") + val readEnable = PolarizedPort.parseJSON(json, "read enable") + val chipEnable = PolarizedPort.parseJSON(json, "chip enable") + + val output = PolarizedPort.parseJSON(json, "output") + val input = PolarizedPort.parseJSON(json, "input") + + val maskPort = PolarizedPort.parseJSON(json, "mask") + val maskGran: Option[Int] = json.get("mask granularity") match { + case Some(x: JsNumber) => Some(x.value.intValue) + case _ => None + } + + if (maskPort.isDefined != maskGran.isDefined) { + return None + } + + Some( + MacroPort( + width = width, + depth = depth, + address = address.get, + clock = clock, + writeEnable = writeEnable, + readEnable = readEnable, + chipEnable = chipEnable, + output = output, + input = input, + maskPort = maskPort, + maskGran = maskGran + ) + ) + } +} + +// Port polarity +trait PortPolarity +case object ActiveLow extends PortPolarity +case object ActiveHigh extends PortPolarity +case object NegativeEdge extends PortPolarity +case object PositiveEdge extends PortPolarity +object PortPolarity { + implicit def toPortPolarity(s: String): PortPolarity = (s: @unchecked) match { + case "active low" => ActiveLow + case "active high" => ActiveHigh + case "negative edge" => NegativeEdge + case "positive edge" => PositiveEdge + } + implicit def toPortPolarity(s: Option[String]): Option[PortPolarity] = + s.map(toPortPolarity) + + implicit def toString(p: PortPolarity): String = { + p match { + case ActiveLow => "active low" + case ActiveHigh => "active high" + case NegativeEdge => "negative edge" + case PositiveEdge => "positive edge" + } + } +} diff --git a/tools/tapeout/src/main/scala/macrolib/Utils.scala b/tools/tapeout/src/main/scala/macrolib/Utils.scala new file mode 100644 index 000000000..547f910cf --- /dev/null +++ b/tools/tapeout/src/main/scala/macrolib/Utils.scala @@ -0,0 +1,96 @@ +package mdf.macrolib + +import play.api.libs.json._ + +import java.io.FileNotFoundException +import scala.collection.mutable.ListBuffer +import scala.language.implicitConversions + +object Utils { + // Read a MDF file from a String. + def readMDFFromString(str: String): Option[Seq[Macro]] = { + Json.parse(str) match { + // Make sure that the document is a list. + case arr: JsArray => { + val result: List[Option[Macro]] = arr.as[List[Map[String, JsValue]]].map { obj => + // Check the type of object. + val objTypeStr: String = obj.get("type") match { + case Some(x: JsString) => x.as[String] + case _ => return None // error, no type found + } + objTypeStr match { + case "filler cell" | "metal filler cell" => FillerMacroBase.parseJSON(obj) + case "sram" => SRAMMacro.parseJSON(obj) + case "sramcompiler" => SRAMCompiler.parseJSON(obj) + case "io_properties" => IOProperties.parseJSON(obj) + case "flipchip" => FlipChipMacro.parseJSON(obj) + case _ => None // skip unknown macro types + } + } + // Remove all the Nones and convert back to Seq[Macro] + Some(result.filter { x => x != None }.map { x => x.get }) + } + case _ => None + } + } + + // Read a MDF file from a path. + def readMDFFromPath(path: Option[String]): Option[Seq[Macro]] = { + path match { + case None => None + // Read file into string and parse + case Some(p) => + try { + Utils.readMDFFromString(scala.io.Source.fromFile(p).mkString) + } catch { + case f: FileNotFoundException => + println(s"FILE NOT FOUND $p in dir ${os.pwd}") + throw f + } + } + } + + // Write a MDF file to a String. 
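+  // e.g. (sketch) readMDFFromString(writeMDFToString(macros)) should round-trip
+  // the same macros, modulo JSON formatting and defaulted optional fields.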
+  def writeMDFToString(s: Seq[Macro]): String = {
+    Json.prettyPrint(JsArray(s.map(_.toJSON)))
+  }
+
+  // Write a MDF file to a path.
+  // Returns true upon success.
+  def writeMDFToPath(path: Option[String], s: Seq[Macro]): Boolean = {
+    path match {
+      case None => false
+      // Serialize the macros and write them to the file
+      case Some(p: String) => {
+        import java.io._
+        val pw = new PrintWriter(new File(p))
+        pw.write(writeMDFToString(s))
+        val error = pw.checkError
+        pw.close()
+        !error
+      }
+    }
+  }
+
+  // Write a single macro to a String.
+  def writeMacroToString(s: Macro): String = {
+    Json.prettyPrint(s.toJSON)
+  }
+
+  // Write a single Macro to a path.
+  // Returns true upon success.
+  def writeMacroToPath(path: Option[String], s: Macro): Boolean = {
+    path match {
+      case None => false
+      // Serialize the macro and write it to the file
+      case Some(p: String) => {
+        import java.io._
+        val pw = new PrintWriter(new File(p))
+        pw.write(writeMacroToString(s))
+        val error = pw.checkError
+        pw.close()
+        !error
+      }
+    }
+  }
+}
diff --git a/tools/tapeout/src/main/scala/macros/CostMetric.scala b/tools/tapeout/src/main/scala/macros/CostMetric.scala
new file mode 100644
index 000000000..1be339670
--- /dev/null
+++ b/tools/tapeout/src/main/scala/macros/CostMetric.scala
@@ -0,0 +1,205 @@
+// See LICENSE for license details.
+
+package tapeout.macros
+
+/** Trait which can calculate the cost of compiling a memory against a certain
+  * library memory macro using a cost function.
+  */
+// TODO: eventually explore compiling a single target memory using multiple
+// different kinds of target memory.
+trait CostMetric extends Serializable {
+
+  /** Cost function that returns the cost of compiling a memory using a certain
+    * macro.
+    *
+    * @param mem Memory macro to compile (target memory)
+    * @param lib Library memory macro to use (library memory)
+    * @return The cost of this compile, defined by this cost metric, or None if
+    *         it cannot be compiled.
+    */
+  def cost(mem: Macro, lib: Macro): Option[Double]
+
+  /** Helper function to return the map of arguments (or an empty map if there are none).
+    */
+  def commandLineParams(): Map[String, String]
+
+  // We also want this to show up for the class itself.
+  def name(): String
+}
+
+// Is there a better way to do this? (static method associated to CostMetric)
+trait CostMetricCompanion {
+  def name(): String
+
+  /** Construct this cost metric from a command line mapping. */
+  def construct(m: Map[String, String]): CostMetric
+}
+
+// Some default cost functions.
+
+/** Palmer's old metric.
+  * TODO: figure out what the difference is between this metric and the current
+  * default metric and either revive or delete this metric.
+  */
+object OldMetric extends CostMetric with CostMetricCompanion {
+  override def cost(mem: Macro, lib: Macro): Option[Double] = {
+    /* Palmer: A quick cost function (that must be kept in sync with
+     * memory_cost()) that attempts to avoid compiling unnecessary
+     * memories. This is a lower bound on the cost of compiling a
+     * memory: it assumes 100% bit-cell utilization when mapping. */
+    // val cost = 100 * (mem.depth * mem.width) / (lib.depth * lib.width) +
+    //   (mem.depth * mem.width)
+    ???
+  }
+
+  override def commandLineParams() = Map.empty[String, String]
+  override def name() = "OldMetric"
+  override def construct(m: Map[String, String]): CostMetric = OldMetric
+}
+
+/** An external cost function.
+  * Calls the specified path with paths to the JSON MDF representation of the mem
+  * and lib macros. The external executable should print a Double.
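+  * For example, a hypothetical script invoked as `cost.py mem.json lib.json`
+  * that prints `1.5` makes cost() return Some(1.5).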
+ * None will be returned if the external executable does not print a valid + * Double. + */ +class ExternalMetric(path: String) extends CostMetric { + import mdf.macrolib.Utils.writeMacroToPath + + import java.io._ + import scala.language.postfixOps + import sys.process._ + + override def cost(mem: Macro, lib: Macro): Option[Double] = { + // Create temporary files. + val memFile = File.createTempFile("_macrocompiler_mem_", ".json") + val libFile = File.createTempFile("_macrocompiler_lib_", ".json") + + writeMacroToPath(Some(memFile.getAbsolutePath), mem.src) + writeMacroToPath(Some(libFile.getAbsolutePath), lib.src) + + // !! executes the given command + val result: String = (s"$path ${memFile.getAbsolutePath} ${libFile.getAbsolutePath}" !!).trim + + // Remove temporary files. + memFile.delete() + libFile.delete() + + try { + Some(result.toDouble) + } catch { + case _: NumberFormatException => None + } + } + + override def commandLineParams() = Map("path" -> path) + override def name(): String = ExternalMetric.name() +} + +object ExternalMetric extends CostMetricCompanion { + override def name() = "ExternalMetric" + + /** Construct this cost metric from a command line mapping. */ + override def construct(m: Map[String, String]): ExternalMetric = { + val pathOption = m.get("path") + pathOption match { + case Some(path: String) => new ExternalMetric(path) + case _ => throw new IllegalArgumentException("ExternalMetric missing option 'path'") + } + } +} + +/** The current default metric in barstools, re-defined by Donggyu. */ +// TODO: write tests for this function to make sure it selects the right things +object DefaultMetric extends CostMetric with CostMetricCompanion { + override def cost(mem: Macro, lib: Macro): Option[Double] = { + val memMask = mem.src.ports.map(_.maskGran).find(_.isDefined).flatten + val libMask = lib.src.ports.map(_.maskGran).find(_.isDefined).flatten + val memWidth = (memMask, libMask) match { + case (None, _) => mem.src.width + case (Some(p), None) => + (mem.src.width / p) * math.ceil( + p.toDouble / lib.src.width + ) * lib.src.width //We map the mask to distinct memories + case (Some(p), Some(m)) => + if (m <= p) (mem.src.width / p) * math.ceil(p.toDouble / m) * m //Using multiple m's to create a p (integrally) + else (mem.src.width / p) * m //Waste the extra maskbits + } + val maskPenalty = (memMask, libMask) match { + case (None, Some(_)) => 0.001 + case (_, _) => 0 + } + val depthCost = math.ceil(mem.src.depth.toDouble / lib.src.depth.toDouble) + val widthCost = math.ceil(memWidth / lib.src.width.toDouble) + val bitsCost = (lib.src.depth * lib.src.width).toDouble + // Fraction of wasted bits plus const per mem + val requestedBits = (mem.src.depth * mem.src.width).toDouble + val bitsWasted = depthCost * widthCost * bitsCost - requestedBits + val wastedConst = 0.05 // 0 means waste as few bits with no regard for instance count + val costPerInst = wastedConst * depthCost * widthCost + Some(1.0 * bitsWasted / requestedBits + costPerInst + maskPenalty) + } + + override def commandLineParams() = Map.empty[String, String] + override def name() = "DefaultMetric" + override def construct(m: Map[String, String]): CostMetric = DefaultMetric +} + +object MacroCompilerUtil { + import java.io._ + import java.util.Base64 + + // Adapted from https://stackoverflow.com/a/134918 + + /** Serialize an arbitrary object to String. + * Used to pass structured values through as an annotation. 
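+    * e.g. (sketch) objFromString(objToString(params)).asInstanceOf[Params]
+    * round-trips a Params value via Java serialization and Base64.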
+ */ + def objToString(o: Serializable): String = { + val byteOutput: ByteArrayOutputStream = new ByteArrayOutputStream + val objectOutput: ObjectOutputStream = new ObjectOutputStream(byteOutput) + objectOutput.writeObject(o) + objectOutput.close() + Base64.getEncoder.encodeToString(byteOutput.toByteArray) + } + + /** Deserialize an arbitrary object from String. */ + def objFromString(s: String): AnyRef = { + val data = Base64.getDecoder.decode(s) + val ois: ObjectInputStream = new ObjectInputStream(new ByteArrayInputStream(data)) + val o = ois.readObject + ois.close() + o + } +} + +object CostMetric { + + /** Define some default metric. */ + val default: CostMetric = DefaultMetric + + val costMetricCreators: scala.collection.mutable.Map[String, CostMetricCompanion] = scala.collection.mutable.Map() + + // Register some default metrics + registerCostMetric(OldMetric) + registerCostMetric(ExternalMetric) + registerCostMetric(DefaultMetric) + + /** Register a cost metric. + * @param createFuncHelper Companion object to fetch the name and construct + * the metric. + */ + def registerCostMetric(createFuncHelper: CostMetricCompanion): Unit = { + costMetricCreators.update(createFuncHelper.name(), createFuncHelper) + } + + /** Select a cost metric from string. */ + def getCostMetric(m: String, params: Map[String, String]): CostMetric = { + if (m == "default") { + CostMetric.default + } else if (!costMetricCreators.contains(m)) { + throw new IllegalArgumentException("Invalid cost metric " + m) + } else { + costMetricCreators(m).construct(params) + } + } +} diff --git a/tools/tapeout/src/main/scala/macros/MacroCompiler.scala b/tools/tapeout/src/main/scala/macros/MacroCompiler.scala new file mode 100644 index 000000000..fbf857fea --- /dev/null +++ b/tools/tapeout/src/main/scala/macros/MacroCompiler.scala @@ -0,0 +1,981 @@ +// See LICENSE for license details. + +/** Terminology note: + * mem - target memory to compile, in design (e.g. Mem() in rocket) + * lib - technology SRAM(s) to use to compile mem + */ + +package tapeout.macros + +import tapeout.macros.Utils._ +import firrtl.Utils.{one, zero, BoolType} +import firrtl.annotations._ +import firrtl.ir._ +import firrtl.options.Dependency +import firrtl.stage.TransformManager.TransformDependency +import firrtl.stage.{FirrtlSourceAnnotation, FirrtlStage, Forms, OutputFileAnnotation, RunFirrtlTransformAnnotation} +import firrtl.{PrimOps, _} +import mdf.macrolib.{PolarizedPort, PortPolarity, SRAMCompiler, SRAMGroup, SRAMMacro} + +import java.io.{File, FileWriter} +import scala.annotation.tailrec +import scala.collection.mutable +import scala.collection.mutable.ArrayBuffer + +case class MacroCompilerException(msg: String) extends Exception(msg) + +// TODO The parameters could be unpacked here instead of keeping it in a serialized form +case class MacroCompilerAnnotation(content: String) extends NoTargetAnnotation { + import MacroCompilerAnnotation.Params + + def params: Params = MacroCompilerUtil.objFromString(content).asInstanceOf[Params] +} + +/** The MacroCompilerAnnotation to trigger the macro compiler. + * Note that this annotation does NOT actually target any modules for + * compilation. It simply holds all the settings for the memory compiler. The + * actual selection of which memories to compile is set in the Params. + * + * To use, simply annotate the entire circuit itself with this annotation and + * include [[MacroCompilerTransform]]. + */ +object MacroCompilerAnnotation { + + /** Macro compiler mode. 
*/ + sealed trait CompilerMode + + /** Strict mode - must compile all memories or error out. */ + case object Strict extends CompilerMode + + /** Synflops mode - compile all memories with synflops (do not map to lib at all). */ + case object Synflops extends CompilerMode + + /** CompileAndSynflops mode - compile all memories and create mock versions of the target libs with synflops. */ + case object CompileAndSynflops extends CompilerMode + + /** FallbackSynflops - compile all memories to SRAM when possible and fall back to synflops if a memory fails. * */ + case object FallbackSynflops extends CompilerMode + + /** CompileAvailable - compile what is possible and do nothing with uncompiled memories. * */ + case object CompileAvailable extends CompilerMode + + /** The default mode for the macro compiler. + * TODO: Maybe set the default to FallbackSynflops (typical for + * vlsi_mem_gen-like scripts) once it's implemented? + */ + val Default: CompilerMode = CompileAvailable + + // Options as list of (CompilerMode, command-line name, description) + val options: Seq[(CompilerMode, String, String)] = Seq( + (Default, "default", "Select the default option from below."), + (Strict, "strict", "Compile all memories to library or return an error."), + ( + Synflops, + "synflops", + "Produces synthesizable flop-based memories for all memories (do not map to lib at all); likely useful for simulation purposes." + ), + ( + CompileAndSynflops, + "compileandsynflops", + "Compile all memories and create mock versions of the target libs with synflops; likely also useful for simulation purposes." + ), + ( + FallbackSynflops, + "fallbacksynflops", + "Compile all memories to library when possible and fall back to synthesizable flop-based memories when library synth is not possible." + ), + ( + CompileAvailable, + "compileavailable", + "Compile all memories to library when possible and do nothing in case of errors. (default)" + ) + ) + + /** Helper function to select a compiler mode. */ + def stringToCompilerMode(str: String): CompilerMode = options.collectFirst { + case (mode, cmd, _) if cmd == str => mode + } match { + case Some(x) => x + case None => throw new IllegalArgumentException("No such compiler mode " + str) + } + + /** Parameters associated to this MacroCompilerAnnotation. + * + * @param mem Path to memory lib + * @param memFormat Type of memory lib (Some("conf"), Some("mdf"), or None (defaults to mdf)) + * @param lib Path to library lib or None if no libraries + * @param hammerIR Path to HammerIR output or None (not generated in this case) + * @param costMetric Cost metric to use + * @param mode Compiler mode (see CompilerMode) + * @param forceCompile Set of memories to force compiling to lib regardless of the mode + * @param forceSynflops Set of memories to force compiling as flops regardless of the mode + */ + case class Params( + mem: String, + memFormat: Option[String], + lib: Option[String], + hammerIR: Option[String], + costMetric: CostMetric, + mode: CompilerMode, + useCompiler: Boolean, + forceCompile: Set[String], + forceSynflops: Set[String]) + extends Serializable + + /** Create a MacroCompilerAnnotation. + * @param c Top-level circuit name (see class description) + * @param p Parameters (see above). 
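+    * e.g. (sketch, with made-up names):
+    *   MacroCompilerAnnotation("MyTop", Params("macros.json", Some("mdf"),
+    *     Some("lib.json"), None, CostMetric.default, CompileAvailable, false,
+    *     Set.empty, Set.empty))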
+ */ + def apply(c: String, p: Params): MacroCompilerAnnotation = + MacroCompilerAnnotation(MacroCompilerUtil.objToString(p)) + +} + +class MacroCompilerPass( + mems: Option[Seq[Macro]], + libs: Option[Seq[Macro]], + compilers: Option[SRAMCompiler], + hammerIR: Option[String], + costMetric: CostMetric = CostMetric.default, + mode: MacroCompilerAnnotation.CompilerMode = MacroCompilerAnnotation.Default) + extends firrtl.passes.Pass { + // Helper function to check the legality of bitPairs. + // e.g. ((0,21), (22,43)) is legal + // ((0,21), (22,21)) is illegal and will throw an assert + private def checkBitPairs(bitPairs: Seq[(BigInt, BigInt)]): Unit = { + bitPairs.foldLeft(BigInt(-1))((lastBit, nextPair) => { + assert(lastBit + 1 == nextPair._1, s"Pair's first bit ${nextPair._1} does not follow last bit $lastBit") + assert(nextPair._2 >= nextPair._1, s"Pair $nextPair in bitPairs $bitPairs is illegal") + nextPair._2 + }) + } + + /** Calculate bit pairs. + * This is a list of submemories by width. + * The tuples are (lsb, msb) inclusive. + * Example: (0, 7) and (8, 15) might be a split for a width=16 memory into two width=8 target memories. + * Another example: (0, 3), (4, 7), (8, 11) may be a split for a width-12 memory into 3 width-4 target memories. + * + * @param mem Memory to compile + * @param lib Lib to compile with + * @return Bit pairs or empty list if there was an error. + */ + private def calculateBitPairs(mem: Macro, lib: Macro): Seq[(BigInt, BigInt)] = { + val pairedPorts = mem.sortedPorts.zip(lib.sortedPorts) + + val bitPairs = ArrayBuffer[(BigInt, BigInt)]() + var currentLSB: BigInt = 0 + + // Process every bit in the mem width. + for (memBit <- 0 until mem.src.width) { + val bitsInCurrentMem = memBit - currentLSB + + // We'll need to find a bitPair that works for *all* the ports of the memory. + // e.g. unmasked read port and masked write port. + // For each port, store a tentative candidate for the split. + // Afterwards, figure out which one to use. + val bitPairCandidates = ArrayBuffer[(BigInt, BigInt)]() + for ((memPort, libPort) <- pairedPorts) { + + // Sanity check to make sure we only split once per bit, once per port. + var alreadySplit: Boolean = false + + // Helper function to check if it's time to split memories. + // @param effectiveLibWidth Split memory when we have this many bits. + def splitMemory(effectiveLibWidth: Int): Unit = { + assert(!alreadySplit) + + if (bitsInCurrentMem == effectiveLibWidth) { + bitPairCandidates += ((currentLSB, memBit - 1)) + alreadySplit = true + } + } + + // Make sure we don't have a maskGran larger than the width of the memory. + assert(memPort.src.effectiveMaskGran <= memPort.src.width.get) + assert(libPort.src.effectiveMaskGran <= libPort.src.width.get) + + val libWidth = libPort.src.width.get + + // Don't consider cases of maskGran == width as "masked" since those masks + // effectively function as write-enable bits. + val memMask = if (memPort.src.effectiveMaskGran == memPort.src.width.get) None else memPort.src.maskGran + val libMask = if (libPort.src.effectiveMaskGran == libPort.src.width.get) None else libPort.src.maskGran + + (memMask, libMask) match { + // Neither lib nor mem is masked. + // No problems here. + case (None, None) => splitMemory(libWidth) + + // Only the lib is masked. + // Not an issue; we can just make all the bits in the lib mask enabled. + case (None, Some(_)) => splitMemory(libWidth) + + // Only the mem is masked. 
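+            // e.g. mem maskGran = 8 against an unmasked lib; the branches below
+            // check how that granularity lines up with the lib width
+            // (illustrative numbers, in the spirit of the inline examples).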
+            case (Some(p), None) =>
+              if (p % libPort.src.width.get == 0) {
+                // If the mem mask is a multiple of the lib width, then we're good.
+                // Just roll over every lib width as usual.
+                // e.g. lib width=4, mem maskGran={4, 8, 12, 16, ...}
+                splitMemory(libWidth)
+              } else if (libPort.src.width.get % p == 0) {
+                // Lib width is a multiple of the mem mask.
+                // Consider the case where mem mask = 4 but lib width = 8, unmasked.
+                // We can still compile, but will need to waste the extra bits.
+                splitMemory(memMask.get)
+              } else {
+                // No neat multiples.
+                // We might still be able to compile extremely inefficiently.
+                if (p < libPort.src.width.get) {
+                  // Compile using mem mask as the effective width. (note that lib is not masked)
+                  // e.g. mem mask = 3, lib width = 8
+                  splitMemory(memMask.get)
+                } else {
+                  // e.g. mem mask = 13, lib width = 8
+                  System.err.println(
+                    s"Unmasked target memory: unaligned mem maskGran $p with lib (${lib.src.name}) width ${libPort.src.width.get} not supported"
+                  )
+                  return Seq()
+                }
+              }
+
+            // Both lib and mem are masked.
+            case (Some(m), Some(l)) =>
+              if (m == l) {
+                // Lib maskGran == mem maskGran, no problems
+                splitMemory(libWidth)
+              } else if (m > l) {
+                // Mem maskGran > lib maskGran
+                if (m % l == 0) {
+                  // Mem maskGran is a multiple of lib maskGran, carry on as normal.
+                  splitMemory(libWidth)
+                } else {
+                  System.err.println(s"Mem maskGran $m is not a multiple of lib maskGran $l: currently not supported")
+                  return Seq()
+                }
+              } else { // m < l
+                // Lib maskGran > mem maskGran.
+                if (l % m == 0) {
+                  // Lib maskGran is a multiple of mem maskGran.
+                  // e.g. lib maskGran = 8, mem maskGran = 4.
+                  // In this case we can only compile very wastefully (by treating
+                  // lib as a mem maskGran width memory) :(
+                  splitMemory(memMask.get)
+
+                  // TODO: there's an optimization that could allow us to pack more
+                  // bits in and be more efficient.
+                  // e.g. say if mem maskGran = 4, lib maskGran = 8, libWidth = 32
+                  // We could use 16 of the bits (bits 0-3, 8-11, 16-19, 24-27) instead
+                  // of treating it as simply a width 4 (!!!) memory.
+                  // This would require a major refactor though.
+                } else {
+                  System.err.println(s"Lib maskGran $l is not a multiple of mem maskGran $m: currently not supported")
+                  return Seq()
+                }
+              }
+          }
+        }
+
+        // Choose an actual bit pair to add.
+        // We'll have to choose the smallest one (e.g. unmasked read port might be more tolerant of a bigger split than the masked write port).
+        if (bitPairCandidates.isEmpty) {
+          // No pair needed to split, just continue
+        } else {
+          val bestPair = bitPairCandidates.reduceLeft((leftPair, rightPair) => {
+            if (leftPair._2 - leftPair._1 + 1 > rightPair._2 - rightPair._1 + 1) leftPair else rightPair
+          })
+          bitPairs += bestPair
+          currentLSB = bestPair._2 + BigInt(1) // advance the LSB pointer
+        }
+      }
+      // Add in the last chunk if there are any leftovers
+      bitPairs += ((currentLSB, mem.src.width - 1))
+
+      bitPairs
+    }.toSeq
+
+  def compile(mem: Macro, lib: Macro): Option[(Module, Macro)] = {
+    assert(
+      mem.sortedPorts.lengthCompare(lib.sortedPorts.length) == 0,
+      "mem and lib should have an equal number of ports"
+    )
+    val pairedPorts = mem.sortedPorts.zip(lib.sortedPorts)
+
+    // Width mapping. See calculateBitPairs.
+    val bitPairs: Seq[(BigInt, BigInt)] = calculateBitPairs(mem, lib)
+    if (bitPairs.isEmpty) {
+      System.err.println("Error occurred during bitPairs calculations (bitPairs is empty).")
+      return None
+    }
+    // Check bit pairs.
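+    // e.g. (sketch) a width-16 mem compiled against width-8 libs should give
+    // bitPairs = Seq((0, 7), (8, 15)); see the calculateBitPairs scaladoc.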
+    checkBitPairs(bitPairs)
+
+    // Depth mapping
+    val stmts = ArrayBuffer[Statement]()
+    val outputs = mutable.HashMap[String, ArrayBuffer[(Expression, Expression)]]()
+    val selects = mutable.HashMap[String, Expression]()
+    val selectRegs = mutable.HashMap[String, Expression]()
+    /* Palmer: If we've got a parallel memory then we've got to take the
+     * address bits into account. */
+    if (mem.src.depth > lib.src.depth) {
+      mem.src.ports.foreach { port =>
+        val high = MacroCompilerMath.ceilLog2(mem.src.depth)
+        val low = MacroCompilerMath.ceilLog2(lib.src.depth)
+        val ref = WRef(port.address.name)
+        val nodeName = s"${ref.name}_sel"
+        val tpe = UIntType(IntWidth(high - low))
+        selects(ref.name) = WRef(nodeName, tpe)
+        stmts += DefNode(NoInfo, nodeName, bits(ref, high - 1, low))
+        // Donggyu: output selection should be piped
+        if (port.output.isDefined) {
+          val regName = s"${ref.name}_sel_reg"
+          val enable = (port.chipEnable, port.readEnable) match {
+            case (Some(ce), Some(re)) =>
+              and(WRef(ce.name, BoolType), WRef(re.name, BoolType))
+            case (Some(ce), None) => WRef(ce.name, BoolType)
+            case (None, Some(re)) => WRef(re.name, BoolType)
+            case (None, None) => one
+          }
+          selectRegs(ref.name) = WRef(regName, tpe)
+          stmts += DefRegister(NoInfo, regName, tpe, WRef(port.clock.get.name), zero, WRef(regName))
+          stmts += Connect(NoInfo, WRef(regName), Mux(enable, WRef(nodeName), WRef(regName), tpe))
+        }
+      }
+    }
+    for ((_, i) <- BigInt(0).until(mem.src.depth, lib.src.depth).zipWithIndex) {
+      for (j <- bitPairs.indices) {
+        val name = s"mem_${i}_$j"
+        // Create the instance.
+        stmts += WDefInstance(NoInfo, name, lib.src.name, lib.tpe)
+        // Connect extra ports of the lib.
+        stmts ++= lib.extraPorts.map { case (portName, portValue) =>
+          Connect(NoInfo, WSubField(WRef(name), portName), portValue)
+        }
+      }
+      for ((memPort, libPort) <- pairedPorts) {
+        val addrMatch = selects.get(memPort.src.address.name) match {
+          case None => one
+          case Some(addr) =>
+            val index = UIntLiteral(i, IntWidth(bitWidth(addr.tpe)))
+            DoPrim(PrimOps.Eq, Seq(addr, index), Nil, index.tpe)
+        }
+        val addrMatchReg = selectRegs.get(memPort.src.address.name) match {
+          case None => one
+          case Some(reg) =>
+            val index = UIntLiteral(i, IntWidth(bitWidth(reg.tpe)))
+            DoPrim(PrimOps.Eq, Seq(reg, index), Nil, index.tpe)
+        }
+        def andAddrMatch(e: Expression) = {
+          and(e, addrMatch)
+        }
+        val cats = ArrayBuffer[Expression]()
+        for (((low, high), j) <- bitPairs.zipWithIndex) {
+          val inst = WRef(s"mem_${i}_$j", lib.tpe)
+
+          def connectPorts2(mem: Expression, lib: String, polarity: Option[PortPolarity]): Statement =
+            Connect(NoInfo, WSubField(inst, lib), portToExpression(mem, polarity))
+          def connectPorts(mem: Expression, lib: String, polarity: PortPolarity): Statement =
+            connectPorts2(mem, lib, Some(polarity))
+
+          // Clock port mapping
+          /* Palmer: FIXME: I don't handle memories with read/write clocks yet. */
+          /* Colin: not all libPorts have clocks, but all memPorts do. */
+          libPort.src.clock.foreach { cPort =>
+            stmts += connectPorts(WRef(memPort.src.clock.get.name), cPort.name, cPort.polarity)
+          }
+
+          // Address port mapping
+          /* Palmer: The address port to a memory is just the low-order bits of
+           * the top address.
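+           * e.g. (sketch) with mem depth 1024 and lib depth 256, bits [7:0] feed
+           * each lib instance while bits [9:8] select among the four banks.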
+           */
+          stmts += connectPorts(WRef(memPort.src.address.name), libPort.src.address.name, libPort.src.address.polarity)
+
+          // Output port mapping
+          (memPort.src.output, libPort.src.output) match {
+            case (Some(PolarizedPort(mem, _)), Some(PolarizedPort(lib, lib_polarity))) =>
+              /* Palmer: In order to produce the output of a memory we need to cat
+               * together a bunch of narrower memories, which can only be
+               * done after generating all the memories. This saves up the
+               * output statements for later. */
+              val name = s"${mem}_${i}_$j" // This name is the output from the instance (mem vs ${mem}).
+              val exp = portToExpression(bits(WSubField(inst, lib), high - low, 0), Some(lib_polarity))
+              stmts += DefNode(NoInfo, name, exp)
+              cats += WRef(name)
+            case (None, Some(_)) =>
+            /* Palmer: If the inner memory has an output port but the outer
+             * one doesn't then it's safe to just leave the outer
+             * port floating. */
+            case (None, None) =>
+            /* Palmer: If there's no output ports at all (i.e., read-only
+             * port on the memory) then just don't worry about it,
+             * there's nothing to do. */
+            case (Some(PolarizedPort(mem, _)), None) =>
+              System.err.println("WARNING: Unable to match output ports on memory")
+              System.err.println(s"  outer output port: $mem")
+              return None
+          }
+
+          // Input port mapping
+          (memPort.src.input, libPort.src.input) match {
+            case (Some(PolarizedPort(mem, _)), Some(PolarizedPort(lib, lib_polarity))) =>
+              /* Palmer: The input port to a memory just needs to happen in parallel,
+               * this does a part select to narrow the memory down. */
+              stmts += connectPorts(bits(WRef(mem), high, low), lib, lib_polarity)
+            case (None, Some(lib)) =>
+              /* Palmer: If the inner memory has an input port but the other
+               * one doesn't then it's safe to just leave the inner
+               * port floating. This should be handled by the
+               * default value of the write enable, so nothing should
+               * ever make it into the memory. */
+              // FIRRTL cares about dangling inputs now, so tie it off.
+              stmts += IsInvalid(NoInfo, WSubField(inst, lib.name))
+            case (None, None) =>
+            /* Palmer: If there's no input ports at all (i.e., read-only
+             * port on the memory) then just don't worry about it,
+             * there's nothing to do. */
+            case (Some(PolarizedPort(mem, _)), None) =>
+              System.err.println("WARNING: Unable to match input ports on memory")
+              System.err.println(s"  outer input port: $mem")
+              return None
+          }
+
+          // Mask port mapping
+          val memMask = memPort.src.maskPort match {
+            case Some(PolarizedPort(mem, _)) =>
+              /* Palmer: The bits from the outer memory's write mask that will be
+               * used as the write mask for this inner memory. */
+              if (libPort.src.effectiveMaskGran == libPort.src.width.get) {
+                bits(WRef(mem), low / memPort.src.effectiveMaskGran)
+              } else {
+                require(isPowerOfTwo(libPort.src.effectiveMaskGran), "only powers of two masks supported for now")
+
+                // How much of this lib's width we are effectively using.
+                // If we have a mem maskGran less than the lib's maskGran, we'll have to take the smaller maskGran.
+                // Example: if we have a lib whose maskGran is 8 but our mem's maskGran is 4.
+                // The other case is if we're using a larger lib than mem.
+                val usingLessThanLibMaskGran = memPort.src.maskGran.get < libPort.src.effectiveMaskGran
+                val effectiveLibWidth =
+                  if (usingLessThanLibMaskGran)
+                    memPort.src.maskGran.get
+                  else
+                    libPort.src.width.get
+
+                cat(
+                  (0 until libPort.src.width.get by libPort.src.effectiveMaskGran)
+                    .map(i => {
+                      if (usingLessThanLibMaskGran && i >= effectiveLibWidth) {
+                        // If the memMaskGran is smaller than the lib's gran, then
+                        // zero out the upper bits.
+                        zero
+                      } else {
+                        if ((low + i) >= memPort.src.width.get) {
+                          // If our bit is larger than the whole width of the mem, just zero out the upper bits.
+                          zero
+                        } else {
+                          // Pick the appropriate bit from the mem mask.
+                          bits(WRef(mem), (low + i) / memPort.src.effectiveMaskGran)
+                        }
+                      }
+                    })
+                    .reverse
+                )
+              }
+            case None =>
+              /* If there is a lib mask port but no mem mask port, just turn on
+               * all bits of the lib mask port. */
+              if (libPort.src.maskPort.isDefined) {
+                val width = libPort.src.width.get / libPort.src.effectiveMaskGran
+                val value = (BigInt(1) << width) - 1
+                UIntLiteral(value, IntWidth(width))
+              } else {
+                // No mask ports on either side.
+                // We treat a "mask" of a single bit to be equivalent to a write
+                // enable (as used below).
+                one
+              }
+          }
+
+          // Write enable port mapping
+          val memWriteEnable = memPort.src.writeEnable match {
+            case Some(PolarizedPort(mem, _)) =>
+              /* Palmer: The outer memory's write enable port, or a constant 1 if
+               * there isn't a write enable port. */
+              WRef(mem)
+            case None =>
+              /* Palmer: If there is no input port on the source memory port
+               * then we don't ever want to turn on this write
+               * enable. Otherwise, we just _always_ turn on the
+               * write enable port on the inner memory. */
+              if (memPort.src.input.isEmpty) zero else one
+          }
+
+          // Chip enable port mapping
+          val memChipEnable = memPort.src.chipEnable match {
+            case Some(PolarizedPort(mem, _)) => WRef(mem)
+            case None => one
+          }
+
+          // Read enable port mapping
+          /* Palmer: It's safe to ignore read enables, but we pass them through
+           * to the vendor memory if there's a port on there that
+           * implements the read enables. */
+          (memPort.src.readEnable, libPort.src.readEnable) match {
+            case (_, None) =>
+            case (Some(PolarizedPort(mem, _)), Some(PolarizedPort(lib, lib_polarity))) =>
+              stmts += connectPorts(andAddrMatch(WRef(mem)), lib, lib_polarity)
+            case (None, Some(PolarizedPort(lib, lib_polarity))) =>
+              stmts += connectPorts(andAddrMatch(and(not(memWriteEnable), memChipEnable)), lib, lib_polarity)
+          }
+
+          /* Palmer: This is actually the memory compiler: it figures out how to
+           * implement the outer memory's collection of ports using what
+           * the inner memory has available. */
+          ((libPort.src.maskPort, libPort.src.writeEnable, libPort.src.chipEnable): @unchecked) match {
+            case (
+                  Some(PolarizedPort(mask, mask_polarity)),
+                  Some(PolarizedPort(we, we_polarity)),
+                  Some(PolarizedPort(en, en_polarity))
+                ) =>
+              /* Palmer: This is the simple option: every port exists. */
+              stmts += connectPorts(memMask, mask, mask_polarity)
+              stmts += connectPorts(andAddrMatch(memWriteEnable), we, we_polarity)
+              stmts += connectPorts(andAddrMatch(memChipEnable), en, en_polarity)
+            case (Some(PolarizedPort(mask, mask_polarity)), Some(PolarizedPort(we, we_polarity)), None) =>
+              /* Palmer: If we don't have a chip enable but do have mask ports.
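+               * In that case the chip enable is folded into the write enable
+               * below via and(memWriteEnable, memChipEnable).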
*/ + stmts += connectPorts(memMask, mask, mask_polarity) + stmts += connectPorts(andAddrMatch(and(memWriteEnable, memChipEnable)), we, we_polarity) + case (None, Some(PolarizedPort(we, we_polarity)), chipEnable) => + if (bitWidth(memMask.tpe) == 1) { + /* Palmer: If we're expected to provide mask ports without a + * memory that actually has them then we can use the + * write enable port instead of the mask port. */ + chipEnable match { + case Some(PolarizedPort(en, en_polarity)) => + stmts += connectPorts(andAddrMatch(and(memWriteEnable, memMask)), we, we_polarity) + stmts += connectPorts(andAddrMatch(memChipEnable), en, en_polarity) + case _ => + stmts += connectPorts( + andAddrMatch(and(and(memWriteEnable, memChipEnable), memMask)), + we, + we_polarity + ) + } + } else { + System.err.println("cannot emulate multi-bit mask ports with write enable") + return None + } + case (None, None, None) => + // No write ports to match up (this may be a read-only port). + // This isn't necessarily an error condition. + } + } + // Cat macro outputs for selection + memPort.src.output match { + case Some(PolarizedPort(mem, _)) if cats.nonEmpty => + val name = s"${mem}_$i" + stmts += DefNode(NoInfo, name, cat(cats.toSeq.reverse)) + outputs.getOrElseUpdate(mem, ArrayBuffer[(Expression, Expression)]()) += + (addrMatchReg -> WRef(name)) + case _ => + } + } + } + // Connect mem outputs + val zeroOutputValue: Expression = UIntLiteral(0, IntWidth(mem.src.width)) + mem.src.ports.foreach { port => + port.output match { + case Some(PolarizedPort(mem, _)) => + outputs.get(mem) match { + case Some(select) => + val output = select.foldRight(zeroOutputValue) { case ((cond, tval), fval) => + Mux(cond, tval, fval, fval.tpe) + } + stmts += Connect(NoInfo, WRef(mem), output) + case None => + } + case None => + } + } + + Some((mem.module(Block(stmts.toSeq)), lib)) + } + + def run(c: Circuit): Circuit = { + var firstLib = true + val modules = (mems, libs) match { + case (Some(mems), Some(libs)) => + // Try to compile each of the memories in mems. + // The 'state' is c.modules, which is a list of all the firrtl modules + // in the 'circuit'. + mems.foldLeft(c.modules) { (modules, mem) => + val sram = mem.src + def groupMatchesMask(group: SRAMGroup, mem: SRAMMacro): Boolean = { + val memMask = mem.ports.map(_.maskGran).find(_.isDefined).flatten + val libMask = group.ports.map(_.maskGran).find(_.isDefined).flatten + (memMask, libMask) match { + case (None, _) => true + case (Some(_), None) => false + case (Some(m), Some(l)) => l <= m //Ignore memories that don't have nice mask + } + } + // Add compiler memories that might map well to libs + val compLibs = compilers match { + case Some(SRAMCompiler(_, groups)) => + groups + .filter(g => g.family == sram.family && groupMatchesMask(g, sram)) + .map(g => { + for { + w <- g.width + d <- g.depth if (sram.width % w == 0) && (sram.depth % d == 0) + } yield Seq(new Macro(buildSRAMMacro(g, d, w, g.vt.head))) + }) + case None => Seq() + } + val fullLibs = libs ++ compLibs.flatten.flatten + + // Try to compile mem against each lib in libs, keeping track of the + // best compiled version, external lib used, and cost. + val (best, _) = fullLibs.foldLeft(None: Option[(Module, Macro)], Double.MaxValue) { + case ((best, cost), lib) if mem.src.ports.size != lib.src.ports.size => + /* Palmer: FIXME: This just assumes the Chisel and vendor ports are in the same + * order, but I'm starting with what actually gets generated. 
*/ + System.err.println(s"INFO: unable to compile ${mem.src.name} using ${lib.src.name} port count must match") + (best, cost) + case ((best, cost), lib) => + // Run the cost function to evaluate this potential compile. + costMetric.cost(mem, lib) match { + case Some(newCost) => + //System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${newCost}") + // Try compiling + compile(mem, lib) match { + // If it was successful and the new cost is lower + case Some(p) if newCost < cost => (Some(p), newCost) + case _ => (best, cost) + } + case _ => (best, cost) // Cost function rejected this combination. + } + } + + // If we were able to compile anything, then replace the original module + // in the modules list with a compiled version, as well as the extmodule + // stub for the lib. + best match { + case None => + if (mode == MacroCompilerAnnotation.Strict) + throw MacroCompilerException( + s"Target memory ${mem.src.name} could not be compiled and strict mode is activated - aborting." + ) + else + modules + case Some((mod, bb)) => + hammerIR match { + case Some(f) => + val hammerIRWriter = new FileWriter(new File(f), !firstLib) + if (firstLib) hammerIRWriter.write("[\n") + hammerIRWriter.write(bb.src.toJSON().toString()) + hammerIRWriter.write("\n,\n") + hammerIRWriter.close() + firstLib = false + case None => + } + modules.filterNot(m => m.name == mod.name || m.name == bb.blackbox.name) ++ Seq(mod, bb.blackbox) + } + } + case _ => c.modules + } + c.copy(modules = modules) + } +} + +class MacroCompilerTransform extends Transform with DependencyAPIMigration { + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized + override def optionalPrerequisiteOf: Seq[Dependency[Emitter]] = Forms.LowEmitters + override def invalidates(a: Transform) = false + + def execute(state: CircuitState): CircuitState = state.annotations.collect { case a: MacroCompilerAnnotation => + a + } match { + case Seq(anno: MacroCompilerAnnotation) => + val MacroCompilerAnnotation.Params( + memFile, + memFileFormat, + libFile, + hammerIR, + costMetric, + mode, + useCompiler, + forceCompile, + forceSynflops + ) = anno.params + if (mode == MacroCompilerAnnotation.FallbackSynflops) { + throw new UnsupportedOperationException("Not implemented yet") + } + + // Check that we don't have any modules both forced to compile and synflops. + assert(forceCompile.intersect(forceSynflops).isEmpty, "Cannot have modules both forced to compile and synflops") + + // Read, eliminate None, get only SRAM, make firrtl macro + val mems: Option[Seq[Macro]] = (memFileFormat match { + case Some("conf") => readConfFromPath(Some(memFile)) + case _ => mdf.macrolib.Utils.readMDFFromPath(Some(memFile)) + }) match { + case Some(x: Seq[mdf.macrolib.Macro]) => + Some(filterForSRAM(Some(x)).getOrElse(List()).map { new Macro(_) }) + case _ => None + } + val libs: Option[Seq[Macro]] = mdf.macrolib.Utils.readMDFFromPath(libFile) match { + case Some(x: Seq[mdf.macrolib.Macro]) => + Some(filterForSRAM(Some(x)).getOrElse(List()).map { new Macro(_) }) + case _ => None + } + val compilers: Option[mdf.macrolib.SRAMCompiler] = mdf.macrolib.Utils.readMDFFromPath(libFile) match { + case Some(x: Seq[mdf.macrolib.Macro]) => + if (useCompiler) { + findSRAMCompiler(Some(x)) + } else None + case _ => None + } + + // Helper function to turn a set of mem names into a Seq[Macro]. 
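+      // Note: the .get calls below assume every forced mem name exists in the
+      // -m/-n input; an unknown name throws a NoSuchElementException (as written).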
+ def setToSeqMacro(names: Set[String]): Seq[Macro] = { + names.toSeq.map(memName => mems.get.collectFirst { case m if m.src.name == memName => m }.get) + } + + // Build lists of memories for compilation and synflops. + val memCompile = mems.map { actualMems => + val memsAdjustedForMode = if (mode == MacroCompilerAnnotation.Synflops) Seq.empty else actualMems + memsAdjustedForMode.filterNot(m => forceSynflops.contains(m.src.name)) ++ setToSeqMacro(forceCompile) + } + val memSynflops: Seq[Macro] = mems.map { actualMems => + // + val memsAdjustedForMode = if (mode == MacroCompilerAnnotation.Synflops) actualMems else Seq.empty + memsAdjustedForMode.filterNot(m => forceCompile.contains(m.src.name)) ++ setToSeqMacro(forceSynflops) + }.getOrElse(Seq.empty) + + val transforms = Seq( + new MacroCompilerPass(memCompile, libs, compilers, hammerIR, costMetric, mode), + new SynFlopsPass( + true, + memSynflops ++ (if (mode == MacroCompilerAnnotation.CompileAndSynflops) { + libs.get + } else { + Seq.empty + }) + ) + ) + transforms.foldLeft(state)((s, xform) => xform.runTransform(s)) + case _ => state + } +} + +class MacroCompilerOptimizations extends SeqTransform with DependencyAPIMigration { + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized + override def optionalPrerequisiteOf: Seq[Dependency[Emitter]] = Forms.LowEmitters + override def invalidates(a: Transform) = false + + def transforms: Seq[Transform] = Seq( + passes.RemoveValidIf, + new firrtl.transforms.ConstantPropagation, + passes.memlib.VerilogMemDelays, + new firrtl.transforms.ConstantPropagation, + passes.SplitExpressions, + passes.CommonSubexpressionElimination + ) +} + +object MacroCompiler extends App { + sealed trait MacroParam + case object Macros extends MacroParam + case object MacrosFormat extends MacroParam + case object Library extends MacroParam + case object Verilog extends MacroParam + case object Firrtl extends MacroParam + case object HammerIR extends MacroParam + case object CostFunc extends MacroParam + case object Mode extends MacroParam + case object UseCompiler extends MacroParam + + type MacroParamMap = Map[MacroParam, String] + type CostParamMap = Map[String, String] + type ForcedMemories = (Set[String], Set[String]) + val modeOptions: Seq[String] = MacroCompilerAnnotation.options.map { case (_, cmd, description) => + s" $cmd: $description" + } + val usage: String = (Seq( + "Options:", + " -n, --macro-conf: The set of macros to compile in firrtl-generated conf format (exclusive with -m)", + " -m, --macro-mdf: The set of macros to compile in MDF JSON format (exclusive with -n)", + " -l, --library: The set of macros that have blackbox instances", + " -u, --use-compiler: Flag, whether to use the memory compiler defined in library", + " -v, --verilog: Verilog output", + " -f, --firrtl: FIRRTL output (optional)", + " -hir, --hammer-ir: Hammer-IR output currently only needed for IP compilers", + " -c, --cost-func: Cost function to use. Optional (default: \"default\")", + " -cp, --cost-param: Cost function parameter. (Optional depending on the cost function.). e.g. 
-c ExternalMetric -cp path /path/to/my/cost/script", + " --force-compile [mem]: Force the given memory to be compiled to target libs regardless of the mode", + " --force-synflops [mem]: Force the given memory to be compiled via synflops regardless of the mode", + " --mode:" + ) ++ modeOptions).mkString("\n") + + @tailrec + def parseArgs( + map: MacroParamMap, + costMap: CostParamMap, + forcedMemories: ForcedMemories, + args: List[String] + ): (MacroParamMap, CostParamMap, ForcedMemories) = + args match { + case Nil => (map, costMap, forcedMemories) + case ("-n" | "--macro-conf") :: value :: tail => + parseArgs(map + (Macros -> value) + (MacrosFormat -> "conf"), costMap, forcedMemories, tail) + case ("-m" | "--macro-mdf") :: value :: tail => + parseArgs(map + (Macros -> value) + (MacrosFormat -> "mdf"), costMap, forcedMemories, tail) + case ("-l" | "--library") :: value :: tail => + parseArgs(map + (Library -> value), costMap, forcedMemories, tail) + case ("-u" | "--use-compiler") :: tail => + parseArgs(map + (UseCompiler -> ""), costMap, forcedMemories, tail) + case ("-v" | "--verilog") :: value :: tail => + parseArgs(map + (Verilog -> value), costMap, forcedMemories, tail) + case ("-f" | "--firrtl") :: value :: tail => + parseArgs(map + (Firrtl -> value), costMap, forcedMemories, tail) + case ("-hir" | "--hammer-ir") :: value :: tail => + parseArgs(map + (HammerIR -> value), costMap, forcedMemories, tail) + case ("-c" | "--cost-func") :: value :: tail => + parseArgs(map + (CostFunc -> value), costMap, forcedMemories, tail) + case ("-cp" | "--cost-param") :: value1 :: value2 :: tail => + parseArgs(map, costMap + (value1 -> value2), forcedMemories, tail) + case "--force-compile" :: value :: tail => + parseArgs(map, costMap, forcedMemories.copy(_1 = forcedMemories._1 + value), tail) + case "--force-synflops" :: value :: tail => + parseArgs(map, costMap, forcedMemories.copy(_2 = forcedMemories._2 + value), tail) + case "--mode" :: value :: tail => + parseArgs(map + (Mode -> value), costMap, forcedMemories, tail) + case arg :: _ => + println(s"Unknown field $arg\n") + println(usage) + sys.exit(1) + } + + def run(args: List[String]): Unit = { + val (params, costParams, forcedMemories) = + parseArgs(Map[MacroParam, String](), Map[String, String](), (Set.empty, Set.empty), args) + try { + val macros = params.get(MacrosFormat) match { + case Some("conf") => + filterForSRAM(readConfFromPath(params.get(Macros))).get.map(x => new Macro(x).blackbox) + case _ => + filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get + .map(x => new Macro(x).blackbox) + } + + if (macros.nonEmpty) { + // Note: the last macro in the input list is (seemingly arbitrarily) + // determined as the firrtl "top-level module". 
+ val circuit = Circuit(NoInfo, macros, macros.last.name) + val annotations = AnnotationSeq( + Seq( + MacroCompilerAnnotation( + circuit.main, + MacroCompilerAnnotation.Params( + params(Macros), + params.get(MacrosFormat), + params.get(Library), + params.get(HammerIR), + CostMetric.getCostMetric(params.getOrElse(CostFunc, "default"), costParams), + MacroCompilerAnnotation.stringToCompilerMode(params.getOrElse(Mode, "default")), + params.contains(UseCompiler), + forceCompile = forcedMemories._1, + forceSynflops = forcedMemories._2 + ) + ) + ) + ) + + // The actual MacroCompilerTransform basically just generates an input circuit + val macroCompilerInput = CircuitState(circuit, annotations) + val macroCompiled = (new MacroCompilerTransform).execute(macroCompilerInput) + + // Run FIRRTL compiler + // For each generated module, have to create a new circuit with that module + // as top, and all other modules as ExtModules. This guarantees all modules + // are elaborated + val verilog = macroCompiled.circuit.modules + .map(_.name) + .map { macroName => + val (mainMod, otherMods) = macroCompiled.circuit.modules.partition(_.name == macroName) + val extMods = otherMods.map(m => ExtModule(NoInfo, m.name, m.ports, m.name, Nil)) + + val circuit = Circuit(NoInfo, mainMod ++ extMods, macroName) + (new FirrtlStage) + .execute( + Array.empty, + Seq( + OutputFileAnnotation(params.get(Verilog).get), + RunFirrtlTransformAnnotation(new VerilogEmitter), + EmitCircuitAnnotation(classOf[VerilogEmitter]), + FirrtlSourceAnnotation(circuit.serialize) + ) + ) + .collect { case c: EmittedVerilogCircuitAnnotation => c } + .head + .value + .value + } + .mkString("\n") + + val verilogWriter = new FileWriter(new File(params.get(Verilog).get)) + verilogWriter.write(verilog) + verilogWriter.close() + + params.get(HammerIR) match { + case Some(hammerIRFile: String) => + val lines = FileUtils.getLines(hammerIRFile).toList + val hammerIRWriter = new FileWriter(new File(hammerIRFile)) + // JSON means we need to destroy the last comma :( + lines.dropRight(1).foreach(l => hammerIRWriter.write(l + "\n")) + hammerIRWriter.write("]\n") + hammerIRWriter.close() + case None => + } + } else { + // Warn user + System.err.println("WARNING: Empty *.mems.conf file. No memories generated.") + + // Emit empty verilog file if no macros found + params.get(Verilog) match { + case Some(verilogFile: String) => + // Create an empty verilog file + val verilogWriter = new FileWriter(new File(verilogFile)) + verilogWriter.close() + case None => + } + params.get(HammerIR) match { + case Some(hammerIRFile: String) => + // Create an empty HammerIR file + val hammerIRWriter = new FileWriter(new File(hammerIRFile)) + hammerIRWriter.write("[]\n") + hammerIRWriter.close() + case None => + } + } + } catch { + case e: java.util.NoSuchElementException => + if (args.isEmpty) { + println("Command line arguments must be specified") + } else { + e.printStackTrace() + } + e.printStackTrace() + sys.exit(1) + case e: MacroCompilerException => + println(usage) + e.printStackTrace() + sys.exit(1) + case e: Throwable => + throw e + } + } + + run(args.toList) +} diff --git a/tools/tapeout/src/main/scala/macros/SynFlopsPass.scala b/tools/tapeout/src/main/scala/macros/SynFlopsPass.scala new file mode 100644 index 000000000..0c1dd8043 --- /dev/null +++ b/tools/tapeout/src/main/scala/macros/SynFlopsPass.scala @@ -0,0 +1,152 @@ +// See LICENSE for license details. 
+ +package tapeout.macros + +import tapeout.macros.Utils._ +import firrtl.Utils.{one, zero} +import firrtl._ +import firrtl.ir._ +import firrtl.passes.MemPortUtils.memPortField + +import scala.collection.mutable + +class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pass { + val extraMods: mutable.ArrayBuffer[Module] = scala.collection.mutable.ArrayBuffer.empty[Module] + lazy val libMods: Map[String, Module] = libs.map { lib => + lib.src.name -> { + val (dataType, dataWidth) = lib.src.ports.foldLeft(None: Option[BigInt])((res, port) => + (res, port.maskPort) match { + case (_, None) => + res + case (None, Some(_)) => + Some(port.effectiveMaskGran) + case (Some(x), Some(_)) => + assert(x == port.effectiveMaskGran) + res + } + ) match { + case None => (UIntType(IntWidth(lib.src.width)), lib.src.width) + case Some(gran) => (UIntType(IntWidth(gran)), gran.intValue) + } + + val maxDepth = firrtl.Utils.min(lib.src.depth, 1 << 26) + + // Change macro to be mapped onto to look like the below mem + // by changing its depth, and width + val lib_macro = new Macro( + lib.src.copy( + name = "split_" + lib.src.name, + depth = maxDepth, + width = dataWidth, + ports = lib.src.ports.map(p => + p.copy( + width = p.width.map(_ => dataWidth), + depth = p.depth.map(_ => maxDepth), + maskGran = p.maskGran.map(_ => dataWidth) + ) + ) + ) + ) + val mod_macro = new MacroCompilerPass(None, None, None, None).compile(lib, lib_macro) + val (real_mod, real_macro) = mod_macro.get + + val mem = DefMemory( + NoInfo, + "ram", + dataType, + maxDepth, + 1, // writeLatency + 1, // readLatency. This is possible because of VerilogMemDelays + real_macro.readers.indices.map(i => s"R_$i"), + real_macro.writers.indices.map(i => s"W_$i"), + real_macro.readwriters.indices.map(i => s"RW_$i") + ) + + val readConnects = real_macro.readers.zipWithIndex.flatMap { case (r, i) => + val clock = portToExpression(r.src.clock.get) + val address = portToExpression(r.src.address) + val enable = (r.src.chipEnable, r.src.readEnable) match { + case (Some(en_port), Some(re_port)) => + and(portToExpression(en_port), portToExpression(re_port)) + case (Some(en_port), None) => portToExpression(en_port) + case (None, Some(re_port)) => portToExpression(re_port) + case (None, None) => one + } + val data = memPortField(mem, s"R_$i", "data") + val read = data + Seq( + Connect(NoInfo, memPortField(mem, s"R_$i", "clk"), clock), + Connect(NoInfo, memPortField(mem, s"R_$i", "addr"), address), + Connect(NoInfo, memPortField(mem, s"R_$i", "en"), enable), + Connect(NoInfo, WRef(r.src.output.get.name), read) + ) + } + + val writeConnects = real_macro.writers.zipWithIndex.flatMap { case (w, i) => + val clock = portToExpression(w.src.clock.get) + val address = portToExpression(w.src.address) + val enable = (w.src.chipEnable, w.src.writeEnable) match { + case (Some(en), Some(we)) => + and(portToExpression(en), portToExpression(we)) + case (Some(en), None) => portToExpression(en) + case (None, Some(we)) => portToExpression(we) + case (None, None) => zero // is it possible? 
+ } + val mask = w.src.maskPort match { + case Some(m) => portToExpression(m) + case None => one + } + val data = memPortField(mem, s"W_$i", "data") + val write = portToExpression(w.src.input.get) + Seq( + Connect(NoInfo, memPortField(mem, s"W_$i", "clk"), clock), + Connect(NoInfo, memPortField(mem, s"W_$i", "addr"), address), + Connect(NoInfo, memPortField(mem, s"W_$i", "en"), enable), + Connect(NoInfo, memPortField(mem, s"W_$i", "mask"), mask), + Connect(NoInfo, data, write) + ) + } + + val readwriteConnects = real_macro.readwriters.zipWithIndex.flatMap { case (rw, i) => + val clock = portToExpression(rw.src.clock.get) + val address = portToExpression(rw.src.address) + val wmode = rw.src.writeEnable match { + case Some(we) => portToExpression(we) + case None => zero // is it possible? + } + val wmask = rw.src.maskPort match { + case Some(wm) => portToExpression(wm) + case None => one + } + val enable = (rw.src.chipEnable, rw.src.readEnable) match { + case (Some(en), Some(re)) => + and(portToExpression(en), or(portToExpression(re), wmode)) + case (Some(en), None) => portToExpression(en) + case (None, Some(re)) => or(portToExpression(re), wmode) + case (None, None) => one + } + val wdata = memPortField(mem, s"RW_$i", "wdata") + val rdata = memPortField(mem, s"RW_$i", "rdata") + val write = portToExpression(rw.src.input.get) + val read = rdata + Seq( + Connect(NoInfo, memPortField(mem, s"RW_$i", "clk"), clock), + Connect(NoInfo, memPortField(mem, s"RW_$i", "addr"), address), + Connect(NoInfo, memPortField(mem, s"RW_$i", "en"), enable), + Connect(NoInfo, memPortField(mem, s"RW_$i", "wmode"), wmode), + Connect(NoInfo, memPortField(mem, s"RW_$i", "wmask"), wmask), + Connect(NoInfo, WRef(rw.src.output.get.name), read), + Connect(NoInfo, wdata, write) + ) + } + + extraMods.append(real_macro.module(Block(mem +: (readConnects ++ writeConnects ++ readwriteConnects)))) + real_mod + } + }.toMap + + def run(c: Circuit): Circuit = { + if (!synflops) c + else c.copy(modules = c.modules.map(m => libMods.getOrElse(m.name, m)) ++ extraMods) + } +} diff --git a/tools/tapeout/src/main/scala/macros/Utils.scala b/tools/tapeout/src/main/scala/macros/Utils.scala new file mode 100644 index 000000000..8715ec105 --- /dev/null +++ b/tools/tapeout/src/main/scala/macros/Utils.scala @@ -0,0 +1,262 @@ +// See LICENSE for license details. + +package tapeout.macros + +import firrtl.Utils.BoolType +import firrtl.ir._ +import firrtl.passes.memlib._ +import firrtl.{PrimOps, _} +import mdf.macrolib.{Input => _, Output => _, _} + +import scala.language.implicitConversions + +object MacroCompilerMath { + def ceilLog2(x: BigInt): Int = (x - 1).bitLength +} + +class FirrtlMacroPort(port: MacroPort) { + val src: MacroPort = port + + val isReader: Boolean = port.output.nonEmpty && port.input.isEmpty + val isWriter: Boolean = port.input.nonEmpty && port.output.isEmpty + val isReadWriter: Boolean = port.input.nonEmpty && port.output.nonEmpty + + val addrType: UIntType = UIntType(IntWidth(MacroCompilerMath.ceilLog2(port.depth.get).max(1))) + val dataType: UIntType = UIntType(IntWidth(port.width.get)) + val maskType: UIntType = UIntType(IntWidth(port.width.get / port.effectiveMaskGran)) + + // Bundle representing this macro port. 
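+ // e.g. a 64-bit write port "W0" with maskGran 8 contributes fields such as
+ // W0_addr, W0_clk, W0_data, W0_en, and an 8-bit W0_mask (64 / 8).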
+ val tpe: BundleType = BundleType(
+ Seq(Field(port.address.name, Flip, addrType)) ++
+ port.clock.map(p => Field(p.name, Flip, ClockType)) ++
+ port.input.map(p => Field(p.name, Flip, dataType)) ++
+ port.output.map(p => Field(p.name, Default, dataType)) ++
+ port.chipEnable.map(p => Field(p.name, Flip, BoolType)) ++
+ port.readEnable.map(p => Field(p.name, Flip, BoolType)) ++
+ port.writeEnable.map(p => Field(p.name, Flip, BoolType)) ++
+ port.maskPort.map(p => Field(p.name, Flip, maskType))
+ )
+ val ports: Seq[Port] = tpe.fields.map(f =>
+ Port(
+ NoInfo,
+ f.name,
+ f.flip match {
+ case Default => Output
+ case Flip => Input
+ },
+ f.tpe
+ )
+ )
+}
+
+// Reads an SRAMMacro and generates FIRRTL blackboxes.
+class Macro(srcMacro: SRAMMacro) {
+ val src: SRAMMacro = srcMacro
+
+ val firrtlPorts: Seq[FirrtlMacroPort] = srcMacro.ports.map { new FirrtlMacroPort(_) }
+
+ val writers: Seq[FirrtlMacroPort] = firrtlPorts.filter(p => p.isWriter)
+ val readers: Seq[FirrtlMacroPort] = firrtlPorts.filter(p => p.isReader)
+ val readwriters: Seq[FirrtlMacroPort] = firrtlPorts.filter(p => p.isReadWriter)
+
+ val sortedPorts: Seq[FirrtlMacroPort] = writers ++ readers ++ readwriters
+ val extraPorts: Seq[(String, UIntLiteral)] = srcMacro.extraPorts.map { p =>
+ assert(p.portType == Constant) // TODO: relax this restriction?
+ val name = p.name
+ val width = BigInt(p.width.toLong)
+ val value = BigInt(p.value.toLong)
+ name -> UIntLiteral(value, IntWidth(width))
+ }
+
+ // Bundle representing this memory blackbox
+ val tpe: BundleType = BundleType(firrtlPorts.flatMap(_.tpe.fields))
+
+ private val modPorts = firrtlPorts.flatMap(_.ports) ++
+ extraPorts.map { case (name, value) => Port(NoInfo, name, Input, value.tpe) }
+ val blackbox: ExtModule = ExtModule(NoInfo, srcMacro.name, modPorts, srcMacro.name, Nil)
+ def module(body: Statement): Module = Module(NoInfo, srcMacro.name, modPorts, body)
+}
+
+object Utils {
+ def filterForSRAM(s: Option[Seq[mdf.macrolib.Macro]]): Option[Seq[mdf.macrolib.SRAMMacro]] = {
+ s match {
+ case Some(l: Seq[mdf.macrolib.Macro]) =>
+ Some(l.filter { _.isInstanceOf[mdf.macrolib.SRAMMacro] }.map { m => m.asInstanceOf[mdf.macrolib.SRAMMacro] })
+ case _ => None
+ }
+ }
+ // Reads a conf file in and returns MDF, analogous to mdf.macrolib.Utils.readMDFFromPath
+ def readConfFromPath(path: Option[String]): Option[Seq[mdf.macrolib.Macro]] = {
+ path.map(p => Utils.readConfFromString(FileUtils.getText(p)))
+ }
+ def readConfFromString(str: String): Seq[mdf.macrolib.Macro] = {
+ MemConf.fromString(str).map { m: MemConf =>
+ val ports = m.ports.map { case (port, num) => Seq.fill(num)(port) }.reduce(_ ++ _)
+ SRAMMacro(
+ m.name,
+ m.width,
+ m.depth,
+ Utils.portSpecToFamily(ports),
+ Utils.portSpecToMacroPort(m.width, m.depth, m.maskGranularity, ports)
+ )
+ }
+ }
+ def portSpecToFamily(ports: Seq[MemPort]): String = {
+ val numR = ports.count { case ReadPort => true; case _ => false }
+ val numW = ports.count { case WritePort | MaskedWritePort => true; case _ => false }
+ val numRW = ports.count { case ReadWritePort | MaskedReadWritePort => true; case _ => false }
+ val numRStr = if (numR > 0) s"${numR}r" else ""
+ val numWStr = if (numW > 0) s"${numW}w" else ""
+ val numRWStr = if (numRW > 0) s"${numRW}rw" else ""
+ numRStr + numWStr + numRWStr
+ }
+ // This translates between two representations of ports
+ def portSpecToMacroPort(width: Int, depth: BigInt, maskGran: Option[Int], ports: Seq[MemPort]): Seq[MacroPort] = {
+ var numR = 0
+ var numW = 0
+ var numRW = 0
+ ports.map {
+ case
ReadPort => + val portName = s"R$numR" + numR += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + readEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + output = Some(PolarizedPort(s"${portName}_data", ActiveHigh)) + ) + case WritePort => + val portName = s"W$numW" + numW += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + writeEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + input = Some(PolarizedPort(s"${portName}_data", ActiveHigh)) + ) + case MaskedWritePort => + val portName = s"W$numW" + numW += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + writeEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + maskPort = Some(PolarizedPort(s"${portName}_mask", ActiveHigh)), + maskGran = maskGran, + input = Some(PolarizedPort(s"${portName}_data", ActiveHigh)) + ) + case ReadWritePort => + val portName = s"RW$numRW" + numRW += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + chipEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + writeEnable = Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), + input = Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), + output = Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) + ) + case MaskedReadWritePort => + val portName = s"RW$numRW" + numRW += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + chipEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + writeEnable = Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), + maskPort = Some(PolarizedPort(s"${portName}_wmask", ActiveHigh)), + maskGran = maskGran, + input = Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), + output = Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) + ) + } + } + def findSRAMCompiler(s: Option[Seq[mdf.macrolib.Macro]]): Option[mdf.macrolib.SRAMCompiler] = { + s match { + case Some(l: Seq[mdf.macrolib.Macro]) => + l.collectFirst { case x: mdf.macrolib.SRAMCompiler => + x + } + case _ => None + } + } + def buildSRAMMacros(s: mdf.macrolib.SRAMCompiler): Seq[mdf.macrolib.SRAMMacro] = { + for { + g <- s.groups + d <- g.depth + w <- g.width + vt <- g.vt + } yield mdf.macrolib.SRAMMacro( + makeName(g, d, w, vt), + w, + d, + g.family, + g.ports.map(_.copy(width = Some(w), depth = Some(d))), + vt, + g.mux, + g.extraPorts + ) + } + def buildSRAMMacro(g: mdf.macrolib.SRAMGroup, d: Int, w: Int, vt: String): mdf.macrolib.SRAMMacro = { + mdf.macrolib.SRAMMacro( + makeName(g, d, w, vt), + w, + d, + g.family, + g.ports.map(_.copy(width = Some(w), depth = Some(d))), + vt, + g.mux, + g.extraPorts + ) + } + def makeName(g: mdf.macrolib.SRAMGroup, depth: Int, width: Int, vt: String): String = { + g.name.foldLeft("") { (builder, next) => + next match { + case "depth" | "DEPTH" => builder + depth + case "width" | "WIDTH" => builder + width + case "vt" => builder + vt.toLowerCase + case "VT" => builder + vt.toUpperCase + case "family" => 
builder + g.family.toLowerCase + case "FAMILY" => builder + g.family.toUpperCase + case "mux" | "MUX" => builder + g.mux + case other => builder + other + } + } + } + + def and(e1: Expression, e2: Expression): DoPrim = + DoPrim(PrimOps.And, Seq(e1, e2), Nil, e1.tpe) + def or(e1: Expression, e2: Expression): DoPrim = + DoPrim(PrimOps.Or, Seq(e1, e2), Nil, e1.tpe) + def bits(e: Expression, high: BigInt, low: BigInt): Expression = + DoPrim(PrimOps.Bits, Seq(e), Seq(high, low), UIntType(IntWidth(high - low + 1))) + def bits(e: Expression, idx: BigInt): Expression = bits(e, idx, idx) + def cat(es: Seq[Expression]): Expression = + if (es.size == 1) es.head + else DoPrim(PrimOps.Cat, Seq(es.head, cat(es.tail)), Nil, UnknownType) + def not(e: Expression): DoPrim = + DoPrim(PrimOps.Not, Seq(e), Nil, e.tpe) + + // Convert a port to a FIRRTL expression, handling polarity along the way. + def portToExpression(pp: PolarizedPort): Expression = + portToExpression(WRef(pp.name), Some(pp.polarity)) + + def portToExpression(exp: Expression, polarity: Option[PortPolarity]): Expression = + polarity match { + case Some(ActiveLow) | Some(NegativeEdge) => not(exp) + case _ => exp + } + + // Check if a number is a power of two + def isPowerOfTwo(x: Int): Boolean = (x & (x - 1)) == 0 +} diff --git a/tools/tapeout/src/main/scala/transforms/ExtraTransforms.scala b/tools/tapeout/src/main/scala/transforms/ExtraTransforms.scala new file mode 100644 index 000000000..4b427653b --- /dev/null +++ b/tools/tapeout/src/main/scala/transforms/ExtraTransforms.scala @@ -0,0 +1,26 @@ +// See LICENSE for license details. + +package tapeout.transforms + +import firrtl.Mappers._ +import firrtl._ +import firrtl.annotations.{CircuitTarget, ModuleTarget, SingleTargetAnnotation} +import firrtl.ir._ +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency +import firrtl.options.{Dependency} + +class ExtraLowTransforms extends Transform with DependencyAPIMigration { + // this PropagatePresetAnnotations is needed to run the RemoveValidIf pass (that is removed from CIRCT). 
+ // additionally, since that pass isn't explicitly a prereq of the LowFormEmitter, it
+ // needs to be wrapped in this transform
+ override def prerequisites: Seq[TransformDependency] = Forms.LowForm :+
+ Dependency[firrtl.transforms.PropagatePresetAnnotations]
+ override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized
+ override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters
+ override def invalidates(a: Transform): Boolean = false
+
+ def execute(state: CircuitState): CircuitState = {
+ state
+ }
+} diff --git a/tools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala b/tools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala new file mode 100644 index 000000000..5a1a3fdbb --- /dev/null +++ b/tools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala @@ -0,0 +1,51 @@ +package tapeout.transforms
+
+import tapeout.transforms.stage._
+import firrtl._
+import firrtl.annotations._
+import firrtl.ir._
+import firrtl.options.{Dependency, InputAnnotationFileAnnotation, StageMain}
+import firrtl.stage.{FirrtlCircuitAnnotation, FirrtlStage, RunFirrtlTransformAnnotation}
+import logger.LazyLogging
+
+private class GenerateModelStageMain(annotations: AnnotationSeq) extends LazyLogging {
+ val outAnno: Option[String] = annotations.collectFirst { case OutAnnoAnnotation(s) => s }
+
+ val annoFiles: List[String] = annotations.flatMap {
+ case InputAnnotationFileAnnotation(f) => Some(f)
+ case _ => None
+ }.toList
+
+ // Dump the filtered annotations to the output annotation file given by outAnno
+ protected def dumpAnnos(
+ annotations: AnnotationSeq
+ ): Unit = {
+ outAnno.foreach { annoPath =>
+ val outputFile = new java.io.PrintWriter(annoPath)
+ outputFile.write(JsonProtocol.serialize(annotations.filter(_ match {
+ case _: DeletedAnnotation => false
+ case _: EmittedComponent => false
+ case _: EmittedAnnotation[_] => false
+ case _: FirrtlCircuitAnnotation => false
+ case _: OutAnnoAnnotation => false
+ case _ => true
+ })))
+ outputFile.close()
+ }
+ }
+
+ def executeStageMain(): Unit = {
+ val annos = new FirrtlStage().execute(Array.empty, annotations)
+
+ annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match {
+ case Some(_) =>
+ dumpAnnos(annos)
+ case _ =>
+ throw new Exception("executeStageMain failed while executing FIRRTL!")
+ }
+ }
+}
+
+// main run class
+object GenerateModelStageMain extends StageMain(new TapeoutStage()) diff --git a/tools/tapeout/src/main/scala/transforms/retime/Retime.scala b/tools/tapeout/src/main/scala/transforms/retime/Retime.scala new file mode 100644 index 000000000..321f3d424 --- /dev/null +++ b/tools/tapeout/src/main/scala/transforms/retime/Retime.scala @@ -0,0 +1,48 @@ +// See LICENSE for license details.
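+//
+// RetimeAnnotation marks a module or instance for retiming; RetimeTransform
+// only logs the marked targets and leaves the netlist unchanged, deferring
+// the actual retiming to the downstream synthesis flow.
+//
+// Sketch of intended use (Inner is a hypothetical module):
+//   class Outer extends chisel3.Module with RetimeLib {
+//     val inner = chisel3.Module(new Inner)
+//     retime(inner)
+//   }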
+ +package tapeout.transforms.retime + +import chisel3.experimental.RunFirrtlTransform +import firrtl.annotations._ +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency +import firrtl.{CircuitState, DependencyAPIMigration, Transform} + +case class RetimeAnnotation(target: Named) extends SingleTargetAnnotation[Named] { + override def duplicate(n: Named): Annotation = RetimeAnnotation(n) +} + +class RetimeTransform extends Transform with DependencyAPIMigration { + + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized + override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters + override def invalidates(a: Transform): Boolean = false + + override def execute(state: CircuitState): CircuitState = { + state.annotations.filter(_.isInstanceOf[RetimeAnnotation]) match { + case Nil => state + case seq => + seq.foreach { + case RetimeAnnotation(ModuleName(module, CircuitName(_))) => + logger.info(s"Retiming module $module") + case RetimeAnnotation(ComponentName(name, ModuleName(module, CircuitName(_)))) => + logger.info(s"Retiming instance $module.$name") + case _ => + throw new Exception(s"There should be RetimeAnnotations, got ${seq.mkString(" -- ")}") + } + state + } + } +} + +trait RetimeLib { + self: chisel3.Module => + + def retime[T <: chisel3.Module](module: T): Unit = { + chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation with RunFirrtlTransform { + def transformClass: Class[_ <: Transform] = classOf[RetimeTransform] + def toFirrtl: Annotation = RetimeAnnotation(module.toNamed) + }) + } +} diff --git a/tools/tapeout/src/main/scala/transforms/stage/TapeoutStage.scala b/tools/tapeout/src/main/scala/transforms/stage/TapeoutStage.scala new file mode 100644 index 000000000..5fdadf236 --- /dev/null +++ b/tools/tapeout/src/main/scala/transforms/stage/TapeoutStage.scala @@ -0,0 +1,50 @@ +// See LICENSE for license details. 
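+//
+// TapeoutStage wires the tapeout-specific options below into a Shell that
+// also understands the Chisel and FIRRTL CLIs, then hands the resulting
+// annotations to GenerateModelStageMain. Hypothetical invocation (only
+// --out-anno-file is defined here; the remaining options come from FirrtlCli):
+//   tapeout --input-file Top.fir --out-anno-file Top.anno.json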
+
+package tapeout.transforms.stage
+
+import tapeout.transforms.GenerateModelStageMain
+import chisel3.stage.ChiselCli
+import firrtl.AnnotationSeq
+import firrtl.annotations.{Annotation, NoTargetAnnotation}
+import firrtl.options.{HasShellOptions, Shell, ShellOption, Stage, Unserializable}
+import firrtl.stage.FirrtlCli
+import logger.Logger
+
+sealed trait TapeoutOption extends Unserializable {
+ this: Annotation =>
+}
+
+case class OutAnnoAnnotation(outAnno: String) extends NoTargetAnnotation with TapeoutOption
+
+object OutAnnoAnnotation extends HasShellOptions {
+ val options: Seq[ShellOption[_]] = Seq(
+ new ShellOption[String](
+ longOption = "out-anno-file",
+ shortOption = Some("oaf"),
+ toAnnotationSeq = (s: String) => Seq(OutAnnoAnnotation(s)),
+ helpText = "output annotation file path"
+ )
+ )
+}
+
+trait TapeoutCli {
+ this: Shell =>
+ parser.note("Tapeout-specific options")
+
+ Seq(
+ OutAnnoAnnotation
+ ).foreach(_.addOptions(parser))
+}
+
+class TapeoutStage() extends Stage {
+ override val shell: Shell = new Shell(applicationName = "tapeout") with TapeoutCli with ChiselCli with FirrtlCli
+
+ override def run(annotations: AnnotationSeq): AnnotationSeq = {
+ Logger.makeScope(annotations) {
+ val stageMain = new GenerateModelStageMain(annotations)
+ stageMain.executeStageMain()
+ }
+ annotations
+ }
+} diff --git a/tools/tapeout/src/main/scala/transforms/utils/FileUtils.scala b/tools/tapeout/src/main/scala/transforms/utils/FileUtils.scala new file mode 100644 index 000000000..5c18aa18f --- /dev/null +++ b/tools/tapeout/src/main/scala/transforms/utils/FileUtils.scala @@ -0,0 +1,79 @@ +// See LICENSE for license details.
+
+package tapeout.transforms.utils
+
+import chisel3.experimental.{annotate, ChiselAnnotation}
+import firrtl._
+import firrtl.annotations._
+import firrtl.stage.Forms
+import firrtl.stage.TransformManager.TransformDependency
+import firrtl.transforms.BlackBoxTargetDirAnno
+
+object WriteConfig {
+ def apply(dir: String, file: String, contents: String): Unit = {
+ val writer = new java.io.PrintWriter(new java.io.File(s"$dir/$file"))
+ writer.write(contents)
+ writer.close()
+ }
+}
+
+object GetTargetDir {
+ def apply(state: CircuitState): String = {
+ val annos = state.annotations
+ val destDir = annos.flatMap {
+ case BlackBoxTargetDirAnno(s) => Some(s)
+ case _ => None
+ }
+ val loc = {
+ if (destDir.isEmpty) "."
+ else destDir.head
+ }
+ val targetDir = new java.io.File(loc)
+ if (!targetDir.exists()) FileUtils.makeDirectory(targetDir.getAbsolutePath)
+ loc
+ }
+}
+
+trait HasSetTechnologyLocation {
+ self: chisel3.Module =>
+
+ def setTechnologyLocation(dir: String): Unit = {
+ annotate(new ChiselAnnotation {
+ override def toFirrtl: Annotation = {
+ TechnologyLocationAnnotation(dir)
+ }
+ })
+ }
+}
+
+case class TechnologyLocationAnnotation(dir: String) extends SingleTargetAnnotation[CircuitName] {
+ val target: CircuitName = CircuitName("All")
+ override def duplicate(n: CircuitName): Annotation = TechnologyLocationAnnotation(dir)
+}
+
+class TechnologyLocation extends Transform with DependencyAPIMigration {
+
+ override def prerequisites: Seq[TransformDependency] = Forms.LowForm
+ override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized
+ override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters
+
+ def execute(state: CircuitState): CircuitState = {
+ throw new Exception("TechnologyLocation is a query-only transform; call get(state) instead of execute")
+ }
+
+ def get(state: CircuitState): String = {
+ val annos = state.annotations
+ val dir = annos.flatMap {
+ case TechnologyLocationAnnotation(dir) => Some(dir)
+ case _ => None
+ }
+ dir.length match {
+ case 0 => ""
+ case 1 =>
+ val targetDir = new java.io.File(dir.head)
+ if (!targetDir.exists()) throw new Exception(s"Technology YAML directory $targetDir doesn't exist!")
+ dir.head
+ case _ => throw new Exception("Only one tech directory annotation is allowed!")
+ }
+ }
+} diff --git a/tools/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala b/tools/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala new file mode 100644 index 000000000..ef417e2c2 --- /dev/null +++ b/tools/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala @@ -0,0 +1,5 @@ +package tapeout.transforms.utils
+
+object LowerName {
+ def apply(s: String): String = s.replace(".", "_").replace("[", "_").replace("]", "")
+} diff --git a/tools/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala b/tools/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala new file mode 100644 index 000000000..6c277fea6 --- /dev/null +++ b/tools/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala @@ -0,0 +1,27 @@ +package tapeout.transforms.utils
+
+import chisel3._
+
+import scala.collection.immutable.ListMap
+
+class CustomBundle[T <: Data](elts: (String, T)*) extends Record {
+ val elements = ListMap(elts.map { case (field, elt) => field -> chiselTypeOf(elt) }: _*)
+ def apply(elt: String): T = elements(elt)
+ def apply(elt: Int): T = elements(elt.toString)
+}
+
+class CustomIndexedBundle[T <: Data](elts: (Int, T)*) extends Record {
+ // Record requires (String, Data) elements, so the Int indices are stringified
+ val elements = ListMap(elts.map { case (field, elt) => field.toString -> chiselTypeOf(elt) }: _*)
+ // TODO: Make an equivalent to the below work publicly (or only on subclasses?)
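+ // indexedElements keeps the original Int keys (e.g. 0 -> UInt(8.W)), while
+ // elements above must stringify them to satisfy Record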
+ def indexedElements = ListMap(elts.map { case (field, elt) => field -> chiselTypeOf(elt) }: _*)
+ def apply(elt: Int): T = elements(elt.toString)
+}
+
+object CustomIndexedBundle {
+ def apply[T <: Data](gen: T, idxs: Seq[Int]) = new CustomIndexedBundle(idxs.map(_ -> gen): _*)
+ // Allows Vecs of elements of different types/widths
+ def apply[T <: Data](gen: Seq[T]) = new CustomIndexedBundle(gen.zipWithIndex.map { case (elt, field) =>
+ field -> elt
+ }: _*)
+} diff --git a/tools/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala b/tools/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala new file mode 100644 index 000000000..0dae7ffa5 --- /dev/null +++ b/tools/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala @@ -0,0 +1,23 @@ +package tapeout.transforms.utils
+
+import firrtl.FileUtils
+import net.jcazevedo.moultingyaml._
+
+import java.io.File
+
+class YamlFileReader(resource: String) {
+ def parse[A](file: String = "")(implicit reader: YamlReader[A]): Seq[A] = {
+ // If the user doesn't provide a YAML file name, fall back to the bundled resource
+ val yamlString = file match {
+ case f if f.isEmpty =>
+ FileUtils.getTextResource(resource)
+ case f if new File(f).exists =>
+ FileUtils.getText(f)
+ case _ =>
+ throw new Exception("No valid YAML file found!")
+ }
+ yamlString.parseYamls.map(x => reader.read(x))
+ }
+}
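
For reference, a minimal sketch of driving YamlFileReader with moultingyaml's DefaultYamlProtocol; the TechEntry schema and both file names are hypothetical stand-ins, not part of this change:

  import net.jcazevedo.moultingyaml._
  import net.jcazevedo.moultingyaml.DefaultYamlProtocol._
  import tapeout.transforms.utils.YamlFileReader

  object YamlReaderExample extends App {
    // Hypothetical schema for entries in the YAML file
    case class TechEntry(name: String, width: Int)
    implicit val techEntryFormat: YamlFormat[TechEntry] = yamlFormat2(TechEntry)

    // Falls back to the bundled example-tech.yml resource when no file is given
    val reader = new YamlFileReader("example-tech.yml")
    val entries: Seq[TechEntry] = reader.parse[TechEntry]("tech.yml")
    entries.foreach(e => println(s"${e.name}: ${e.width}"))
  }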