From 618d6b5db30c7a714839687fd50ec654d59729e5 Mon Sep 17 00:00:00 2001 From: Jack Koenig Date: Mon, 24 Oct 2016 19:46:38 -0700 Subject: [PATCH 001/273] Initial commit --- .gitignore | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..c58d83b31 --- /dev/null +++ b/.gitignore @@ -0,0 +1,17 @@ +*.class +*.log + +# sbt specific +.cache +.history +.lib/ +dist/* +target/ +lib_managed/ +src_managed/ +project/boot/ +project/plugins/project/ + +# Scala-IDE specific +.scala_dependencies +.worksheet From 08855aaa64d93a340aca0ec256f92d7e03e93a58 Mon Sep 17 00:00:00 2001 From: Jack Date: Mon, 24 Oct 2016 19:49:10 -0700 Subject: [PATCH 002/273] Add BSD License --- LICENSE | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..3ed5e8273 --- /dev/null +++ b/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2016, The Regents of the University of California (Regents) +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the Regents nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
From 52e8b6b04a458cc0470f391de9450ce177921a79 Mon Sep 17 00:00:00 2001 From: Jack Date: Mon, 24 Oct 2016 20:56:34 -0700 Subject: [PATCH 003/273] Add *.swp to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index c58d83b31..1f855d9bf 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ *.class *.log +*.swp # sbt specific .cache From a1c7742a57a2c7837b9398b67afca9095d5aec59 Mon Sep 17 00:00:00 2001 From: Chick Markley Date: Mon, 24 Oct 2016 21:00:14 -0700 Subject: [PATCH 004/273] Add ExecutionOptionsManager Taken from https://github.com/ucb-bar/firrtl --- build.sbt | 16 +++ .../ExecutionOptionsManager.scala | 126 ++++++++++++++++++ .../ExecutionOptionsManagerSpec.scala | 31 +++++ project/build.properties | 1 + project/dependencies.scala | 16 +++ 5 files changed, 190 insertions(+) create mode 100644 build.sbt create mode 100644 executionoptions/src/main/scala/bar/executionoptions/ExecutionOptionsManager.scala create mode 100644 executionoptions/src/test/scala/bar/executionoptions/ExecutionOptionsManagerSpec.scala create mode 100644 project/build.properties create mode 100644 project/dependencies.scala diff --git a/build.sbt b/build.sbt new file mode 100644 index 000000000..eb35f36c8 --- /dev/null +++ b/build.sbt @@ -0,0 +1,16 @@ +// See LICENSE for license details. + +import Dependencies._ + +lazy val commonSettings = Seq( + organization := "edu.berkeley.cs", + version := "0.1-SNAPSHOT", + scalaVersion := "2.11.8", + libraryDependencies ++= commonDependencies +) + +lazy val executionoptions = (project in file("executionoptions")) + .settings(commonSettings) + .settings( + libraryDependencies ++= executionoptionsDependencies + ) diff --git a/executionoptions/src/main/scala/bar/executionoptions/ExecutionOptionsManager.scala b/executionoptions/src/main/scala/bar/executionoptions/ExecutionOptionsManager.scala new file mode 100644 index 000000000..05a1e75cf --- /dev/null +++ b/executionoptions/src/main/scala/bar/executionoptions/ExecutionOptionsManager.scala @@ -0,0 +1,126 @@ +// See LICENSE for license details. + +package bar.executionoptions + +import scopt.OptionParser + +/** + * Use this trait to define an options class that can add its private command line options to a externally + * declared parser + */ +trait ComposableOptions + +/** + * Most of the chisel toolchain components require a topName which defines a circuit or a device under test. + * Much of the work that is done takes place in a directory. + * It would be simplest to require topName to be defined but in practice it is preferred to defer this. + * For example, in chisel, by deferring this it is possible for the execute there to first elaborate the + * circuit and then set the topName from that if it has not already been set. 
+ */ +case class CommonOptions(topName: String = "", targetDirName: String = "test_run_dir") extends ComposableOptions + +abstract class HasParser(applicationName: String) { + final val parser: OptionParser[Unit] = new OptionParser[Unit](applicationName) {} +} + +trait HasCommonOptions { + self: ExecutionOptionsManager => + var commonOptions = CommonOptions() + + parser.note("common options") + + parser.opt[String]("top-name") + .abbr("tn") + .valueName("") + .foreach { x => + commonOptions = commonOptions.copy(topName = x) + } + .text("This options defines the top level circuit, defaults to dut when possible") + + parser.opt[String]("target-dir") + .abbr("td").valueName("") + .foreach { x => + commonOptions = commonOptions.copy(targetDirName = x) + } + .text(s"This options defines a work directory for intermediate files, default is ${commonOptions.targetDirName}") + + parser.help("help").text("prints this usage text") +} + +/** + * + * @param applicationName The name shown in the usage + */ +class ExecutionOptionsManager(val applicationName: String) extends HasParser(applicationName) with HasCommonOptions { + + def parse(args: Array[String]): Boolean = { + parser.parse(args) + } + + def showUsageAsError(): Unit = parser.showUsageAsError() + + /** + * make sure that all levels of targetDirName exist + * + * @return true if directory exists + */ + def makeTargetDir(): Boolean = { + (new java.io.File(commonOptions.targetDirName)).mkdirs() + } + + def targetDirName: String = commonOptions.targetDirName + + /** + * this function sets the topName in the commonOptions. + * It would be nicer to not need this but many chisel tools cannot determine + * the name of the device under test until other options have been parsed. + * Havin this function allows the code to set the TopName after it has been + * determined + * + * @param newTopName the topName to be used + */ + def setTopName(newTopName: String): Unit = { + commonOptions = commonOptions.copy(topName = newTopName) + } + def setTopNameIfNotSet(newTopName: String): Unit = { + if(commonOptions.topName.isEmpty) { + setTopName(newTopName) + } + } + def topName: String = commonOptions.topName + def setTargetDirName(newTargetDirName: String): Unit = { + commonOptions = commonOptions.copy(targetDirName = newTargetDirName) + } + + /** + * return a file based on targetDir, topName and suffix + * Will not add the suffix if the topName already ends with that suffix + * + * @param suffix suffix to add, removes . 
if present + * @param fileNameOverride this will override the topName if nonEmpty, when using this targetDir is ignored + * @return + */ + def getBuildFileName(suffix: String, fileNameOverride: String = ""): String = { + makeTargetDir() + + val baseName = if(fileNameOverride.nonEmpty) fileNameOverride else topName + val directoryName = { + if(fileNameOverride.nonEmpty) { + "" + } + else if(baseName.startsWith("./") || baseName.startsWith("/")) { + "" + } + else { + if(targetDirName.endsWith("/")) targetDirName else targetDirName + "/" + } + } + val normalizedSuffix = { + val dottedSuffix = if(suffix.startsWith(".")) suffix else s".$suffix" + if(baseName.endsWith(dottedSuffix)) "" else dottedSuffix + } + + s"$directoryName$baseName$normalizedSuffix" + } +} + diff --git a/executionoptions/src/test/scala/bar/executionoptions/ExecutionOptionsManagerSpec.scala b/executionoptions/src/test/scala/bar/executionoptions/ExecutionOptionsManagerSpec.scala new file mode 100644 index 000000000..f26b1fc55 --- /dev/null +++ b/executionoptions/src/test/scala/bar/executionoptions/ExecutionOptionsManagerSpec.scala @@ -0,0 +1,31 @@ +// See LICENSE for license details. + +package bar.executionoptions + +import org.scalatest.{Matchers, FreeSpec} + +class ExecutionOptionsManagerSpec extends FreeSpec with Matchers { + "ExecutionOptionsManager is a container for one more more ComposableOptions Block" - { + "It has a default CommonOptionsBlock" in { + val manager = new ExecutionOptionsManager("test") + manager.commonOptions.targetDirName should be ("test_run_dir") + } + "But can override defaults like this" in { + val manager = new ExecutionOptionsManager("test") { commonOptions = CommonOptions(topName = "dog") } + manager.commonOptions shouldBe a [CommonOptions] + manager.topName should be ("dog") + manager.commonOptions.topName should be ("dog") + } + "The add method should put a new version of a given type the manager" in { + val manager = new ExecutionOptionsManager("test") { commonOptions = CommonOptions(topName = "dog") } + val initialCommon = manager.commonOptions + initialCommon.topName should be ("dog") + + manager.commonOptions = CommonOptions(topName = "cat") + + val afterCommon = manager.commonOptions + afterCommon.topName should be ("cat") + initialCommon.topName should be ("dog") + } + } +} diff --git a/project/build.properties b/project/build.properties new file mode 100644 index 000000000..35c88bab7 --- /dev/null +++ b/project/build.properties @@ -0,0 +1 @@ +sbt.version=0.13.12 diff --git a/project/dependencies.scala b/project/dependencies.scala new file mode 100644 index 000000000..e9367cfca --- /dev/null +++ b/project/dependencies.scala @@ -0,0 +1,16 @@ +import sbt._ +import Keys._ + +object Dependencies { + val scalatestVersion = "3.0.0" + val scalatest = "org.scalatest" %% "scalatest" % scalatestVersion % "test" + val scoptVersion = "3.4.0" + val scopt = "com.github.scopt" %% "scopt" % scoptVersion + + val commonDependencies: Seq[ModuleID] = Seq( + scalatest + ) + val executionoptionsDependencies: Seq[ModuleID] = Seq( + scopt + ) +} From 4b6110f0f66b6dd797ead957c568adced555325f Mon Sep 17 00:00:00 2001 From: chick Date: Thu, 27 Oct 2016 10:25:58 -0700 Subject: [PATCH 005/273] Change spec to show a better way to change options --- .../ExecutionOptionsManagerSpec.scala | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/executionoptions/src/test/scala/bar/executionoptions/ExecutionOptionsManagerSpec.scala 
b/executionoptions/src/test/scala/bar/executionoptions/ExecutionOptionsManagerSpec.scala index f26b1fc55..8f15f940c 100644 --- a/executionoptions/src/test/scala/bar/executionoptions/ExecutionOptionsManagerSpec.scala +++ b/executionoptions/src/test/scala/bar/executionoptions/ExecutionOptionsManagerSpec.scala @@ -16,16 +16,38 @@ class ExecutionOptionsManagerSpec extends FreeSpec with Matchers { manager.topName should be ("dog") manager.commonOptions.topName should be ("dog") } - "The add method should put a new version of a given type the manager" in { - val manager = new ExecutionOptionsManager("test") { commonOptions = CommonOptions(topName = "dog") } + "The proper way to change an option in is to copy the existing sub-option with the desired new value" in { + val manager = new ExecutionOptionsManager("test") { + commonOptions = CommonOptions(targetDirName = "fox", topName = "dog") + } val initialCommon = manager.commonOptions + initialCommon.targetDirName should be ("fox") initialCommon.topName should be ("dog") - manager.commonOptions = CommonOptions(topName = "cat") + manager.commonOptions = manager.commonOptions.copy(topName = "cat") val afterCommon = manager.commonOptions afterCommon.topName should be ("cat") + afterCommon.targetDirName should be ("fox") initialCommon.topName should be ("dog") } + "The following way of changing a manager should not be used, as it could alter other desired values" - { + "Note that the initial setting targetDirName is lost when using this method" in { + val manager = new ExecutionOptionsManager("test") { + commonOptions = CommonOptions(targetDirName = "fox", topName = "dog") + } + val initialCommon = manager.commonOptions + initialCommon.topName should be("dog") + + manager.commonOptions = CommonOptions(topName = "cat") + + val afterCommon = manager.commonOptions + initialCommon.topName should be("dog") + afterCommon.topName should be("cat") + + // This is probably bad + afterCommon.targetDirName should not be "fox" + } + } } } From d86dea58cf2493ace790ca9e112a8affaa80e7ac Mon Sep 17 00:00:00 2001 From: Angie Wang Date: Fri, 17 Feb 2017 11:58:05 -0800 Subject: [PATCH 006/273] Tapeout (#4) * remove outdated files * pulled resetinverter from dsptools + setup repo * fix some package names, misc. 
dsptools dependencies, typo in build.sbt, + circuitstate in resetinverter pass * add more comprehensive gitignore + license back in * create directory structure to match package structure * change package names to barstools.tapeout * settled on barstools.tapeout.transforms package * make directory + build structure more amenable for multiple sub projects --- .gitignore | 329 +++++++++++++++++- LICENSE | 2 +- build.sbt | 14 +- .../ExecutionOptionsManager.scala | 126 ------- .../ExecutionOptionsManagerSpec.scala | 53 --- ...{dependencies.scala => Dependencies.scala} | 13 +- project/build.properties | 2 +- scalastyle-config.xml | 110 ++++++ scalastyle-test-config.xml | 109 ++++++ .../main/scala/transforms/ResetInverter.scala | 65 ++++ .../scala/transforms/ResetInverterSpec.scala | 39 +++ 11 files changed, 670 insertions(+), 192 deletions(-) delete mode 100644 executionoptions/src/main/scala/bar/executionoptions/ExecutionOptionsManager.scala delete mode 100644 executionoptions/src/test/scala/bar/executionoptions/ExecutionOptionsManagerSpec.scala rename project/{dependencies.scala => Dependencies.scala} (54%) create mode 100644 scalastyle-config.xml create mode 100644 scalastyle-test-config.xml create mode 100644 tapeout/src/main/scala/transforms/ResetInverter.scala create mode 100644 tapeout/src/test/scala/transforms/ResetInverterSpec.scala diff --git a/.gitignore b/.gitignore index 1f855d9bf..2179f6e04 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,317 @@ +### local stuff +src/main/scala/dsptools/sandbox.sc +test_run_dir/ +*.fir +*.anno +### XilinxISE template +# intermediate build files +*.bgn +*.bit +*.bld +*.cmd_log +*.drc +*.ll +*.lso +*.msd +*.msk +*.ncd +*.ngc +*.ngd +*.ngr +*.pad +*.par +*.pcf +*.prj +*.ptwx +*.rbb +*.rbd +*.stx +*.syr +*.twr +*.twx +*.unroutes +*.ut +*.xpi +*.xst +*_bitgen.xwbt +*_envsettings.html +*_map.map +*_map.mrp +*_map.ngm +*_map.xrpt +*_ngdbuild.xrpt +*_pad.csv +*_pad.txt +*_par.xrpt +*_summary.html +*_summary.xml +*_usage.xml +*_xst.xrpt + +# project-wide generated files +*.gise +par_usage_statistics.html +usage_statistics_webtalk.html +webtalk.log +webtalk_pn.xml + +# generated folders +iseconfig/ +xlnx_auto_0_xdb/ +xst/ +_ngo/ +_xmsgs/ +### Eclipse template +*.pydevproject +.metadata +.gradle +bin/ +tmp/ +*.tmp +*.bak +*.swp +*~.nib +local.properties +.settings/ +.loadpath + +# Eclipse Core +.project + +# External tool builders +.externalToolBuilders/ + +# Locally stored "Eclipse launch configurations" +*.launch + +# CDT-specific +.cproject + +# JDT-specific (Eclipse Java Development Tools) +.classpath + +# Java annotation processor (APT) +.factorypath + +# PDT-specific +.buildpath + +# sbteclipse plugin +.target + +# TeXlipse plugin +.texlipse +### C template +# Object files +*.o +*.ko +*.obj +*.elf + +# Precompiled Headers +*.gch +*.pch + +# Libraries +*.lib +*.a +*.la +*.lo + +# Shared objects (inc. 
Windows DLLs) +*.dll +*.so +*.so.* +*.dylib + +# Executables +*.exe +*.out +*.app +*.i*86 +*.x86_64 +*.hex + +# Debug files +*.dSYM/ +### SBT template +# Simple Build Tool +# http://www.scala-sbt.org/release/docs/Getting-Started/Directories.html#configuring-version-control + +target/ +lib_managed/ +src_managed/ +project/boot/ +.history +.cache +### Emacs template +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages +/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +### Vim template +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist +*~ +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio + +*.iml + +## Directory-based project format: +.idea/ +# if you remove the above rule, at least ignore the following: + +# User-specific stuff: +# .idea/workspace.xml +# .idea/tasks.xml +# .idea/dictionaries + +# Sensitive or high-churn files: +# .idea/dataSources.ids +# .idea/dataSources.xml +# .idea/sqlDataSources.xml +# .idea/dynamic.xml +# .idea/uiDesigner.xml + +# Gradle: +# .idea/gradle.xml +# .idea/libraries + +# Mongo Explorer plugin: +# .idea/mongoSettings.xml + +## File-based project format: +*.ipr +*.iws + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +### C++ template +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app +### OSX template +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk +### Xcode template +# Xcode +# +# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore + +## Build generated +build/ +DerivedData + +## Various settings +*.pbxuser +!default.pbxuser +*.mode1v3 +!default.mode1v3 +*.mode2v3 +!default.mode2v3 +*.perspectivev3 +!default.perspectivev3 +xcuserdata + +## Other +*.xccheckout +*.moved-aside +*.xcuserstate +### Scala template *.class *.log -*.swp # sbt specific .cache @@ -16,3 +327,19 @@ project/plugins/project/ # Scala-IDE specific .scala_dependencies .worksheet +### Java template +*.class + +# Mobile Tools for Java (J2ME) +.mtj.tmp/ + +# Package Files # +*.jar +*.war +*.ear + +# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml +hs_err_pid* + +# ignore lib from rocket build +lib diff --git a/LICENSE b/LICENSE index 3ed5e8273..ad326246a 100644 --- a/LICENSE +++ b/LICENSE @@ -26,4 +26,4 @@ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/build.sbt b/build.sbt index eb35f36c8..7374d9915 100644 --- a/build.sbt +++ b/build.sbt @@ -6,11 +6,19 @@ lazy val commonSettings = Seq( organization := "edu.berkeley.cs", version := "0.1-SNAPSHOT", scalaVersion := "2.11.8", + scalacOptions := Seq("-deprecation", "-feature"), libraryDependencies ++= commonDependencies ) -lazy val executionoptions = (project in file("executionoptions")) +val defaultVersions = Map( + "chisel3" -> "3.1-SNAPSHOT", + "chisel-iotesters" -> "1.2-SNAPSHOT" +) + +lazy val tapeout = (project in file("tapeout")) .settings(commonSettings) .settings( - libraryDependencies ++= executionoptionsDependencies - ) + libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { + dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) + } + ) \ No newline at end of file diff --git a/executionoptions/src/main/scala/bar/executionoptions/ExecutionOptionsManager.scala b/executionoptions/src/main/scala/bar/executionoptions/ExecutionOptionsManager.scala deleted file mode 100644 index 05a1e75cf..000000000 --- a/executionoptions/src/main/scala/bar/executionoptions/ExecutionOptionsManager.scala +++ /dev/null @@ -1,126 +0,0 @@ -// See LICENSE for license details. - -package bar.executionoptions - -import scopt.OptionParser - -/** - * Use this trait to define an options class that can add its private command line options to a externally - * declared parser - */ -trait ComposableOptions - -/** - * Most of the chisel toolchain components require a topName which defines a circuit or a device under test. - * Much of the work that is done takes place in a directory. - * It would be simplest to require topName to be defined but in practice it is preferred to defer this. - * For example, in chisel, by deferring this it is possible for the execute there to first elaborate the - * circuit and then set the topName from that if it has not already been set. 
- */ -case class CommonOptions(topName: String = "", targetDirName: String = "test_run_dir") extends ComposableOptions - -abstract class HasParser(applicationName: String) { - final val parser: OptionParser[Unit] = new OptionParser[Unit](applicationName) {} -} - -trait HasCommonOptions { - self: ExecutionOptionsManager => - var commonOptions = CommonOptions() - - parser.note("common options") - - parser.opt[String]("top-name") - .abbr("tn") - .valueName("") - .foreach { x => - commonOptions = commonOptions.copy(topName = x) - } - .text("This options defines the top level circuit, defaults to dut when possible") - - parser.opt[String]("target-dir") - .abbr("td").valueName("") - .foreach { x => - commonOptions = commonOptions.copy(targetDirName = x) - } - .text(s"This options defines a work directory for intermediate files, default is ${commonOptions.targetDirName}") - - parser.help("help").text("prints this usage text") -} - -/** - * - * @param applicationName The name shown in the usage - */ -class ExecutionOptionsManager(val applicationName: String) extends HasParser(applicationName) with HasCommonOptions { - - def parse(args: Array[String]): Boolean = { - parser.parse(args) - } - - def showUsageAsError(): Unit = parser.showUsageAsError() - - /** - * make sure that all levels of targetDirName exist - * - * @return true if directory exists - */ - def makeTargetDir(): Boolean = { - (new java.io.File(commonOptions.targetDirName)).mkdirs() - } - - def targetDirName: String = commonOptions.targetDirName - - /** - * this function sets the topName in the commonOptions. - * It would be nicer to not need this but many chisel tools cannot determine - * the name of the device under test until other options have been parsed. - * Havin this function allows the code to set the TopName after it has been - * determined - * - * @param newTopName the topName to be used - */ - def setTopName(newTopName: String): Unit = { - commonOptions = commonOptions.copy(topName = newTopName) - } - def setTopNameIfNotSet(newTopName: String): Unit = { - if(commonOptions.topName.isEmpty) { - setTopName(newTopName) - } - } - def topName: String = commonOptions.topName - def setTargetDirName(newTargetDirName: String): Unit = { - commonOptions = commonOptions.copy(targetDirName = newTargetDirName) - } - - /** - * return a file based on targetDir, topName and suffix - * Will not add the suffix if the topName already ends with that suffix - * - * @param suffix suffix to add, removes . 
if present - * @param fileNameOverride this will override the topName if nonEmpty, when using this targetDir is ignored - * @return - */ - def getBuildFileName(suffix: String, fileNameOverride: String = ""): String = { - makeTargetDir() - - val baseName = if(fileNameOverride.nonEmpty) fileNameOverride else topName - val directoryName = { - if(fileNameOverride.nonEmpty) { - "" - } - else if(baseName.startsWith("./") || baseName.startsWith("/")) { - "" - } - else { - if(targetDirName.endsWith("/")) targetDirName else targetDirName + "/" - } - } - val normalizedSuffix = { - val dottedSuffix = if(suffix.startsWith(".")) suffix else s".$suffix" - if(baseName.endsWith(dottedSuffix)) "" else dottedSuffix - } - - s"$directoryName$baseName$normalizedSuffix" - } -} - diff --git a/executionoptions/src/test/scala/bar/executionoptions/ExecutionOptionsManagerSpec.scala b/executionoptions/src/test/scala/bar/executionoptions/ExecutionOptionsManagerSpec.scala deleted file mode 100644 index 8f15f940c..000000000 --- a/executionoptions/src/test/scala/bar/executionoptions/ExecutionOptionsManagerSpec.scala +++ /dev/null @@ -1,53 +0,0 @@ -// See LICENSE for license details. - -package bar.executionoptions - -import org.scalatest.{Matchers, FreeSpec} - -class ExecutionOptionsManagerSpec extends FreeSpec with Matchers { - "ExecutionOptionsManager is a container for one more more ComposableOptions Block" - { - "It has a default CommonOptionsBlock" in { - val manager = new ExecutionOptionsManager("test") - manager.commonOptions.targetDirName should be ("test_run_dir") - } - "But can override defaults like this" in { - val manager = new ExecutionOptionsManager("test") { commonOptions = CommonOptions(topName = "dog") } - manager.commonOptions shouldBe a [CommonOptions] - manager.topName should be ("dog") - manager.commonOptions.topName should be ("dog") - } - "The proper way to change an option in is to copy the existing sub-option with the desired new value" in { - val manager = new ExecutionOptionsManager("test") { - commonOptions = CommonOptions(targetDirName = "fox", topName = "dog") - } - val initialCommon = manager.commonOptions - initialCommon.targetDirName should be ("fox") - initialCommon.topName should be ("dog") - - manager.commonOptions = manager.commonOptions.copy(topName = "cat") - - val afterCommon = manager.commonOptions - afterCommon.topName should be ("cat") - afterCommon.targetDirName should be ("fox") - initialCommon.topName should be ("dog") - } - "The following way of changing a manager should not be used, as it could alter other desired values" - { - "Note that the initial setting targetDirName is lost when using this method" in { - val manager = new ExecutionOptionsManager("test") { - commonOptions = CommonOptions(targetDirName = "fox", topName = "dog") - } - val initialCommon = manager.commonOptions - initialCommon.topName should be("dog") - - manager.commonOptions = CommonOptions(topName = "cat") - - val afterCommon = manager.commonOptions - initialCommon.topName should be("dog") - afterCommon.topName should be("cat") - - // This is probably bad - afterCommon.targetDirName should not be "fox" - } - } - } -} diff --git a/project/dependencies.scala b/project/Dependencies.scala similarity index 54% rename from project/dependencies.scala rename to project/Dependencies.scala index e9367cfca..6395b9806 100644 --- a/project/dependencies.scala +++ b/project/Dependencies.scala @@ -4,13 +4,12 @@ import Keys._ object Dependencies { val scalatestVersion = "3.0.0" val scalatest = "org.scalatest" %% 
"scalatest" % scalatestVersion % "test" - val scoptVersion = "3.4.0" - val scopt = "com.github.scopt" %% "scopt" % scoptVersion + val scalacheckVersion = "1.12.4" + val scalacheck = "org.scalacheck" %% "scalacheck" % scalacheckVersion % "test" val commonDependencies: Seq[ModuleID] = Seq( - scalatest + scalatest, + scalacheck ) - val executionoptionsDependencies: Seq[ModuleID] = Seq( - scopt - ) -} + +} \ No newline at end of file diff --git a/project/build.properties b/project/build.properties index 35c88bab7..7d789d45d 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=0.13.12 +sbt.version=0.13.12 \ No newline at end of file diff --git a/scalastyle-config.xml b/scalastyle-config.xml new file mode 100644 index 000000000..57ef60a26 --- /dev/null +++ b/scalastyle-config.xml @@ -0,0 +1,110 @@ + + Scalastyle standard configuration + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + No lines ending with a ; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + |\|\||&&|:=|<>|<=|>=|!=|===|<<|>>|##|unary_(~|\-%?|!))$]]> + + + + + + + + + + + diff --git a/scalastyle-test-config.xml b/scalastyle-test-config.xml new file mode 100644 index 000000000..bf32aacd4 --- /dev/null +++ b/scalastyle-test-config.xml @@ -0,0 +1,109 @@ + + Scalastyle configuration for Chisel3 unit tests + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + No lines ending with a ; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + |\|\||&&|:=|<>|<=|>=|!=|===|<<|>>|##|unary_(~|\-%?|!))$]]> + + + + + + + + + + + diff --git a/tapeout/src/main/scala/transforms/ResetInverter.scala b/tapeout/src/main/scala/transforms/ResetInverter.scala new file mode 100644 index 000000000..f0bd34498 --- /dev/null +++ b/tapeout/src/main/scala/transforms/ResetInverter.scala @@ -0,0 +1,65 @@ +// See LICENSE for license details. 
+ +package barstools.tapeout.transforms + +import chisel3.internal.InstanceId +import firrtl.PrimOps.Not +import firrtl.annotations.{Annotation, CircuitName, ModuleName, Named} +import firrtl.ir.{Input, UIntType, IntWidth, Module, Port, DefNode, NoInfo, Reference, DoPrim, Block, Circuit} +import firrtl.passes.Pass +import firrtl.{CircuitForm, CircuitState, LowForm, Transform} + +object ResetInverterAnnotation { + def apply(target: ModuleName): Annotation = Annotation(target, classOf[ResetInverterTransform], "invert") + def unapply(a: Annotation): Option[Named] = a match { + case Annotation(m, t, "invert") if t == classOf[ResetInverterTransform] => Some(m) + case _ => None + } +} + +object ResetN extends Pass { + def name: String = "ResetN" + private val Bool = UIntType(IntWidth(1)) + // Only works on Modules with a Bool port named reset + def invertReset(mod: Module): Module = { + // Check that it actually has reset + require(mod.ports.exists(p => p.name == "reset" && p.tpe == Bool), + "Can only invert reset on a module with reset!") + // Rename "reset" to "reset_n" + val portsx = mod.ports map { + case Port(info, "reset", Input, Bool) => Port(info, "reset_n", Input, Bool) + case other => other + } + val newReset = DefNode(NoInfo, "reset", DoPrim(Not, Seq(Reference("reset_n", Bool)), Seq.empty, Bool)) + val bodyx = Block(Seq(newReset, mod.body)) + mod.copy(ports = portsx, body = bodyx) + } + + def run(c: Circuit): Circuit = + c.copy(modules = c.modules map { + case mod: Module if mod.name == c.main => invertReset(mod) + case other => other + }) +} + +class ResetInverterTransform extends Transform { + override def inputForm: CircuitForm = LowForm + override def outputForm: CircuitForm = LowForm + + override def execute(state: CircuitState): CircuitState = { + getMyAnnotations(state) match { + case Nil => CircuitState(state.circuit, LowForm) + case Seq(ResetInverterAnnotation(ModuleName(state.circuit.main, CircuitName(_)))) => + CircuitState(ResetN.run(state.circuit), LowForm) + case annotations => + throw new Exception(s"There should be only one InvertReset annotation: got ${annotations.mkString(" -- ")}") + } + } +} + +trait ResetInverter { + self: chisel3.Module => + def invert(component: InstanceId): Unit = { + annotate(chisel3.experimental.ChiselAnnotation(component, classOf[ResetInverterTransform], "invert")) + } +} diff --git a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala new file mode 100644 index 000000000..fd49435ed --- /dev/null +++ b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala @@ -0,0 +1,39 @@ +// See LICENSE for license details. 
+ +package barstools.tapeout.transforms + +import chisel3._ +import chisel3.util.RegInit +import firrtl._ +import org.scalatest.{FreeSpec, Matchers} + +class ExampleModuleNeedsResetInverted extends Module with ResetInverter { + val io = IO(new Bundle { + val out = Output(UInt(32.W)) + }) + + val r = RegInit(0.U) + + invert(this) +} + +class ResetNSpec extends FreeSpec with Matchers { + + "Inverting reset needs to be done throughout module" in { + val optionsManager = new ExecutionOptionsManager("dsptools") with HasChiselExecutionOptions with HasFirrtlOptions { + firrtlOptions = firrtlOptions.copy(compilerName = "low") + } + chisel3.Driver.execute(optionsManager, () => new ExampleModuleNeedsResetInverted) match { + case ChiselExecutionSuccess(_, chirrtl, Some(FirrtlExecutionSuccess(_, firrtl))) => + chirrtl should include ("input reset :") + chirrtl should not include "input reset_n :" + chirrtl should not include "node reset = not(reset_n)" + + firrtl should include ("input reset_n :") + firrtl should include ("node reset = not(reset_n)") + firrtl should not include "input reset :" + case _ => + // bad + } + } +} \ No newline at end of file From 43f1a699ad54721096d203f50072d2181d1c0bcd Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Tue, 21 Feb 2017 11:11:33 -0800 Subject: [PATCH 007/273] Move passes from pfpmp to barstools. (#5) * Move passes from pfpmp to barstools. * add an app that does both the harness and top generation This reduces the number of firrtl.compile calls * Add the ability to read annotations file This helps with chisel annotation integration --- .../transforms/ConvertToExtModPass.scala | 36 ++++++ .../scala/transforms/EnumerateModules.scala | 32 +++++ .../scala/transforms/GenerateHarness.scala | 79 ++++++++++++ .../main/scala/transforms/GenerateTop.scala | 77 +++++++++++ .../transforms/GenerateTopAndHarness.scala | 120 ++++++++++++++++++ .../scala/transforms/ReParentCircuit.scala | 26 ++++ .../transforms/RemoveUnusedModules.scala | 59 +++++++++ .../RenameModulesAndInstances.scala | 42 ++++++ 8 files changed, 471 insertions(+) create mode 100644 tapeout/src/main/scala/transforms/ConvertToExtModPass.scala create mode 100644 tapeout/src/main/scala/transforms/EnumerateModules.scala create mode 100644 tapeout/src/main/scala/transforms/GenerateHarness.scala create mode 100644 tapeout/src/main/scala/transforms/GenerateTop.scala create mode 100644 tapeout/src/main/scala/transforms/GenerateTopAndHarness.scala create mode 100644 tapeout/src/main/scala/transforms/ReParentCircuit.scala create mode 100644 tapeout/src/main/scala/transforms/RemoveUnusedModules.scala create mode 100644 tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala diff --git a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala new file mode 100644 index 000000000..98425fd0c --- /dev/null +++ b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala @@ -0,0 +1,36 @@ +// See LICENSE for license details. + +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.ir._ +import firrtl.passes.Pass + +// Converts some modules to external modules, based on a given function. If +// that function returns "true" then the module is converted into an ExtModule, +// otherwise it's left alone. 
+class ConvertToExtModPass(classify: (Module) => Boolean) extends Pass { + def name = "Convert to External Modules" + + def run(c: Circuit): Circuit = { + val modulesx = c.modules.map { + case m: ExtModule => m + case m: Module => + if (classify(m)) { + new ExtModule(m.info, m.name, m.ports, m.name, Seq.empty) + } else { + m + } + } + Circuit(c.info, modulesx, c.main) + } +} +class ConvertToExtMod(classify: (Module) => Boolean) extends Transform with PassBased { + def inputForm = MidForm + def outputForm = MidForm + def passSeq = Seq(new ConvertToExtModPass(classify)) + + def execute(state: CircuitState): CircuitState = { + CircuitState(runPasses(state.circuit), state.form) + } +} diff --git a/tapeout/src/main/scala/transforms/EnumerateModules.scala b/tapeout/src/main/scala/transforms/EnumerateModules.scala new file mode 100644 index 000000000..ec4389c62 --- /dev/null +++ b/tapeout/src/main/scala/transforms/EnumerateModules.scala @@ -0,0 +1,32 @@ +// See LICENSE for license details. + +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.ir._ +import firrtl.passes.Pass + +class EnumerateModulesPass(enumerate: (Module) => Unit) extends Pass { + def name = "Enumurate Modules" + + def run(c: Circuit): Circuit = { + val modulesx = c.modules.map { + case m: ExtModule => m + case m: Module => { + enumerate(m) + m + } + } + Circuit(c.info, modulesx, c.main) + } +} + +class EnumerateModules(enumerate: (Module) => Unit) extends Transform with PassBased { + def inputForm = LowForm + def outputForm = LowForm + def passSeq = Seq(new EnumerateModulesPass(enumerate)) + + def execute(state: CircuitState): CircuitState = { + CircuitState(runPasses(state.circuit), state.form) + } +} diff --git a/tapeout/src/main/scala/transforms/GenerateHarness.scala b/tapeout/src/main/scala/transforms/GenerateHarness.scala new file mode 100644 index 000000000..eea7960ee --- /dev/null +++ b/tapeout/src/main/scala/transforms/GenerateHarness.scala @@ -0,0 +1,79 @@ +// See LICENSE for license details. + +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.ir._ +import firrtl.annotations._ +import firrtl.passes.Pass + +object AllModules { + private var modules = Set[String]() + def add(module: String) = { + modules = modules | Set(module) + } + def rename(module: String) = { + var new_name = module + while (modules.contains(new_name)) + new_name = new_name + "_inTestHarness" + new_name + } +} + +object GenerateHarness extends App { + var input: Option[String] = None + var output: Option[String] = None + var synTop: Option[String] = None + var harnessTop: Option[String] = None + + var usedOptions = Set.empty[Integer] + args.zipWithIndex.foreach{ case (arg, i) => + arg match { + case "-i" => { + input = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "-o" => { + output = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--syn-top" => { + synTop = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--harness-top" => { + harnessTop = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case _ => { + if (! 
(usedOptions contains i)) { + error("Unknown option " + arg) + } + } + } + } + + firrtl.Driver.compile( + input.get, + output.get, + new VerilogCompiler(), + Parser.UseInfo, + Seq( + new ReParentCircuit(synTop.get), + new RemoveUnusedModules, + new EnumerateModules( { m => if (m.name != synTop.get) { AllModules.add(m.name) } } ) + ) + ) + + firrtl.Driver.compile( + input.get, + output.get, + new VerilogCompiler(), + Parser.UseInfo, + Seq( + new ConvertToExtMod((m) => m.name == synTop.get), + new RemoveUnusedModules, + new RenameModulesAndInstances((m) => AllModules.rename(m)) + ) + ) +} diff --git a/tapeout/src/main/scala/transforms/GenerateTop.scala b/tapeout/src/main/scala/transforms/GenerateTop.scala new file mode 100644 index 000000000..dc069a5b1 --- /dev/null +++ b/tapeout/src/main/scala/transforms/GenerateTop.scala @@ -0,0 +1,77 @@ +// See LICENSE for license details. + +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.ir._ +import firrtl.annotations._ +import firrtl.passes.Pass + +object GenerateTop extends App { + var input: Option[String] = None + var output: Option[String] = None + var synTop: Option[String] = None + var harnessTop: Option[String] = None + var seqMemFlags: Option[String] = Some("-o:unused.confg") + var listClocks: Option[String] = Some("-o:unused.clocks") + + var usedOptions = Set.empty[Integer] + args.zipWithIndex.foreach{ case (arg, i) => + arg match { + case "-i" => { + input = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "-o" => { + output = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--syn-top" => { + synTop = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--harness-top" => { + harnessTop = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--seq-mem-flags" => { + seqMemFlags = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--list-clocks" => { + listClocks = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case _ => { + if (! (usedOptions contains i)) { + error("Unknown option " + arg) + } + } + } + } + + firrtl.Driver.compile( + input.get, + output.get, + new VerilogCompiler(), + Parser.UseInfo, + Seq( + new ReParentCircuit(synTop.get), + new RemoveUnusedModules, + new passes.memlib.InferReadWrite(), + new passes.memlib.ReplSeqMem(), + new passes.clocklist.ClockListTransform() + ), + AnnotationMap(Seq( + passes.memlib.InferReadWriteAnnotation( + s"${synTop.get}" + ), + passes.clocklist.ClockListAnnotation( + s"-c:${synTop.get}:-m:${synTop.get}:${listClocks.get}" + ), + passes.memlib.ReplSeqMemAnnotation( + s"-c:${synTop.get}:${seqMemFlags.get}" + ) + )) + ) +} diff --git a/tapeout/src/main/scala/transforms/GenerateTopAndHarness.scala b/tapeout/src/main/scala/transforms/GenerateTopAndHarness.scala new file mode 100644 index 000000000..06dbe1554 --- /dev/null +++ b/tapeout/src/main/scala/transforms/GenerateTopAndHarness.scala @@ -0,0 +1,120 @@ +// See LICENSE for license details. 
+ +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.ir._ +import firrtl.annotations._ +import firrtl.passes.Pass + +import java.io.File +import firrtl.annotations.AnnotationYamlProtocol._ +import net.jcazevedo.moultingyaml._ + +object GenerateTopAndHarness extends App { + var input: Option[String] = None + var topOutput: Option[String] = None + var harnessOutput: Option[String] = None + var annoFile: Option[String] = None + var synTop: Option[String] = None + var harnessTop: Option[String] = None + var seqMemFlags: Option[String] = Some("-o:unused.confg") + var listClocks: Option[String] = Some("-o:unused.clocks") + + var usedOptions = Set.empty[Integer] + args.zipWithIndex.foreach{ case (arg, i) => + arg match { + case "-i" => { + input = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--top-o" => { + topOutput = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--harness-o" => { + harnessOutput = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--anno-file" => { + annoFile = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--syn-top" => { + synTop = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--harness-top" => { + harnessTop = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--seq-mem-flags" => { + seqMemFlags = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--list-clocks" => { + listClocks = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case _ => { + if (! (usedOptions contains i)) { + error("Unknown option " + arg) + } + } + } + } + //Load annotations from file + val annotationArray = annoFile match { + case None => Array[Annotation]() + case Some(fileName) => { + val annotations = new File(fileName) + if(annotations.exists) { + val annotationsYaml = io.Source.fromFile(annotations).getLines().mkString("\n").parseYaml + annotationsYaml.convertTo[Array[Annotation]] + } else { + Array[Annotation]() + } + } + } + + //Top Generation + firrtl.Driver.compile( + input.get, + topOutput.get, + new VerilogCompiler(), + Parser.UseInfo, + Seq( + new ReParentCircuit(synTop.get), + new RemoveUnusedModules, + new EnumerateModules( { m => if (m.name != synTop.get) { AllModules.add(m.name) } } ), + new passes.memlib.InferReadWrite(), + new passes.memlib.ReplSeqMem(), + new passes.clocklist.ClockListTransform() + ), + AnnotationMap(Seq( + passes.memlib.InferReadWriteAnnotation( + s"${synTop.get}" + ), + passes.clocklist.ClockListAnnotation( + s"-c:${synTop.get}:-m:${synTop.get}:${listClocks.get}" + ), + passes.memlib.ReplSeqMemAnnotation( + s"-c:${synTop.get}:${seqMemFlags.get}" + ) + ) ++ annotationArray) + ) + + //Harness Generation + firrtl.Driver.compile( + input.get, + harnessOutput.get, + new VerilogCompiler(), + Parser.UseInfo, + Seq( + new ConvertToExtMod((m) => m.name == synTop.get), + new RemoveUnusedModules, + new RenameModulesAndInstances((m) => AllModules.rename(m)) + ) + ) +} + diff --git a/tapeout/src/main/scala/transforms/ReParentCircuit.scala b/tapeout/src/main/scala/transforms/ReParentCircuit.scala new file mode 100644 index 000000000..da3f079a6 --- /dev/null +++ b/tapeout/src/main/scala/transforms/ReParentCircuit.scala @@ -0,0 +1,26 @@ +// See LICENSE for license details. + +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.ir._ +import firrtl.passes.Pass + +// "Re-Parents" a circuit, which changes the top module to something else. 
+class ReParentCircuitPass(newTopName: String) extends Pass { + def name = "Re-Parent Circuit" + + def run(c: Circuit): Circuit = { + Circuit(c.info, c.modules, newTopName) + } +} + +class ReParentCircuit(newTopName: String) extends Transform with PassBased { + def inputForm = HighForm + def outputForm = HighForm + def passSeq = Seq(new ReParentCircuitPass(newTopName)) + + def execute(state: CircuitState): CircuitState = { + CircuitState(runPasses(state.circuit), state.form) + } +} diff --git a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala new file mode 100644 index 000000000..d68edbea5 --- /dev/null +++ b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala @@ -0,0 +1,59 @@ +// See LICENSE for license details. + +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.ir._ +import firrtl.passes.Pass + +// Removes all the unused modules in a circuit by recursing through every +// instance (starting at the main module) +class RemoveUnusedModulesPass extends Pass { + def name = "Remove Unused Modules" + + def run(c: Circuit): Circuit = { + val modulesByName = c.modules.map{ + case m: Module => (m.name, Some(m)) + case m: ExtModule => (m.name, None) + }.toMap + + def getUsedModules(om: Option[Module]): Set[String] = { + om match { + case Some(m) => { + def someStatements(statement: Statement): Seq[Statement] = + statement match { + case b: Block => + b.stmts.map{ someStatements(_) } + .foldLeft(Seq[Statement]())(_ ++ _) + case i: DefInstance => Seq(i) + case w: WDefInstance => Seq(w) + case _ => Seq() + } + + someStatements(m.body).map{ + case s: DefInstance => Set(s.module) | getUsedModules(modulesByName(s.module)) + case s: WDefInstance => Set(s.module) | getUsedModules(modulesByName(s.module)) + case _ => Set[String]() + }.foldLeft(Set(m.name))(_ | _) + } + + case None => Set.empty[String] + } + } + val usedModuleSet = getUsedModules(modulesByName(c.main)) + + val usedModuleSeq = c.modules.filter { usedModuleSet contains _.name } + + Circuit(c.info, usedModuleSeq, c.main) + } +} + +class RemoveUnusedModules extends Transform with PassBased { + def inputForm = MidForm + def outputForm = MidForm + def passSeq = Seq(new RemoveUnusedModulesPass) + + def execute(state: CircuitState): CircuitState = { + CircuitState(runPasses(state.circuit), state.form) + } +} diff --git a/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala b/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala new file mode 100644 index 000000000..2a940563d --- /dev/null +++ b/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala @@ -0,0 +1,42 @@ +// See LICENSE for license details. + +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.ir._ +import firrtl.passes.Pass + +// This doesn't rename ExtModules under the assumption that they're some +// Verilog black box and therefor can't be renamed. Since the point is to +// allow FIRRTL to be linked together using "cat" and ExtModules don't get +// emitted, this should be safe. 
+class RenameModulesAndInstancesPass(rename: (String) => String) extends Pass { + def name = "Rename Modules and Instances" + + def renameInstances(body: Statement): Statement = { + body match { + case m: DefInstance => new DefInstance(m.info, m.name, rename(m.module)) + case m: WDefInstance => new WDefInstance(m.info, m.name, rename(m.module), m.tpe) + case b: Block => new Block( b.stmts map { s => renameInstances(s) } ) + case s: Statement => s + } + } + + def run(c: Circuit): Circuit = { + val modulesx = c.modules.map { + case m: ExtModule => m + case m: Module => new Module(m.info, rename(m.name), m.ports, renameInstances(m.body)) + } + Circuit(c.info, modulesx, c.main) + } +} + +class RenameModulesAndInstances(rename: (String) => String) extends Transform with PassBased { + def inputForm = LowForm + def outputForm = LowForm + def passSeq = Seq(new RenameModulesAndInstancesPass(rename)) + + def execute(state: CircuitState): CircuitState = { + CircuitState(runPasses(state.circuit), state.form) + } +} From e09cbe5b7ee285780c54711c35e04c6b9fe689a4 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Wed, 22 Feb 2017 11:54:54 -0800 Subject: [PATCH 008/273] Create readme add a readme with a single pass some could write --- README.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 000000000..6429c26af --- /dev/null +++ b/README.md @@ -0,0 +1,5 @@ +# barstools +Useful utilities for BAR projects + +Passes/Transforms that could be useful if added here: +* Check that a module was de-duplicated. Useful for MIM CAD flows and currently done in python. From f1c437f83079160fddcca8f6a102f605dab5ea31 Mon Sep 17 00:00:00 2001 From: Angie Wang Date: Sun, 5 Mar 2017 18:50:56 -0800 Subject: [PATCH 009/273] Add Pads + other utilities (#7) [stevo]: adds a bunch of pad frame commits, as well as beginning work on clocking annotations and constraints * start add io pads pass * save progress adding yaml pad info * saving some semi-presentable work -- parses yaml for pad templates and associates templates with ports * added black boxes to the module; still need to hook up * added supply pad yaml example; added option to not include pad for an IO, blackboxed that cat + bit extraction functions * rewrite createbbs and some other parts of the transform * finally got blackboxhelper to work -- seems there was a typo in the firrtl pass (?) have not connected them up properly in the padframe * finished first version of pad transform; need to add bells and whistles + special case stuff * made a bunch of changes in firrtl to shorthand things * done with padframe for signals * started major refactoring; first of pad yaml stuff * forgot to update verilogTemplate -> verilog * rename ParsePadYaml -> ChipPadsYaml; moved some stuff * separated out stuff that describes pads i.e. direction, type, side * forgot to update import for yamlhelpers * trying to make the process of creating annotations more structured * saving annotation helpers but prob better to switch to yaml * saving changes -- reworking annotations * fixing some bugs; properly annotated ports with pads * annotate supply pads * lesson (re)learned. 
cleaned up constants * finished adding supply pads to pad frame; still need to generate io file * also committing updated transform; still without io file * big typo was causing pad verilog files not to be generated * verilator passes with transform; had to fix verilog bb typo * added unused pads; added more thorough tests + did visual inspection of output; made some port types more explicit * renamed files/classes to be clearer * started creating pad io template * update spec so that transform order matters * get rid of logger * went around in circles with blackboxhelper + way to annotate * finished adding + testing pad.io creation * starting clkgen pass -- made model for asynchronously reset clk divider + wrappers for programmatic bundling * temporarily locating albert's utility functions here * saving work on clk constraints * redid input config passing -- pass in tech directory instead; seems like getting clk sink, src, and relationship works * not done; need to pause to do tapeout-y things. the clk gen pass gets all the clks and their sources, but i need to build a proper graph to handle clks coming out of muxes --- .gitignore | 7 +- build.sbt | 5 +- project/Dependencies.scala | 7 +- tapeout/src/main/resources/FoundryPads.yaml | 113 ++++++++ tapeout/src/main/resources/PadPlacement.yaml | 43 +++ .../transforms/clkgen/ClkAnnotations.scala | 248 ++++++++++++++++++ .../scala/transforms/clkgen/ClkDivider.scala | 128 +++++++++ .../transforms/clkgen/ClkSrcTransform.scala | 27 ++ .../clkgen/CreateClkConstraints.scala | 152 +++++++++++ .../transforms/pads/AddIOPadsTransform.scala | 57 ++++ .../scala/transforms/pads/AddPadFrame.scala | 135 ++++++++++ .../transforms/pads/AnnotatePortPads.scala | 135 ++++++++++ .../transforms/pads/AnnotateSupplyPads.scala | 56 ++++ .../transforms/pads/ChiselTopModule.scala | 76 ++++++ .../scala/transforms/pads/CreatePadBBs.scala | 109 ++++++++ .../transforms/pads/FoundryPadsYaml.scala | 95 +++++++ .../transforms/pads/PadAnnotations.scala | 133 ++++++++++ .../transforms/pads/PadDescriptors.scala | 56 ++++ .../scala/transforms/pads/PadPlacement.scala | 116 ++++++++ .../main/scala/transforms/utils/DiGraph.scala | 158 +++++++++++ .../scala/transforms/utils/FileUtils.scala | 65 +++++ .../transforms/utils/InstanceGraph.scala | 51 ++++ .../transforms/utils/LowerAnnotations.scala | 5 + .../transforms/utils/ProgrammaticBundle.scala | 24 ++ .../scala/transforms/utils/YamlHelpers.scala | 21 ++ .../test/resources/PadAnnotationVerilogPart.v | 231 ++++++++++++++++ tapeout/src/test/resources/PadPlacement.io | 236 +++++++++++++++++ .../scala/transforms/clkgen/ClkGenSpec.scala | 181 +++++++++++++ .../scala/transforms/pads/AddIOPadsSpec.scala | 226 ++++++++++++++++ 29 files changed, 2891 insertions(+), 5 deletions(-) create mode 100644 tapeout/src/main/resources/FoundryPads.yaml create mode 100644 tapeout/src/main/resources/PadPlacement.yaml create mode 100644 tapeout/src/main/scala/transforms/clkgen/ClkAnnotations.scala create mode 100644 tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala create mode 100644 tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala create mode 100644 tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala create mode 100644 tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala create mode 100644 tapeout/src/main/scala/transforms/pads/AddPadFrame.scala create mode 100644 tapeout/src/main/scala/transforms/pads/AnnotatePortPads.scala create mode 100644 tapeout/src/main/scala/transforms/pads/AnnotateSupplyPads.scala 
create mode 100644 tapeout/src/main/scala/transforms/pads/ChiselTopModule.scala create mode 100644 tapeout/src/main/scala/transforms/pads/CreatePadBBs.scala create mode 100644 tapeout/src/main/scala/transforms/pads/FoundryPadsYaml.scala create mode 100644 tapeout/src/main/scala/transforms/pads/PadAnnotations.scala create mode 100644 tapeout/src/main/scala/transforms/pads/PadDescriptors.scala create mode 100644 tapeout/src/main/scala/transforms/pads/PadPlacement.scala create mode 100644 tapeout/src/main/scala/transforms/utils/DiGraph.scala create mode 100644 tapeout/src/main/scala/transforms/utils/FileUtils.scala create mode 100644 tapeout/src/main/scala/transforms/utils/InstanceGraph.scala create mode 100644 tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala create mode 100644 tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala create mode 100644 tapeout/src/main/scala/transforms/utils/YamlHelpers.scala create mode 100644 tapeout/src/test/resources/PadAnnotationVerilogPart.v create mode 100644 tapeout/src/test/resources/PadPlacement.io create mode 100644 tapeout/src/test/scala/transforms/clkgen/ClkGenSpec.scala create mode 100644 tapeout/src/test/scala/transforms/pads/AddIOPadsSpec.scala diff --git a/.gitignore b/.gitignore index 2179f6e04..f4406576a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,11 @@ -### local stuff +### Local stuff +proprietary/ src/main/scala/dsptools/sandbox.sc test_run_dir/ *.fir +*.f *.anno + ### XilinxISE template # intermediate build files *.bgn @@ -342,4 +345,4 @@ project/plugins/project/ hs_err_pid* # ignore lib from rocket build -lib +lib \ No newline at end of file diff --git a/build.sbt b/build.sbt index 7374d9915..6f61668a8 100644 --- a/build.sbt +++ b/build.sbt @@ -6,7 +6,7 @@ lazy val commonSettings = Seq( organization := "edu.berkeley.cs", version := "0.1-SNAPSHOT", scalaVersion := "2.11.8", - scalacOptions := Seq("-deprecation", "-feature"), + scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls"), libraryDependencies ++= commonDependencies ) @@ -21,4 +21,5 @@ lazy val tapeout = (project in file("tapeout")) libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) } - ) \ No newline at end of file + ) + .settings(scalacOptions in Test ++= Seq("-language:reflectiveCalls")) \ No newline at end of file diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 6395b9806..f4423b4db 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -7,9 +7,14 @@ object Dependencies { val scalacheckVersion = "1.12.4" val scalacheck = "org.scalacheck" %% "scalacheck" % scalacheckVersion % "test" + // Templating! + val handlebarsVersion = "2.1.1" + val handlebars = "com.gilt" %% "handlebars-scala" % handlebarsVersion + val commonDependencies: Seq[ModuleID] = Seq( scalatest, - scalacheck + scalacheck, + handlebars ) } \ No newline at end of file diff --git a/tapeout/src/main/resources/FoundryPads.yaml b/tapeout/src/main/resources/FoundryPads.yaml new file mode 100644 index 000000000..a6133be0c --- /dev/null +++ b/tapeout/src/main/resources/FoundryPads.yaml @@ -0,0 +1,113 @@ +# Pad types must be one of digital, analog, or supply; pad names must be unique! 
+# This just shows you how you can template things with {{}}, if/else, and the following parameters: +# isInput: Boolean (each digital pad entry should be configurable between both input and output) +# isHorizontal: Boolean (each pad entry should be configurable between both horizontal and vertical) +# NOTE: Expects 1-bit in/out to be named in/out for digital; and 1-bit io for analog (supplies don't have ports) +# Expects module name to be obtained from {{name}} which is derived from yaml name, tpe in the Firrtl pass +# Pipe is used for stripping margins, but indentation is required before the pipe for the yaml reader to work +--- +tpe: analog +name: slow_foundry +width: 0 +height: 0 +verilog: | + |// Foundry Analog Pad Example + |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} + |// Call your instance PAD + |module {{name}}( + | inout io + |); + |endmodule +--- +tpe: analog +name: fast_custom +width: 0 +height: 0 +verilog: | + |// Custom Analog Pad Example + |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} + |// Call your instance PAD + |module {{name}}( + | inout io + |); + |endmodule +--- +tpe: digital +name: from_tristate_foundry +width: 0 +height: 0 +verilog: | + |// Digital Pad Example + |// Signal Direction: {{#if isInput}}Input{{else}}Output{{/if}} + |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} + |// Call your instance PAD + |module {{name}}( + | input in, + | output reg out + |); + | // Where you would normally dump your pad instance + | always @* begin + | out = in; + | end + |endmodule +--- +tpe: digital +name: fake_digital +width: 0 +height: 0 +verilog: | + |// (Fake/Unused) Digital Pad Example + |// Signal Direction: {{#if isInput}}Input{{else}}Output{{/if}} + |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} + |// Call your instance PAD + |module {{name}}( + | input in, + | output reg out + |); + | // Where you would normally dump your pad instance + | always @* begin + | out = in; + | end + |endmodule +--- +tpe: supply +name: vdd +width: 0 +height: 0 +supplySetNum: 1 +verilog: | + |// VDD Pad Example (No IO) + |// Can group some number together as required by the foundry + |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} + |// Call your instance array PAD[0:0], PAD[2:0], etc. + |module {{name}}( + |); + |endmodule +--- +tpe: supply +name: vss +width: 0 +height: 0 +supplySetNum: 2 +verilog: | + |// VSS Pad Example (No IO) + |// Can group some number together as required by the foundry + |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} + |// Call your instance array PAD[0:0], PAD[2:0], etc. + |module {{name}}( + |); + |endmodule +--- +tpe: supply +name: avss +width: 0 +height: 0 +supplySetNum: 1 +verilog: | + |// Analog VSS Pad Example (No IO) + |// Can group some number together as required by the foundry + |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} + |// Call your instance array PAD[0:0], PAD[2:0], etc. 
+ |module {{name}}( + |); + |endmodule diff --git a/tapeout/src/main/resources/PadPlacement.yaml b/tapeout/src/main/resources/PadPlacement.yaml new file mode 100644 index 000000000..a8a94f1cd --- /dev/null +++ b/tapeout/src/main/resources/PadPlacement.yaml @@ -0,0 +1,43 @@ +# Example for Innovus: https://legacy.iis.ee.ethz.ch/~vlsi2/ex05/ex05.pdf +--- +file: pads.io +left: "1" # Bottom to top +top: "2" # Left to right +right: "3" # Bottom to top +bottom: "4" # Left to right +# Note: In your scripts, you should specify instance array styles +# i.e. hdl_instance_array_naming_style string (For Genus) +instanceArray: "{{signal}}[{{idx}}]" +padLine: | + | (inst name = "{{padInst}}") # Side: {{side}}, Order: {{padIdx}} +template: | + |(globals + | version = 3 + | io_order = default + |) + |(iopad + | (bottomleft + | (inst name="corner_ll" cell="CORNER_EXAMPLE" ) + | ) + | (bottomright + | (inst name="corner_lr" orientation=MY cell="CORNER_EXAMPLE" ) + | ) + | (topleft + | (inst name="corner_ul" orientation=MX cell="CORNER_EXAMPLE" ) + | ) + | (topright + | (inst name="corner_ur" cell="CORNER_EXAMPLE" ) + | ) + | (left + |{{leftPads}} + | ) + | (right + |{{rightPads}} + | ) + | (top + |{{topPads}} + | ) + | (bottom + |{{bottomPads}} + | ) + |) \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkAnnotations.scala b/tapeout/src/main/scala/transforms/clkgen/ClkAnnotations.scala new file mode 100644 index 000000000..832cfbb12 --- /dev/null +++ b/tapeout/src/main/scala/transforms/clkgen/ClkAnnotations.scala @@ -0,0 +1,248 @@ +package barstools.tapeout.transforms.clkgen + +import net.jcazevedo.moultingyaml._ +import firrtl.annotations._ +import chisel3.experimental._ +import chisel3._ +import firrtl._ +import firrtl.transforms.DedupModules + +object ClkAnnotationsYaml extends DefaultYamlProtocol { + implicit val _clksrc = yamlFormat3(ClkSrc) + implicit val _sink = yamlFormat1(Sink) + implicit val _clkport = yamlFormat2(ClkPortAnnotation) + implicit val _genclk = yamlFormat4(GeneratedClk) + implicit val _clkmod = yamlFormat2(ClkModAnnotation) +} +case class ClkSrc(period: Double, waveform: Seq[Double] = Seq(), async: Seq[String] = Seq()) { + def getWaveform = if (waveform == Seq.empty) Seq(0, period/2) else waveform + // async = ids of top level clocks that are async with this clk + // Default is 50% duty cycle, period units is default + require(getWaveform.sorted == getWaveform, "Waveform edges must be in order") + require(getWaveform.length == 2, "Must specify time for rising edge, then time for falling edge") +} + +case class Sink(src: Option[ClkSrc] = None) + +case class ClkPortAnnotation(tag: Option[Sink] = None, id: String) { + import ClkAnnotationsYaml._ + def serialize: String = this.toYaml.prettyPrint +} + +abstract class ClkModType { + def serialize: String +} +case object ClkMux extends ClkModType { + def serialize: String = "mux" +} +case object ClkDiv extends ClkModType { + def serialize: String = "div" +} +case object ClkGen extends ClkModType { + def serialize: String = "gen" +} + +// Unlike typical SDC, starts at 0. +// Otherwise, see pg. 63 of "Constraining Designs for Synthesis and Timing Analysis" +// by S. Gangadharan +// original clk: |-----|_____|-----|_____| +// edges: 0 1 2 3 4 +// div. 
by 4, 50% duty cycle --> edges = 0, 2, 4 +// ---> |-----------|___________| +// sources = source id's +case class GeneratedClk( + id: String, + sources: Seq[String] = Seq(), + referenceEdges: Seq[Int] = Seq(), + period: Option[Double] = None) { + require(referenceEdges.sorted == referenceEdges, "Edges must be in order for generated clk") + if (referenceEdges.nonEmpty) require(referenceEdges.length % 2 == 1, "# of reference edges must be odd!") +} + +case class ClkModAnnotation(tpe: String, generatedClks: Seq[GeneratedClk]) { + + def modType: ClkModType = HasClkAnnotation.modType(tpe) + + modType match { + case ClkDiv => + generatedClks foreach { c => + require(c.referenceEdges.nonEmpty, "Reference edges must be defined for clk divider!") + require(c.sources.length == 1, "Clk divider output can only have 1 source") + require(c.period.isEmpty, "No period should be specified for clk divider output") + } + case ClkMux => + generatedClks foreach { c => + require(c.referenceEdges.isEmpty, "Reference edges must not be defined for clk mux!") + require(c.period.isEmpty, "No period should be specified for clk mux output") + require(c.sources.nonEmpty, "Clk muxes must have sources!") + } + case ClkGen => + generatedClks foreach { c => + require(c.referenceEdges.isEmpty, "Reference edges must not be defined for clk gen!") + require(c.sources.isEmpty, "Clk generators shouldn't have constrained sources") + require(c.period.nonEmpty, "Clk generator output period should be specified!") + } + } + import ClkAnnotationsYaml._ + def serialize: String = this.toYaml.prettyPrint +} + +abstract class FirrtlClkTransformAnnotation { + def targetName: String +} + +// Firrtl version +case class TargetClkModAnnoF(target: ModuleName, anno: ClkModAnnotation) extends FirrtlClkTransformAnnotation { + def getAnno = Annotation(target, classOf[ClkSrcTransform], anno.serialize) + def targetName = target.name + def modType = anno.modType + def generatedClks = anno.generatedClks + def getAllClkPorts = anno.generatedClks.map(x => + List(List(x.id), x.sources).flatten).flatten.distinct.map(Seq(targetName, _).mkString(".")) +} + +// Chisel version +case class TargetClkModAnnoC(target: Module, anno: ClkModAnnotation) { + def getAnno = ChiselAnnotation(target, classOf[ClkSrcTransform], anno.serialize) +} + +// Firrtl version +case class TargetClkPortAnnoF(target: ComponentName, anno: ClkPortAnnotation) extends FirrtlClkTransformAnnotation { + def getAnno = Annotation(target, classOf[ClkSrcTransform], anno.serialize) + def targetName = Seq(target.module.name, target.name).mkString(".") + def modId = Seq(target.module.name, anno.id).mkString(".") + def sink = anno.tag +} + +// Chisel version +case class TargetClkPortAnnoC(target: Element, anno: ClkPortAnnotation) { + def getAnno = ChiselAnnotation(target, classOf[ClkSrcTransform], anno.serialize) +} + +object HasClkAnnotation { + + import ClkAnnotationsYaml._ + + def modType(tpe: String): ClkModType = tpe match { + case s: String if s == ClkMux.serialize => ClkMux + case s: String if s == ClkDiv.serialize => ClkDiv + case s: String if s == ClkGen.serialize => ClkGen + case _ => throw new Exception("Clock module annotaiton type invalid") + } + + def unapply(a: Annotation): Option[FirrtlClkTransformAnnotation] = a match { + case Annotation(f, t, s) if t == classOf[ClkSrcTransform] => f match { + case m: ModuleName => + Some(TargetClkModAnnoF(m, s.parseYaml.convertTo[ClkModAnnotation])) + case c: ComponentName => + Some(TargetClkPortAnnoF(c, s.parseYaml.convertTo[ClkPortAnnotation])) + 
case _ => throw new Exception("Clk source annotation only valid on module or component!") + } + case _ => None + } + + def apply(annos: Seq[Annotation]): Option[(Seq[TargetClkModAnnoF],Seq[TargetClkPortAnnoF])] = { + // Get all clk-related annotations + val clkAnnos = annos.map(x => unapply(x)).flatten + val targets = clkAnnos.map(x => x.targetName) + require(targets.distinct.length == targets.length, "Only 1 clk related annotation is allowed per component/module") + if (clkAnnos.length == 0) None + else { + val componentAnnos = clkAnnos.filter { + case TargetClkPortAnnoF(ComponentName(_, ModuleName(_, _)), _) => true + case _ => false + }.map(x => x.asInstanceOf[TargetClkPortAnnoF]) + val associatedMods = componentAnnos.map(x => x.target.module.name) + val moduleAnnos = clkAnnos.filter { + case TargetClkModAnnoF(ModuleName(m, _), _) => + require(associatedMods contains m, "Clk modules should always have clk port annotations!") + true + case _ => false + }.map(x => x.asInstanceOf[TargetClkModAnnoF]) + Some((moduleAnnos, componentAnnos)) + } + } + +} + +// Applies to both black box + normal module +trait IsClkModule { + + self: chisel3.Module => + + private def doNotDedup(module: Module): Unit = { + annotate(ChiselAnnotation(module, classOf[DedupModules], "nodedup!")) + } + doNotDedup(this) + + private def extractElementNames(signal: Data): Seq[String] = { + val names = signal match { + case elt: Record => + elt.elements.map { case (key, value) => extractElementNames(value).map(x => key + "_" + x) }.toSeq.flatten + case elt: Vec[_] => + elt.zipWithIndex.map { case (elt, i) => extractElementNames(elt).map(x => i + "_" + x) }.toSeq.flatten + case elt: Element => Seq("") + case elt => throw new Exception(s"Cannot extractElementNames for type ${elt.getClass}") + } + names.map(s => s.stripSuffix("_")) + } + + // TODO: Replace! 
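  // Editor's note, a worked example of the flattening convention shared by
  // extractElementNames (above) and extractElements (below), using a hypothetical bundle:
  //   class DivIO extends Bundle { val clks = Vec(2, Clock()); val en = Bool() }
  // extractElementNames on an instance of DivIO is expected to return
  //   Seq("clks_0", "clks_1", "en")
  // i.e. Record fields and Vec indices joined with "_", mirroring how Chisel flattens
  // aggregate IO names, which is what getIOName below relies on when matching elements.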
+ def extractElements(signal: Data): Seq[Element] = { + signal match { + case elt: Record => + elt.elements.map { case (key, value) => extractElements(value) }.toSeq.flatten + case elt: Vec[_] => + elt.map { elt => extractElements(elt) }.toSeq.flatten + case elt: Element => Seq(elt) + case elt => throw new Exception(s"Cannot extractElements for type ${elt.getClass}") + } + } + + def getIOName(signal: Element): String = { + val possibleNames = extractElements(io).zip(extractElementNames(io)).map { + case (sig, name) if sig == signal => Some(name) + case _ => None + }.flatten + if (possibleNames.length == 1) possibleNames.head + else throw new Exception("You can only get the name of an io port!") + } + + def annotateDerivedClks(tpe: ClkModType, generatedClks: Seq[GeneratedClk]): Unit = + annotateDerivedClks(ClkModAnnotation(tpe.serialize, generatedClks)) + def annotateDerivedClks(anno: ClkModAnnotation): Unit = annotateDerivedClks(this, anno) + def annotateDerivedClks(m: Module, anno: ClkModAnnotation): Unit = + annotate(TargetClkModAnnoC(m, anno).getAnno) + + def annotateClkPort(p: Element): Unit = annotateClkPort(p, None, "") + def annotateClkPort(p: Element, sink: Sink): Unit = annotateClkPort(p, Some(sink), "") + def annotateClkPort(p: Element, id: String): Unit = annotateClkPort(p, None, id) + def annotateClkPort(p: Element, sink: Sink, id: String): Unit = annotateClkPort(p, Some(sink), id) + def annotateClkPort(p: Element, sink: Option[Sink], id: String): Unit = { + // If no id is specified, it'll try to figure out a name, assuming p is an io port + val newId = id match { + case "" => + getIOName(p) + case _ => id + } + annotateClkPort(p, ClkPortAnnotation(sink, newId)) + } + + def annotateClkPort(p: Element, anno: ClkPortAnnotation): Unit = { + p.dir match { + case chisel3.core.Direction.Input => + require(anno.tag.nonEmpty, "Module inputs must be clk sinks") + require(anno.tag.get.src.isEmpty, + "Clock module (not top) input clks should not have clk period, etc. specified") + case chisel3.core.Direction.Output => + require(anno.tag.isEmpty, "Module outputs must not be clk sinks (they're sources!)") + case _ => + throw new Exception("Clk port direction must be specified!") + } + p match { + case _: chisel3.core.Clock => + case _ => throw new Exception("Clock port must be of type Clock") + } + annotate(TargetClkPortAnnoC(p, anno).getAnno) + } +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala b/tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala new file mode 100644 index 000000000..755a66aaa --- /dev/null +++ b/tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala @@ -0,0 +1,128 @@ +package barstools.tapeout.transforms.clkgen + +import chisel3.experimental.{withClockAndReset, withClock, withReset} +import chisel3._ +import chisel3.util.RegInit +import barstools.tapeout.transforms._ +import chisel3.util.HasBlackBoxInline + +// WARNING: ONLY WORKS WITH VERILATOR B/C YOU NEED ASYNC RESET! 
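// Editor's sketch of intended usage (hypothetical parent module, not part of the original
// patch): instantiate the divider defined below from a design with an implicit clock and an
// active-high Bool reset, exposing two of the divided phases.
class SEClkDividerUsageExample extends Module {
  val io = IO(new Bundle {
    val phase0 = Output(Clock())
    val phase2 = Output(Clock())
  })
  // Divide the module clock by 4 and pull out phases 0 and 2
  val div = Module(new SEClkDivider(divBy = 4, phases = Seq(0, 2)))
  div.io.inClk := clock
  div.io.reset := reset
  io.phase0 := div.io.outClks(0)
  io.phase2 := div.io.outClks(2)
}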
+ +class SEClkDividerIO(phases: Seq[Int]) extends Bundle { + val reset = Input(Bool()) + val inClk = Input(Clock()) + val outClks = Output(CustomIndexedBundle(Clock(), phases)) + override def cloneType = (new SEClkDividerIO(phases)).asInstanceOf[this.type] +} + +class SEClkDividerBB(phases: Seq[Int], f: String) extends BlackBox with HasBlackBoxInline { + val verilog = scala.io.Source.fromFile(f).getLines.mkString("\n") + // names without io + val io = IO(new SEClkDividerIO(phases)) + val modName = this.getClass.getSimpleName + require(verilog contains modName, "Clk divider Verilog module must be named ClkDividerBB") + io.elements foreach { case (field, elt) => + require(verilog contains field, s"Verilog file should contain io ${field}")} + setInline(s"${modName}.v", verilog) +} + +class AsyncRegInit extends BlackBox with HasBlackBoxInline { + val io = IO(new Bundle { + val clk = Input(Clock()) + val reset = Input(Bool()) + val init = Input(Bool()) + val in = Input(Bool()) + val out = Output(Bool()) + }) + + setInline("AsyncRegInit.v", + s""" + |module AsyncRegInit( + | input clk, + | input reset, + | input init, + | input in, + | output reg out + |); + | always @ (posedge clk or posedge reset) begin + | if (reset) begin + | out <= init; + | end else begin + | out <= in; + | end + | end + |endmodule + """.stripMargin) +} + +object AsyncRegInit { + def apply(clk: Clock, reset: Bool, init: Bool): AsyncRegInit = { + val asyncRegInit = Module(new AsyncRegInit) + asyncRegInit.io.clk := clk + asyncRegInit.io.reset := reset + asyncRegInit.io.init := init + asyncRegInit + } +} + +// TODO: Convert analogFile into implicit? +// If syncReset = false, it's implied that reset is strobed before any clk rising edge happens +// i.e. when this is a clkgen fed by another clkgen --> need to adjust the indexing b/c +// you're already shifting on the first clk rising edge +class SEClkDivider(divBy: Int, phases: Seq[Int], analogFile: String = "", syncReset: Boolean = true) + extends Module with IsClkModule { + + require(phases.distinct.length == phases.length, "Phases should be distinct!") + + val io = IO(new SEClkDividerIO(phases)) + + annotateClkPort(io.inClk, Sink()) + + val referenceEdges = phases.map(p => Seq(2 * p, 2 * (p + 1), 2 * (p + divBy))) + + val generatedClks = io.outClks.elements.zip(referenceEdges).map { case ((field, eltx), edges) => + val elt = eltx.asInstanceOf[Element] + annotateClkPort(elt) + GeneratedClk(getIOName(elt), sources = Seq(getIOName(io.inClk)), edges) + }.toSeq + + annotateDerivedClks(ClkDiv, generatedClks) + + require(divBy >= 1, "Clk division factor must be >= 1") + + divBy match { + case i: Int if i == 1 => + require(phases == Seq(0), "Clk division by 1 shouldn't generate new phases") + io.outClks(0) := io.inClk + case i: Int if i > 1 && analogFile == "" => + // Shift register based clock divider (duty cycle is NOT 50%) + val initVals = Seq(true.B) ++ Seq.fill(divBy - 1)(false.B) + + /************ Real design assumes asnyc reset!!! 
+ withClockAndReset(io.inClk, io.reset) { + val regs = initVals.map(i => RegInit(i)) + // Close the loop + regs.head := regs.last + // Shift register + regs.tail.zip(regs.init) foreach { case (lhs, rhs) => lhs := rhs } + // Assign register output to correct clk out + phases foreach { idx => io.outClks(idx) := regs(idx).asClock } + } + *************/ + + val regs = initVals.map(i => AsyncRegInit(io.inClk, io.reset, i)) + regs.head.io.in := regs.last.io.out + regs.tail.zip(regs.init) foreach { case (lhs, rhs) => lhs.io.in := rhs.io.out } + phases foreach { idx => + val regIdx = if (syncReset) idx else (idx + 1) % divBy + io.outClks(idx) := regs(regIdx).io.out.asClock + } + + case _ => + if (new java.io.File(analogFile).exists) { + val bb = Module(new SEClkDividerBB(phases, analogFile)) + io <> bb.io + } + else throw new Exception("Clock divider Verilog file invalid!") + } +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala b/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala new file mode 100644 index 000000000..a003abf8b --- /dev/null +++ b/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala @@ -0,0 +1,27 @@ +package barstools.tapeout.transforms.clkgen + +import firrtl._ +import firrtl.annotations._ +import firrtl.passes._ +import firrtl.ir._ + +class ClkSrcTransform extends Transform with SimpleRun { + + override def inputForm: CircuitForm = LowForm + override def outputForm: CircuitForm = LowForm + + override def execute(state: CircuitState): CircuitState = { + val collectedAnnos = HasClkAnnotation(getMyAnnotations(state)) + collectedAnnos match { + // Transform not used + case None => CircuitState(state.circuit, LowForm) + case Some((clkModAnnos, clkPortAnnos)) => + val targetDir = barstools.tapeout.transforms.GetTargetDir(state) + val passSeq = Seq( + InferTypes, + new CreateClkConstraints(clkModAnnos, clkPortAnnos, targetDir) + ) + CircuitState(runPasses(state.circuit, passSeq), LowForm) + } + } +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala b/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala new file mode 100644 index 000000000..2e5021de5 --- /dev/null +++ b/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala @@ -0,0 +1,152 @@ +// See license file for details + +package barstools.tapeout.transforms.clkgen + +import firrtl.passes.clocklist._ +import firrtl.annotations._ +import firrtl.ir._ +import firrtl.Utils._ +import barstools.tapeout.transforms._ +import scala.collection.immutable.ListMap + +// TODO: Really should be moved out of memlib +import firrtl.passes.memlib.AnalysisUtils._ +import firrtl.passes._ + +// TODO: Wait until Albert merges into firrtl +import firrtl.analyses._ + +class CreateClkConstraints( + clkModAnnos: Seq[TargetClkModAnnoF], + clkPortAnnos: Seq[TargetClkPortAnnoF], + targetDir: String) extends Pass { + + def name = "Create clock constraints" + + // TODO: Are annotations only valid on ports? + + def run(c: Circuit): Circuit = { + + val top = c.main + + // Remove everything from the circuit, unless it has a clock type + // This simplifies the circuit drastically so InlineInstances doesn't take forever. 
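    // Editor's note (illustrative): for a module with ports {clk: Clock, in: UInt<8>, out: UInt<8>}
    // and a clock-divider child instance, only clk, the instances that carry clocks, and the
    // clock-typed connections are expected to survive this step; the UInt datapath is dropped,
    // so the inline + getOrigin walk further down only ever traverses clock nets.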
+ val onlyClockCircuit = RemoveAllButClocks.run(c) + + val instanceGraph = new InstanceGraph(onlyClockCircuit) + + val clkModNames = clkModAnnos.map(x => x.targetName) + // ** Module name -> Absolute path of (unique) instance + val clkMods = clkModNames.map { x => + // NoDeDup was run so only 1 instance of each module should exist + val inst = instanceGraph.findInstancesInHierarchy(x) + require(inst.length == 1, "Clk modules should have not ben dedup-ed") + // Return map of module name to absolute path as a string + // Note: absolute path doesn't contain top module + to work with inlineInstances, + // delimit with $ + x -> inst.head.tail.map(y => y.name).mkString("$") + }.toMap + + val clkPortIds = clkPortAnnos.map { a => a.modId } + require(clkPortIds.distinct.length == clkPortIds.length, "All clk port IDs must be unique!") + + val allModClkPorts = clkModAnnos.map { x => + val modClkPorts = x.getAllClkPorts + require(modClkPorts.intersect(clkPortIds).length == modClkPorts.length, + "Clks given relationships via clk modules must have been annotated as clk ports") + modClkPorts + }.flatten.distinct + + val clkPortMap = clkPortIds.zip(clkPortAnnos).toMap + val clkModMap = clkModNames.zip(clkModAnnos).toMap + + val (clkSinksTemp, clkSrcsTemp) = clkPortAnnos.partition { + case TargetClkPortAnnoF(_, ClkPortAnnotation(tag, _)) if tag.nonEmpty => true + case _ => false + } + + def convertClkPortAnnoToMap(annos: Seq[TargetClkPortAnnoF]): ListMap[String, String] = + ListMap(annos.map { x => + val target = x.target + val absPath = { + if (top == target.module.name) LowerName(target.name) + else Seq(clkMods(target.module.name), LowerName(target.name)).mkString(".") + } + x.modId -> absPath + }.sortBy(_._1): _*) + + // ** clk port -> absolute path + val clkSinks = convertClkPortAnnoToMap(clkSinksTemp) + val clkSrcs = convertClkPortAnnoToMap(clkSrcsTemp) + + clkSrcs foreach { case (id, path) => + require(allModClkPorts contains id, "All clock source properties must be defined by their respective modules") } + + // Don't inline clock modules + val modulesToInline = (c.modules.collect { + case Module(_, n, _, _) if n != top && !clkModNames.contains(n) => + ModuleName(n, CircuitName(top)) + }).toSet + + val inlineTransform = new InlineInstances + val inlinedCircuit = inlineTransform.run(onlyClockCircuit, modulesToInline, Set()).circuit + + val topModule = inlinedCircuit.modules.find(_.name == top).getOrElse(throwInternalError) + + // Build a hashmap of connections to use for getOrigins + val connects = getConnects(topModule) + + // Clk sinks are either inputs to clock modules or top clk inputs --> separate + // ** clk port -> absolute path + val (topClks, clkModSinks) = clkSinks.partition { + case (modId, absPath) if modId.split("\\.").head == top => true + case _ => false + } + + // Must be 1:1 originally! 
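    // Editor's worked example (hypothetical names): clkSrcs might contain
    //   "ClkDivMod.outClks_0" -> "clkDivInst.outClks_0"   (port id -> lowered absolute path)
    // and flipping it yields
    //   "clkDivInst.outClks_0" -> "ClkDivMod.outClks_0"
    // so that getOrigin results, which are absolute paths, can be mapped back to port ids.
    // The 1:1 requirement is what makes this inversion lossless.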
+ def flipMapping(m: ListMap[String, String]): ListMap[String, String] = + m.map { case (a, b) => b -> a } + + val clkSrcsFlip = flipMapping(clkSrcs) + val topClksFlip = flipMapping(topClks) + + // Find origins of clk mod sinks + val clkModSinkToSourceMap = clkModSinks.map { case (sinkId, sinkAbsPath) => + val sourceAbsPath = getOrigin(connects, sinkAbsPath).serialize + val sourceId = { + // sources of sinks are generated clks or top level clk inputs + if (clkSrcsFlip.contains(sourceAbsPath)) clkSrcsFlip(sourceAbsPath) + else if (topClksFlip.contains(sourceAbsPath)) topClksFlip(sourceAbsPath) + else throw new Exception(s"Absolute path of clk source for $sinkId not found!") + } + sinkId -> sourceId + } + + c.modules.foreach { + case mod: DefModule => + mod.ports.foreach { + case Port(_, n, dir, tpe) + if tpe == ClockType && + ((dir == Input && mod.name == top) || (dir == Output && clkModNames.contains(mod.name))) => + clkPortAnnos.find(x => + // TODO: Not sufficiently general for output clks? Might have forgotten to label a clk module... + LowerName(x.target.name) == n && x.target.module.name == mod.name).getOrElse( + throw new Exception("All top module input clks/clk module output clocks must be sinks/sources!")) + case _ => + } + } + + // Find sinks used to derive clk mod sources + val clkModSourceToSinkMap: Seq[(String, Seq[String])] = clkModAnnos.map(x => { + val modName = x.targetName + x.generatedClks.map(y => Seq(modName, y.id).mkString(".") -> y.sources.map(z => Seq(modName, z).mkString("."))) + } ).flatten + + topClks.foreach {x => println(s"top clk: $x")} + clkModSinks.foreach { x => println(s"clk sink: $x")} + clkSrcs.foreach { x => println(s"gen clk: $x")} + clkModSinkToSourceMap.foreach { x => println(s"sink -> src: $x")} + clkModSourceToSinkMap.foreach { x => println(s"src -> dependent sinks: $x")} + c + } +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala b/tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala new file mode 100644 index 000000000..2531a6321 --- /dev/null +++ b/tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala @@ -0,0 +1,57 @@ +package barstools.tapeout.transforms.pads + +import firrtl._ +import firrtl.annotations._ +import firrtl.passes._ +import firrtl.ir._ +import barstools.tapeout.transforms._ + +// Main Add IO Pad transform operates on low Firrtl +class AddIOPadsTransform extends Transform with SimpleRun { + + override def inputForm: CircuitForm = LowForm + override def outputForm: CircuitForm = LowForm + + override def execute(state: CircuitState): CircuitState = { + val collectedAnnos = HasPadAnnotation(getMyAnnotations(state)) + collectedAnnos match { + // Transform not used + case None => CircuitState(state.circuit, LowForm) + case Some(x) => + val techLoc = (new TechnologyLocation).get(state) + // Get foundry pad templates from yaml + val foundryPads = FoundryPadsYaml.parse(techLoc) + val portPads = AnnotatePortPads(state.circuit, x.topModName, foundryPads, x.componentAnnos, + HasPadAnnotation.getSide(x.defaultPadSide)) + val supplyPads = AnnotateSupplyPads(foundryPads, x.supplyAnnos) + val (circuitWithBBs, bbAnnotations) = CreatePadBBs(state.circuit, portPads, supplyPads) + val namespace = Namespace(state.circuit) + val padFrameName = namespace newName s"${x.topModName}_PadFrame" + val topInternalName = namespace newName s"${x.topModName}_Internal" + val targetDir = barstools.tapeout.transforms.GetTargetDir(state) + PadPlacementFile.generate(techLoc, targetDir, 
padFrameName, portPads, supplyPads) + val passSeq = Seq( + Legalize, + ResolveGenders, + // Types really need to be known... + InferTypes, + new AddPadFrame(x.topModName, padFrameName, topInternalName, portPads, supplyPads), + RemoveEmpty, + CheckInitialization, + InferTypes, + Uniquify, + ResolveKinds, + ResolveGenders + ) + // Expects BlackBox helper to be run after to inline pad Verilog! + val prevAnnos = state.annotations.getOrElse(AnnotationMap(Seq.empty)).annotations + val cs = CircuitState( + runPasses(circuitWithBBs, passSeq), + LowForm, + Some(AnnotationMap(prevAnnos ++ bbAnnotations)) + ) + // TODO: *.f file is overwritten on subsequent executions, but it doesn't seem to be used anywhere? + (new firrtl.transforms.BlackBoxSourceHelper).execute(cs) + } + } +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/pads/AddPadFrame.scala b/tapeout/src/main/scala/transforms/pads/AddPadFrame.scala new file mode 100644 index 000000000..17b22fbb9 --- /dev/null +++ b/tapeout/src/main/scala/transforms/pads/AddPadFrame.scala @@ -0,0 +1,135 @@ +// See LICENSE for license details. + +package barstools.tapeout.transforms.pads + +import firrtl.annotations._ +import firrtl.ir._ +import firrtl._ +import firrtl.passes._ + +// Analog is like UInt, SInt; it's not a direction (which is kind of weird) +// WARNING: Analog type is associated with Verilog InOut! i.e. even if digital pads are tri-statable, b/c tristate +// requires an additional ctrl signal, digital pads must be operated in a single "static" condition here; Analog will +// be paired with analog pads + +class AddPadFrame( + topMod: String, + padFrameName: String, + topInternalName: String, + ioPads: Seq[PortIOPad], + supplyPads: Seq[TopSupplyPad]) extends Pass { + + def name: String = "Add Padframe" + + def run(c: Circuit): Circuit = { + // New modules consist of old modules (with top renamed to internal) + padFrame + newTop + val newMods = c.modules.map { + case mod: Module if mod.name == topMod => + // Original top module is now internal module + mod.copy(name = topInternalName) + case m => m + } ++ Seq(buildPadFrame(), buildTopWrapper()) + + // Reparent so circuit top is whatever uses pads! + // TODO: Can the top level be a blackbox? 
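    // Editor's note (hypothetical top name "MyChip", modulo namespace uniquification): after
    // this pass the circuit is expected to contain
    //   MyChip            - new wrapper built by buildTopWrapper, now the circuit main
    //   MyChip_PadFrame   - pad blackbox instances plus feed-throughs (buildPadFrame)
    //   MyChip_Internal   - the original top module, renamed above
    // with MyChip instantiating the other two, so every IO flows outside -> padframe -> internal.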
+ c.copy(modules = newMods, main = topMod) + } + + def intName(p: PortIOPad) = s"${p.portName}_Int" + def extName(p: PortIOPad) = s"${p.portName}_Ext" + + def buildTopWrapper(): Module = { + // outside -> padframe -> internal + // Top (with same name) contains 1) padframe + 2) internal signals + val padFrameInst = WDefInstance(padFrameName, padFrameName) + val topInternalInst = WDefInstance(topInternalName, topInternalName) + val padFrameRef = WRef(padFrameName) + val topInternalRef = WRef(topInternalName) + val connects = ioPads.map { p => + val io = WRef(p.portName) + val intIo = WSubField(topInternalRef, p.portName) + val padFrameIntIo = WSubField(padFrameRef, intName(p)) + val padFrameExtIo = WSubField(padFrameRef, extName(p)) + p.port.tpe match { + case AnalogType(_) => + // Analog pads only have 1 port + // If Analog port doesn't have associated pad, don't hook it up to the padframe + val analogAttachInt = Seq(Attach(NoInfo, Seq(io, intIo))) + if (p.pad.isEmpty) analogAttachInt + else analogAttachInt :+ Attach(NoInfo, Seq(io, padFrameExtIo)) + case _ => p.portDirection match { + case Input => + // input to padframe ; padframe to internal + Seq(Connect(NoInfo, padFrameExtIo, io), Connect(NoInfo, intIo, padFrameIntIo)) + case Output => + // internal to padframe ; padframe to output + Seq(Connect(NoInfo, padFrameIntIo, intIo), Connect(NoInfo, io, padFrameExtIo)) + } + } + }.flatten + val stmts = Seq(padFrameInst, topInternalInst) ++ connects + val ports = ioPads.map(p => p.port) + Module(NoInfo, topMod, ports = ports, body = Block(stmts)) + } + + def buildPadFrame(): Module = { + // Internal = connection to original RTL; External = connection to outside world + // Note that for analog pads, since there's only 1 port, only _Ext is used + val intPorts = ioPads.map(p => p.port.tpe match { + case AnalogType(_) => None + case _ => Some(p.port.copy(name = intName(p), direction = Utils.swap(p.portDirection))) + }).flatten + val extPorts = ioPads.map(p => p.port.tpe match { + // If an analog port doesn't have a pad associated with it, don't add it to the padframe + case AnalogType(_) if p.pad.isEmpty => None + case _ => Some(p.port.copy(name = extName(p))) + } ).flatten + // Only create pad black boxes for ports that require them + val ioPadInsts = ioPads.filter(x => !x.pad.isEmpty).map(p => WDefInstance(p.firrtlBBName, p.firrtlBBName)) + // Connect to pad only if used ; otherwise leave dangling for Analog + // and just connect through for digital (assumes no supplies) + val connects = ioPads.map { p => + val intRef = WRef(intName(p), p.port.tpe) + val extRef = WRef(extName(p), p.port.tpe) + p.pad match { + // No pad needed -- just connect through + case None => p.port.tpe match { + case AnalogType(_) => + Seq(EmptyStmt) + case _ => + val (lhs, rhs) = p.portDirection match { + case Input => (intRef, extRef) + case Output => (extRef, intRef) + } + Seq(Connect(NoInfo, lhs, rhs)) + } + // Add pad + case Some(x) => + val padRef = WRef(p.firrtlBBName) + p.port.tpe match { + // Analog type has 1:1 mapping to inout + case AnalogType(_) => + val padIORef = WSubField(padRef, AnalogPad.ioName) + Seq(Attach(NoInfo, Seq(padIORef, extRef))) + // Normal verilog in/out can be mapped to uint, sint, or clocktype, so need cast + case _ => + val padBBType = UIntType(getWidth(p.port.tpe)) + val padInRef = WSubField(padRef, DigitalPad.inName, padBBType, UNKNOWNGENDER) + val padOutRef = WSubField(padRef, DigitalPad.outName, padBBType, UNKNOWNGENDER) + val (rhsPadIn, lhsPadOut) = p.portDirection match { + case Input 
=> (extRef, intRef) + case Output => (intRef, extRef) + } + // Pad inputs are treated as UInts, so need to do type conversion + // from type to UInt pad input; from pad output to type + Seq( + Connect(NoInfo, padInRef, castRhs(padBBType, rhsPadIn)), + Connect(NoInfo, lhsPadOut, castRhs(p.port.tpe, padOutRef))) + } + } + }.flatten + val supplyPadInsts = supplyPads.map(p => p.instNames.map(n => WDefInstance(n, p.firrtlBBName))).flatten + Module(NoInfo, padFrameName, ports = intPorts ++ extPorts, body = Block(ioPadInsts ++ connects ++ supplyPadInsts)) + } + +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/pads/AnnotatePortPads.scala b/tapeout/src/main/scala/transforms/pads/AnnotatePortPads.scala new file mode 100644 index 000000000..1f6911a07 --- /dev/null +++ b/tapeout/src/main/scala/transforms/pads/AnnotatePortPads.scala @@ -0,0 +1,135 @@ +package barstools.tapeout.transforms.pads + +import firrtl.annotations._ +import firrtl._ +import firrtl.ir._ +import firrtl.passes._ +import barstools.tapeout.transforms._ + +// TODO: Make some trait with commonalities between IO Pad + supply pad + +// Pads associated with IO Ports! (Not supplies!) +case class PortIOPad( + pad: Option[FoundryPad], + padSide: PadSide, + port: Port) { + + def arrayInstNamePrefix(mod: String): String = Seq(mod, firrtlBBName, getPadName).mkString("/") + def arrayInstNameSuffix: String = pad match { + case None => throw new Exception("Port needs to use pad to get array instance name!") + case Some(x) => "/" + x.padInstName + } + + def portName = port.name + def portWidth = bitWidth(port.tpe).intValue + def portDirection = port.direction + def padOrientation = padSide.orientation + def padType = pad match { + case None => NoPad + case Some(x) => x.padType + } + + def widthParamName = "WIDTH" + def getPadName: String = pad match { + case None => throw new Exception("Cannot get pad name when no pad specified!") + case Some(x) => x.getName(portDirection, padOrientation) + } + def getPadArrayName: String = Seq(getPadName, "array").mkString("_") + // Firrtl black box name must be unique, even though the parameterized Verilog modules don't + // need to have separate names + def firrtlBBName = Seq(getPadArrayName, portName).mkString("_") + + // Note: This includes both the pad wrapper + an additional wrapper for n-bit wide to + // multiple pad conversion! 
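  // Editor's worked example (hypothetical port): for a 4-bit digital input port "dataIn"
  // mapped to the "from_tristate_foundry" pad on the Top (vertical) side, the names built
  // from the template parameters are expected to be
  //   getPadName      -> "pad_digital_from_tristate_foundry_vertical_input"
  //   getPadArrayName -> "pad_digital_from_tristate_foundry_vertical_input_array"
  //   firrtlBBName    -> "pad_digital_from_tristate_foundry_vertical_input_array_dataIn"
  // so one Verilog wrapper is emitted per (pad, orientation, direction) combination, while
  // the Firrtl blackbox name stays unique per port (parameterized here with WIDTH = 4).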
+ def createPadInline(): String = { + // For blackboxing bit extraction/concatenation (with module arrays) + def io(): String = padType match { + case DigitalPad => + s"""| input [${widthParamName}-1:0] ${DigitalPad.inName}, + | output reg [${widthParamName}-1:0] ${DigitalPad.outName}""".stripMargin + case AnalogPad => + s" inout [${widthParamName}-1:0] ${AnalogPad.ioName}" + case _ => throw new Exception("IO pad can only be digital or analog") + } + def assignIO(): String = padType match { + case DigitalPad => + s"""| .${DigitalPad.inName}(${DigitalPad.inName}), + | .${DigitalPad.outName}(${DigitalPad.outName})""".stripMargin + case AnalogPad => + s" .${AnalogPad.ioName}(${AnalogPad.ioName})" + case _ => throw new Exception("IO pad can only be digital or analog") + } + def getPadVerilog(): String = pad match { + case None => throw new Exception("Cannot get Verilog when no pad specified!") + case Some(x) => x.getVerilog(portDirection, padOrientation) + } + s"""inline + |${getPadArrayName}.v + |${getPadVerilog} + |module ${getPadArrayName} #( + | parameter int ${widthParamName}=1 + |)( + |${io} + |); + | ${getPadName} ${getPadName}[${widthParamName}-1:0]( + |${assignIO} + | ); + |endmodule""".stripMargin + } +} + +object AnnotatePortPads { + def apply( + c: Circuit, + topMod: String, + pads: Seq[FoundryPad], + componentAnnos: Seq[TargetIOPadAnnoF], + defaultSide: PadSide): Seq[PortIOPad] = { + + def lowerAnnotations(): Seq[TargetIOPadAnnoF] = { + componentAnnos map { x => x.target match { + case c: ComponentName => x.copy(target = c.copy(name = LowerName(c.name))) + case _ => throw new Exception("Not a component annotation! Can't lower!") + }} + } + + // Make annotations match low form + val annos = lowerAnnotations() + + def getPortIOPad(port: Port): PortIOPad = { + val portAnnos = annos.find(_.targetName == port.name) + // Ports can only be digital or analog + val padTypeRequired = port.tpe match { + case AnalogType(_) => AnalogPad + case _ => DigitalPad + } + val validPads = pads.filter(_.padType == padTypeRequired) + require(validPads.length > 0, s"No ${padTypeRequired.serialize} pads specified in the config yaml file!") + portAnnos match { + case None => + // If no pad-related annotation is found on a port, use defaults based off of port type + PortIOPad(Some(validPads.head), defaultSide, port) + case Some(x) => + x.anno match { + case NoIOPadAnnotation(_) => + // Some ports might not want attached pads + PortIOPad(None, defaultSide, port) + case IOPadAnnotation(padSide, padName) if padName.isEmpty => + // If no pad name is used, select the first valid pad based off of port type + PortIOPad(Some(validPads.head), HasPadAnnotation.getSide(padSide), port) + case IOPadAnnotation(padSide, padName) => + // If name doesn't match any provided -- maybe someone typoed? + validPads.find(_.name == padName) match { + case None => + throw new Exception( + s"Pad name associated with ${port.name} doesn't match valid pad names. 
Did you typo?") + case Some(x) => + PortIOPad(Some(x), HasPadAnnotation.getSide(padSide), port) + } + } + } + } + // Top MUST be internal module + c.modules.filter(_.name == topMod).head.ports.map(x => getPortIOPad(x)) + } +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/pads/AnnotateSupplyPads.scala b/tapeout/src/main/scala/transforms/pads/AnnotateSupplyPads.scala new file mode 100644 index 000000000..27b93b46a --- /dev/null +++ b/tapeout/src/main/scala/transforms/pads/AnnotateSupplyPads.scala @@ -0,0 +1,56 @@ +package barstools.tapeout.transforms.pads + +import firrtl.annotations._ +import firrtl._ +import firrtl.ir._ +import firrtl.passes._ + +case class TopSupplyPad( + pad: FoundryPad, + padSide: PadSide, + num: Int +) { + + // TODO: These should be pulled into some common trait (supply + io)! + + def arrayInstNamePrefix(mod: String): Seq[String] = { + instNames.map(n => Seq(mod, n, pad.padInstName).mkString("/")) + } + def supplySetNum = pad.getSupplySetNum + + def padType = pad.padType + require(pad.padType == SupplyPad) + + def padOrientation = padSide.orientation + def getPadName = pad.getName(NoDirection, padOrientation) + def firrtlBBName = getPadName + private def instNamePrefix = Seq(firrtlBBName, padSide.serialize).mkString("_") + def instNames = (0 until num).map(i => Seq(instNamePrefix, i.toString).mkString("_")) + + def createPadInline(): String = { + def getPadVerilog(): String = pad.getVerilog(NoDirection, padOrientation) + s"""inline + |${getPadName}.v + |${getPadVerilog}""".stripMargin + } +} + +object AnnotateSupplyPads { + def apply( + pads: Seq[FoundryPad], + supplyAnnos: Seq[SupplyAnnotation] + ): Seq[TopSupplyPad] = { + supplyAnnos.map( a => + pads.find(_.name == a.padName) match { + case None => + throw new Exception(s"Supply pad ${a.padName} not found in Yaml file!") + case Some(x) => + Seq( + TopSupplyPad(x, Left, a.leftSide), + TopSupplyPad(x, Right, a.rightSide), + TopSupplyPad(x, Top, a.topSide), + TopSupplyPad(x, Bottom, a.bottomSide)) + } + ).flatten.filter(_.num > 0) + } +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/pads/ChiselTopModule.scala b/tapeout/src/main/scala/transforms/pads/ChiselTopModule.scala new file mode 100644 index 000000000..8f891e62d --- /dev/null +++ b/tapeout/src/main/scala/transforms/pads/ChiselTopModule.scala @@ -0,0 +1,76 @@ +package barstools.tapeout.transforms.pads + +import chisel3._ +import barstools.tapeout.transforms.clkgen._ +import chisel3.experimental._ +import firrtl.transforms.DedupModules + +// TODO: Move out of pads + +// NOTE: You can't really annotate outside of the module itself UNLESS you break up the compile step in 2 i.e. +// annotate post-Chisel but pre-Firrtl (unfortunate non-generator friendly downside). +// It's recommended to have a Tapeout specific TopModule wrapper. 
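// Editor's sketch of such a wrapper (hypothetical design; pad and supply names assume the
// example FoundryPads.yaml shipped in resources): a chip top extending TopModule, requesting
// supply pads, overriding the pad/side for one port, and opting one port out of pads.
class ExampleChipTop extends TopModule(
    supplyAnnos = Seq(
      SupplyAnnotation(padName = "vdd", leftSide = 2, rightSide = 2),
      SupplyAnnotation(padName = "vss", topSide = 2, bottomSide = 2)),
    defaultPadSide = Left) {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
    val dbg = Output(Bool())
  })
  io.out := io.in
  io.dbg := io.in.orR
  annotatePad(io.out, side = Right, name = "from_tristate_foundry")
  noPad(io.dbg) // dbg is assumed to be padded elsewhere
}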
+// LIMITATION: All signals of a bus must be on the same chip side + +// Chisel-y annotations +abstract class TopModule( + supplyAnnos: Seq[SupplyAnnotation] = Seq.empty, + defaultPadSide: PadSide = Top, + coreWidth: Int = 0, + coreHeight: Int = 0, + usePads: Boolean = true, + override_clock: Option[Clock] = None, + override_reset: Option[Bool] = None) extends Module(override_clock, override_reset) with IsClkModule { + + override def annotateClkPort(p: Element, anno: ClkPortAnnotation): Unit = { + p.dir match { + case chisel3.core.Direction.Input => + require(anno.tag.nonEmpty, "Top Module input clks must be clk sinks") + require(anno.tag.get.src.nonEmpty, + "Top module input clks must have clk period, etc. specified") + case _ => + throw new Exception("Clk port direction must be specified!") + } + p match { + case _: chisel3.core.Clock => + case _ => throw new Exception("Clock port must be of type Clock") + } + annotate(TargetClkPortAnnoC(p, anno).getAnno) + } + + override def annotateDerivedClks(m: Module, anno: ClkModAnnotation): Unit = + throw new Exception("Top module cannot be pure clock module!") + + // Annotate module as top module (that requires pad transform) + // Specify the yaml file that indicates how pads are templated, + // the default chip side that pads should be placed (if nothing is specified per IO), + // and supply annotations: supply pad name, location, and # + def createPads(): Unit = if (usePads) { + val modulePadAnnotation = ModulePadAnnotation( + defaultPadSide = defaultPadSide.serialize, + coreWidth = coreWidth, + coreHeight = coreHeight, + supplyAnnos = supplyAnnos + ) + annotate(TargetModulePadAnnoC(this, modulePadAnnotation).getAnno) + } + + // Annotate IO with side + pad name + def annotatePad(sig: Element, side: PadSide = defaultPadSide, name: String = ""): Unit = if (usePads) { + val anno = IOPadAnnotation(side.serialize, name) + annotate(TargetIOPadAnnoC(sig, anno).getAnno) + } + def annotatePad(sig: Aggregate, name: String): Unit = annotatePad(sig, side = defaultPadSide, name) + def annotatePad(sig: Aggregate, side: PadSide): Unit = annotatePad(sig, side, name = "") + def annotatePad(sig: Aggregate, side: PadSide, name: String): Unit = + extractElements(sig) foreach { x => annotatePad(x, side, name) } + + // There may be cases where pads were inserted elsewhere. If that's the case, allow certain IO to + // not have pads auto added. Note that annotatePad and noPad are mutually exclusive! 
+ def noPad(sig: Element): Unit = if (usePads) annotate(TargetIOPadAnnoC(sig, NoIOPadAnnotation()).getAnno) + def noPad(sig: Aggregate): Unit = extractElements(sig) foreach { x => noPad(x) } + + // Since this is a super class, this should be the first thing that gets run + // (at least when the module is actually at the top -- currently no guarantees otherwise :( firrtl limitation) + createPads() +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/pads/CreatePadBBs.scala b/tapeout/src/main/scala/transforms/pads/CreatePadBBs.scala new file mode 100644 index 000000000..1a7f2aa90 --- /dev/null +++ b/tapeout/src/main/scala/transforms/pads/CreatePadBBs.scala @@ -0,0 +1,109 @@ +package barstools.tapeout.transforms.pads + +import firrtl.annotations._ +import firrtl._ +import firrtl.ir._ +import firrtl.transforms._ + +object CreatePadBBs { + + private [barstools] case class UsedPadInfo( + // The following are found with both supply + io pads + padInline: String, // Verilog txt + padName: String, // Pad module name + padType: PadType, // Pad type: supply, analog, digital + // The following only affects io pads (due to using parameterized modules for bit extraction / cat) + padArrayName: String, // Name of parameterized pad wrapper (that does bit extract/cat) + firrtlBBName: String, // Unique Firrtl name of each parameterized pad wrapper + portWidth: Int // Port width for analog/digital + ) + + def convertToUsedPad(p: PortIOPad): UsedPadInfo = { + UsedPadInfo( + padInline = p.createPadInline, + padName = p.getPadName, + padType = p.padType, + padArrayName = p.getPadArrayName, + firrtlBBName = p.firrtlBBName, + portWidth = p.portWidth) + } + + def convertToUsedPad(p: TopSupplyPad): UsedPadInfo = { + UsedPadInfo( + padInline = p.createPadInline, + padName = p.getPadName, + padType = p.padType, + // Supply pads don't require bit extraction / cat so don't care + padArrayName = p.getPadName, + firrtlBBName = p.getPadName, + portWidth = 0) + } + + def checkLegalPadName(namespace: Namespace, usedPads: Seq[UsedPadInfo]): Unit = { + usedPads foreach { x => + if (namespace contains x.padName) + throw new Exception(s"Pad name ${x.padName} already used!") + if (namespace contains x.padArrayName) + throw new Exception(s"Pad array ${x.padArrayName} name already used!") + if (namespace contains x.firrtlBBName) + throw new Exception(s"Firrtl black box ${x.firrtlBBName} name already used!") + } + } + + def apply( + c: Circuit, + ioPads: Seq[PortIOPad], + supplyPads: Seq[TopSupplyPad]): (Circuit, Seq[Annotation]) = { + + // Add black boxes for both supply + (used) io pads + val usedPads = ioPads.filter(x => x.pad.nonEmpty).map(convertToUsedPad(_)) ++ supplyPads.map(convertToUsedPad(_)) + checkLegalPadName(Namespace(c), usedPads) + + // Note that we need to check for Firrtl name uniqueness here! 
(due to parameterization) + val uniqueExtMods = scala.collection.mutable.ArrayBuffer[UsedPadInfo]() + usedPads foreach { x => + if (uniqueExtMods.find(_.firrtlBBName == x.firrtlBBName).isEmpty) + uniqueExtMods += x + } + + // Collecting unique parameterized black boxes + // (for io, they're wrapped pads; for supply, they're pad modules directly) + val uniqueParameterizedBBs = scala.collection.mutable.ArrayBuffer[UsedPadInfo]() + uniqueExtMods foreach { x => + if (uniqueParameterizedBBs.find(_.padArrayName == x.padArrayName).isEmpty) + uniqueParameterizedBBs += x + } + + // Note: Firrtl is silly and doesn't implement true parameterization -- each module with + // parameterization that potentially affects # of IO needs to be uniquely identified + // (but only in Firrtl) + val bbs = uniqueExtMods.map(x => { + // Supply pads don't have ports + val ports = x.padType match { + case AnalogPad => Seq(Port(NoInfo, AnalogPad.ioName, Input, AnalogType(IntWidth(x.portWidth)))) + case DigitalPad => Seq( + Port(NoInfo, DigitalPad.inName, Input, UIntType(IntWidth(x.portWidth))), + Port(NoInfo, DigitalPad.outName, Output, UIntType(IntWidth(x.portWidth))) + ) + case SupplyPad => Seq.empty + case _ => throw new Exception("Port pad type invalid!") + } + // Supply black boxes are not parameterized + val params = x.padType match { + case AnalogPad | DigitalPad => Seq(IntParam(ioPads.head.widthParamName, x.portWidth)) + case SupplyPad => Seq() + case _ => throw new Exception("Port pad type invalid!") + } + // Firrtl name is unique + ExtModule(NoInfo, x.firrtlBBName, ports, x.padArrayName, params) + } ).toSeq + + // Add annotations to black boxes to inline Verilog from template + // Again, note the weirdness in parameterization -- just need to hook to one matching Firrtl instance + val annos = uniqueParameterizedBBs.map(x => + BlackBoxSourceAnnotation(ModuleName(x.firrtlBBName, CircuitName(c.main)), x.padInline) + ).toSeq + (c.copy(modules = c.modules ++ bbs), annos) + } + +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/pads/FoundryPadsYaml.scala b/tapeout/src/main/scala/transforms/pads/FoundryPadsYaml.scala new file mode 100644 index 000000000..ff1b92f0d --- /dev/null +++ b/tapeout/src/main/scala/transforms/pads/FoundryPadsYaml.scala @@ -0,0 +1,95 @@ +package barstools.tapeout.transforms.pads + +import net.jcazevedo.moultingyaml._ + +import firrtl._ +import firrtl.ir._ +import barstools.tapeout.transforms._ + +case class FoundryPad( + tpe: String, + name: String, + width: Int, + height: Int, + supplySetNum: Option[Int], + verilog: String) { + + def padInstName = "PAD" + + require(verilog.contains("{{#if isHorizontal}}"), "All pad templates must contain '{{#if isHorizontal}}'") + require(verilog.contains("{{name}}"), "All pad templates must contain module name '{{name}}'") + require(verilog.contains(padInstName), s"All pad templates should have instances called ${padInstName}") + + def getSupplySetNum = supplySetNum.getOrElse(1) + + val padType = tpe match { + case "digital" => + require(verilog.contains(DigitalPad.inName), "Digital pad template must contain input called 'in'") + require(verilog.contains(DigitalPad.outName), "Digital pad template must contain output called 'out'") + require(verilog.contains("{{#if isInput}}"), "Digital pad template must contain '{{#if isInput}}'") + DigitalPad + case "analog" => + require(verilog.contains(AnalogPad.ioName), "Analog pad template must contain inout called 'io'") + require(!verilog.contains("{{#if isInput}}"), "Analog pad template must 
not contain '{{#if isInput}}'") + AnalogPad + case "supply" => + // Supply pads don't have IO + require(!verilog.contains("{{#if isInput}}"), "Supply pad template must not contain '{{#if isInput}}'") + require( + verilog.contains(s"${padInstName}["), "All supply pad templates should have instance arrays" + + " called ${padInstName}[n:0], where n = ${getSupplySetNum-1}") + require(supplySetNum.nonEmpty, "# of grouped supply pads 'supplySetNum' should be specified!") + SupplyPad + case _ => throw new Exception("Illegal pad type in config!") + } + + import com.gilt.handlebars.scala.binding.dynamic._ + import com.gilt.handlebars.scala.Handlebars + private val template = Handlebars(verilog) + + // Make sure names don't have spaces in Verilog! + private[barstools] val correctedName = name.replace(" ", "_") + + case class TemplateParams( + // isInput only used with digital pads + isInput: Boolean, + isHorizontal: Boolean) { + + private val orient = if (isHorizontal) Horizontal.serialize else Vertical.serialize + private val dir = padType match { + case AnalogPad => InOut.serialize + case SupplyPad => NoDirection.serialize + case DigitalPad => if (isInput) Input.serialize else Output.serialize + } + val name = { + val start = Seq("pad", tpe, correctedName, orient) + if (padType == DigitalPad) start :+ dir + else start + }.mkString("_") + } + + // Note: Analog + supply don't use direction + private def getTemplateParams(dir: Direction, orient: PadOrientation): TemplateParams = + TemplateParams(isInput = (dir == Input), isHorizontal = (orient == Horizontal)) + + def getVerilog(dir: Direction, orient: PadOrientation): String = { + val p = getTemplateParams(dir, orient) + template(p).stripMargin + } + + def getName(dir: Direction, orient: PadOrientation): String = getTemplateParams(dir, orient).name +} + +object FoundryPadsYaml extends DefaultYamlProtocol { + val exampleResource = "/FoundryPads.yaml" + implicit val _pad = yamlFormat6(FoundryPad) + def parse(techDir: String): Seq[FoundryPad] = { + val file = techDir + exampleResource + if(techDir != "" && !(new java.io.File(file)).exists()) + throw new Exception("Technology directory must contain FoundryPads.yaml!") + val out = (new YamlFileReader(exampleResource)).parse[FoundryPad](if (techDir == "") "" else file) + val padNames = out.map(x => x.correctedName) + require(padNames.distinct.length == padNames.length, "Pad names must be unique!") + out + } +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/pads/PadAnnotations.scala b/tapeout/src/main/scala/transforms/pads/PadAnnotations.scala new file mode 100644 index 000000000..66b7f1843 --- /dev/null +++ b/tapeout/src/main/scala/transforms/pads/PadAnnotations.scala @@ -0,0 +1,133 @@ +package barstools.tapeout.transforms.pads + +import firrtl.annotations._ +import chisel3.experimental._ +import chisel3._ +import barstools.tapeout.transforms._ +import firrtl._ + +import net.jcazevedo.moultingyaml._ + +object PadAnnotationsYaml extends DefaultYamlProtocol { + implicit val _iopad = yamlFormat2(IOPadAnnotation) + implicit val _noiopad = yamlFormat1(NoIOPadAnnotation) + implicit val _supplyanno = yamlFormat5(SupplyAnnotation) + implicit val _modulepadanno = yamlFormat4(ModulePadAnnotation) +} + +abstract class FirrtlPadTransformAnnotation { + def targetName: String +} + +// IO Port can either be annotated with padName + padSide OR noPad (mutually exclusive) +abstract class IOAnnotation { + def serialize: String +} +case class IOPadAnnotation(padSide: String, padName: String) 
extends IOAnnotation { + import PadAnnotationsYaml._ + def serialize: String = this.toYaml.prettyPrint + def getPadSide: PadSide = HasPadAnnotation.getSide(padSide) +} +case class NoIOPadAnnotation(noPad: String = "") extends IOAnnotation { + import PadAnnotationsYaml._ + def serialize: String = this.toYaml.prettyPrint + def field = "noPad:" +} +// Firrtl version +case class TargetIOPadAnnoF(target: ComponentName, anno: IOAnnotation) extends FirrtlPadTransformAnnotation { + def getAnno = Annotation(target, classOf[AddIOPadsTransform], anno.serialize) + def targetName = target.name +} +// Chisel version +case class TargetIOPadAnnoC(target: Element, anno: IOAnnotation) { + def getAnno = ChiselAnnotation(target, classOf[AddIOPadsTransform], anno.serialize) +} + +// A bunch of supply pads (designated by name, # on each chip side) can be associated with the top module +case class SupplyAnnotation( + padName: String, + leftSide: Int = 0, + rightSide: Int = 0, + topSide: Int = 0, + bottomSide: Int = 0) +// The chip top should have a default pad side, a pad template file, and supply annotations +case class ModulePadAnnotation( + defaultPadSide: String = Top.serialize, + coreWidth: Int = 0, + coreHeight: Int = 0, + supplyAnnos: Seq[SupplyAnnotation] = Seq.empty) { + import PadAnnotationsYaml._ + def serialize: String = this.toYaml.prettyPrint + val supplyPadNames = supplyAnnos.map(_.padName) + require(supplyPadNames.distinct.length == supplyPadNames.length, "Supply pads should only be specified once!") + def getDefaultPadSide: PadSide = HasPadAnnotation.getSide(defaultPadSide) +} +// Firrtl version +case class TargetModulePadAnnoF(target: ModuleName, anno: ModulePadAnnotation) extends FirrtlPadTransformAnnotation { + def getAnno = Annotation(target, classOf[AddIOPadsTransform], anno.serialize) + def targetName = target.name +} +// Chisel version +case class TargetModulePadAnnoC(target: Module, anno: ModulePadAnnotation) { + def getAnno = ChiselAnnotation(target, classOf[AddIOPadsTransform], anno.serialize) +} + +case class CollectedAnnos( + componentAnnos: Seq[TargetIOPadAnnoF], + moduleAnnos: TargetModulePadAnnoF) { + def supplyAnnos = moduleAnnos.anno.supplyAnnos + def defaultPadSide = moduleAnnos.anno.defaultPadSide + def topModName = moduleAnnos.targetName + def coreWidth = moduleAnnos.anno.coreWidth + def coreHeight = moduleAnnos.anno.coreHeight +} + +object HasPadAnnotation { + import PadAnnotationsYaml._ + + def getSide(a: String): PadSide = a match { + case i if i == Left.serialize => Left + case i if i == Right.serialize => Right + case i if i == Top.serialize => Top + case i if i == Bottom.serialize => Bottom + case _ => throw new Exception(s" $a not a valid pad side annotation!") + } + + def unapply(a: Annotation): Option[FirrtlPadTransformAnnotation] = a match { + case Annotation(f, t, s) if t == classOf[AddIOPadsTransform] => f match { + case m: ModuleName => + Some(TargetModulePadAnnoF(m, s.parseYaml.convertTo[ModulePadAnnotation])) + case c: ComponentName if s.contains(NoIOPadAnnotation().field) => + Some(TargetIOPadAnnoF(c, s.parseYaml.convertTo[NoIOPadAnnotation])) + case c: ComponentName => + Some(TargetIOPadAnnoF(c, s.parseYaml.convertTo[IOPadAnnotation])) + case _ => throw new Exception("Annotation only valid on module or component") + } + case _ => None + } + + def apply(annos: Seq[Annotation]): Option[CollectedAnnos] = { + // Get all pad-related annotations (config files, pad sides, pad names, etc.) 
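    // Editor's note (illustrative): a padded design is expected to contribute exactly one
    // module-level annotation (from createPads on the TopModule) plus at most one IO annotation
    // per top-level port (from annotatePad or noPad); two annotations landing on the same
    // target trip the uniqueness require just below.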
+ val padAnnos = annos.map(x => unapply(x)).flatten + val targets = padAnnos.map(x => x.targetName) + require(targets.distinct.length == targets.length, "Only 1 pad related annotation is allowed per component/module") + if (padAnnos.length == 0) None + else { + val moduleAnnosTemp = padAnnos.filter { + case TargetModulePadAnnoF(_, _) => true + case _ => false + } + require(moduleAnnosTemp.length == 1, "Only 1 module may be designated 'Top'") + val moduleAnnos = moduleAnnosTemp.head + val topModName = moduleAnnos.targetName + val componentAnnos = padAnnos.filter { + case TargetIOPadAnnoF(ComponentName(_, ModuleName(n, _)), _) if n == topModName => + true + case TargetIOPadAnnoF(ComponentName(_, ModuleName(n, _)), _) if n != topModName => + throw new Exception("Pad related component annotations must all be in the same top module") + case _ => false + }.map(x => x.asInstanceOf[TargetIOPadAnnoF]) + Some(CollectedAnnos(componentAnnos, moduleAnnos.asInstanceOf[TargetModulePadAnnoF])) + } + } +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/pads/PadDescriptors.scala b/tapeout/src/main/scala/transforms/pads/PadDescriptors.scala new file mode 100644 index 000000000..95a2cb1bd --- /dev/null +++ b/tapeout/src/main/scala/transforms/pads/PadDescriptors.scala @@ -0,0 +1,56 @@ +package barstools.tapeout.transforms.pads + +import firrtl._ +import firrtl.ir._ + +abstract class PadOrientation extends FirrtlNode +case object Horizontal extends PadOrientation { + def serialize: String = "horizontal" +} +case object Vertical extends PadOrientation { + def serialize: String = "vertical" +} + +abstract class PadType extends FirrtlNode +case object DigitalPad extends PadType { + def serialize: String = "digital" + def inName: String = "in" + def outName: String = "out" +} +case object AnalogPad extends PadType { + def serialize: String = "analog" + def ioName: String = "io" +} +case object SupplyPad extends PadType { + def serialize: String = "supply" +} +case object NoPad extends PadType { + def serialize: String = "none" +} + +case object InOut extends Direction { + def serialize: String = "inout" +} +case object NoDirection extends Direction { + def serialize: String = "none" +} + +abstract class PadSide extends FirrtlNode { + def orientation: PadOrientation +} +case object Left extends PadSide { + def serialize: String = "left" + def orientation: PadOrientation = Horizontal +} +case object Right extends PadSide { + def serialize: String = "right" + def orientation: PadOrientation = Horizontal +} +case object Top extends PadSide { + def serialize: String = "top" + def orientation: PadOrientation = Vertical +} +case object Bottom extends PadSide { + def serialize: String = "bottom" + def orientation: PadOrientation = Vertical +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/pads/PadPlacement.scala b/tapeout/src/main/scala/transforms/pads/PadPlacement.scala new file mode 100644 index 000000000..2d774f01e --- /dev/null +++ b/tapeout/src/main/scala/transforms/pads/PadPlacement.scala @@ -0,0 +1,116 @@ +package barstools.tapeout.transforms.pads + +import net.jcazevedo.moultingyaml._ + +import firrtl._ +import firrtl.ir._ +import barstools.tapeout.transforms._ + +case class PadPlacement( + file: String, + left: String, + top: String, + right: String, + bottom: String, + instanceArray: String, + padLine: String, + template: String) { + + require(instanceArray contains "{{signal}}", "Instance Array Template should contain {{signal}}") + require(instanceArray 
contains "{{idx}}", "Instance Array Template should contain {{idx}}") + require(padLine contains "{{padInst}}", "Pad line should contain {{padInst}}") + require(padLine contains "{{side}}", "Pad line should contain {{side}} (Can be in comments)") + require(padLine contains "{{padIdx}}", "Pad line should contain {{padIdx}} (Can be in comments)") + require(template contains "{{leftPads}}", "Pad line should contain {{leftPads}}") + require(template contains "{{rightPads}}", "Pad line should contain {{rightPads}}") + require(template contains "{{topPads}}", "Pad line should contain {{topPads}}") + require(template contains "{{bottomPads}}", "Pad line should contain {{bottomPads}}") + + def getSideString(s: PadSide): String = s match { + case Left => left + case Right => right + case Top => top + case Bottom => bottom + } + + import com.gilt.handlebars.scala.binding.dynamic._ + import com.gilt.handlebars.scala.Handlebars + + private val instanceArrayTemplate = Handlebars(instanceArray.stripMargin) + private val padLineTemplate = Handlebars(padLine.stripMargin) + private val padPlacementTemplate = Handlebars(template.stripMargin) + + def getInstanceArray(p: InstanceArrayParams): String = instanceArrayTemplate(p).stripMargin + def getPadLine(p: PadLineParams): String = padLineTemplate(p).stripMargin.replace("&quot;", "\"") + def getPadPlacement(p: PadPlacementParams): String = padPlacementTemplate(p).stripMargin.replace("&quot;", "\"") + +} + +case class InstanceArrayParams(signal: String, idx: Int) +case class PadLineParams(padInst: String, side: String, padIdx: Int) +case class PadPlacementParams(leftPads: String, rightPads: String, topPads: String, bottomPads: String) + +object PadPlacementFile extends DefaultYamlProtocol { + val exampleResource = "/PadPlacement.yaml" + implicit val _pad = yamlFormat8(PadPlacement) + def parse(file: String = ""): PadPlacement = { + (new YamlFileReader(exampleResource)).parse[PadPlacement](file).head + } + def generate( + techDir: String, + targetDir: String, + padFrameName: String, + portPads: Seq[PortIOPad], + supplyPads: Seq[TopSupplyPad]): Unit = { + + val file = techDir + exampleResource + if(techDir != "" && !(new java.io.File(file)).exists()) + throw new Exception("Technology directory must contain PadPlacement.yaml!") + val template = parse(if (techDir == "") "" else file) + + val leftPads = scala.collection.mutable.ArrayBuffer[String]() + val rightPads = scala.collection.mutable.ArrayBuffer[String]() + val topPads = scala.collection.mutable.ArrayBuffer[String]() + val bottomPads = scala.collection.mutable.ArrayBuffer[String]() + + def sort(side: PadSide, inst: String): Unit = side match { + case Left => leftPads += inst + case Right => rightPads += inst + case Top => topPads += inst + case Bottom => bottomPads += inst + } + + // TODO: Be smarter about supply placement (+ grouping?) 
between signals + // Supply pad instance name: padFrameName/firrtlBBName_padSide_#num/PAD[#supplySetNum] + supplyPads foreach { p => + val prefixes = p.arrayInstNamePrefix(padFrameName) + prefixes foreach { prefix => + (0 until p.supplySetNum) foreach { idx => + sort(p.padSide, template.getInstanceArray(InstanceArrayParams(prefix, idx))) + } + } + } + // IO pad instance name: padFrameName/firrtlBBName/getPadName[#portWidth]/PAD + portPads.filter(_.pad.nonEmpty) foreach { p => + val prefix = p.arrayInstNamePrefix(padFrameName) + (0 until p.portWidth).map(idx => + template.getInstanceArray(InstanceArrayParams(prefix, idx)) + p.arrayInstNameSuffix + ) foreach { x => sort(p.padSide, x) } + } + + def getLines(pads: Seq[String], side: PadSide): String = { + val seq = pads.zipWithIndex.map{ case (p, idx) => + template.getPadLine(PadLineParams(p, template.getSideString(side), idx)) } + seq.mkString("\n") + } + + val fileContents = template.getPadPlacement(PadPlacementParams( + leftPads = getLines(leftPads.toSeq, Left), + rightPads = getLines(rightPads.toSeq, Right), + topPads = getLines(topPads.toSeq, Top), + bottomPads = getLines(bottomPads.toSeq, Bottom) + )) + + WriteConfig(targetDir, template.file, fileContents) + } +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/utils/DiGraph.scala b/tapeout/src/main/scala/transforms/utils/DiGraph.scala new file mode 100644 index 000000000..8e0db0787 --- /dev/null +++ b/tapeout/src/main/scala/transforms/utils/DiGraph.scala @@ -0,0 +1,158 @@ +package firrtl + +import scala.collection.immutable.{HashSet, HashMap} +import scala.collection.mutable +import scala.collection.mutable.MultiMap + +class MutableDiGraph[T]( + val edgeData: MultiMap[T,T] = new mutable.HashMap[T, mutable.Set[T]] with MultiMap[T, T]) { + def contains(v: T) = edgeData.contains(v) + def getVertices = edgeData.keys + def getEdges(v: T) = edgeData(v) + def addVertex(v: T): T = { + edgeData.getOrElseUpdate(v,new mutable.HashSet[T]) + v + } + // Add v to keys to maintain invariant + def addEdge(u: T, v: T) = { + edgeData.getOrElseUpdate(v, new mutable.HashSet[T]) + edgeData.addBinding(u,v) + } +} + +object DiGraph { + def apply[T](mdg: MutableDiGraph[T]) = new DiGraph((mdg.edgeData mapValues { _.toSet }).toMap[T, Set[T]]) + def apply[T](edgeData: MultiMap[T,T]) = new DiGraph((edgeData mapValues { _.toSet }).toMap[T, Set[T]]) +} + +class DiGraph[T] (val edges: Map[T, Set[T]]) { + + def getVertices = edges.keys + def getEdges(v: T) = edges.getOrElse(v, new HashSet[T]) + + // Graph must be acyclic for valid linearization + def linearize(root: T) = { + val order = new mutable.ArrayBuffer[T] + val visited = new mutable.HashSet[T] + def explore(v: T): Unit = { + visited += v + for (u <- getEdges(v)) { + if (!visited.contains(u)) { + explore(u) + } + } + order.append(v) + } + explore(root) + order.reverse.toList + } + + def doBFS(root: T) = { + val prev = new mutable.HashMap[T,T] + val queue = new mutable.Queue[T] + queue.enqueue(root) + while (!queue.isEmpty) { + val u = queue.dequeue + for (v <- getEdges(u)) { + if (!prev.contains(v)) { + prev(v) = u + queue.enqueue(v) + } + } + } + prev + } + + def reachabilityBFS(root: T) = doBFS(root).keys.toSet + + def path(start: T, end: T) = { + val nodePath = new mutable.ArrayBuffer[T] + val prev = doBFS(start) + nodePath += end + while (nodePath.last != start) { + nodePath += prev(nodePath.last) + } + nodePath.toList.reverse + } + + def findSCCs = { + var counter: BigInt = 0 + val stack = new mutable.Stack[T] + val onstack = new 
mutable.HashSet[T] + val indices = new mutable.HashMap[T, BigInt] + val lowlinks = new mutable.HashMap[T, BigInt] + val sccs = new mutable.ArrayBuffer[List[T]] + + def strongConnect(v: T): Unit = { + indices(v) = counter + lowlinks(v) = counter + counter = counter + 1 + stack.push(v) + onstack += v + for (w <- getEdges(v)) { + if (!indices.contains(w)) { + strongConnect(w) + lowlinks(v) = lowlinks(v).min(lowlinks(w)) + } else if (onstack.contains(w)) { + lowlinks(v) = lowlinks(v).min(indices(w)) + } + } + if (lowlinks(v) == indices(v)) { + val scc = new mutable.ArrayBuffer[T] + do { + val w = stack.pop + onstack -= w + scc += w + } + while (scc.last != v); + sccs.append(scc.toList) + } + } + + for (v <- getVertices) { + strongConnect(v) + } + + sccs.toList + } + + def pathsInDAG(start: T): Map[T,List[List[T]]] = { + // paths(v) holds the set of paths from start to v + val paths = new mutable.HashMap[T,mutable.Set[List[T]]] with mutable.MultiMap[T,List[T]] + val queue = new mutable.Queue[T] + val visited = new mutable.HashSet[T] + paths.addBinding(start,List(start)) + queue.enqueue(start) + visited += start + while (!queue.isEmpty) { + val current = queue.dequeue + for (v <- getEdges(current)) { + if (!visited.contains(v)) { + queue.enqueue(v) + visited += v + } + for (p <- paths(current)) { + paths.addBinding(v, p :+ v) + } + } + } + (paths map { case (k,v) => (k,v.toList) }).toMap + } + + def reverse = { + val mdg = new MutableDiGraph[T] + edges foreach { case (u,edges) => edges.foreach({ v => mdg.addEdge(v,u) }) } + DiGraph(mdg) + } + + def simplify(vprime: Set[T]) = { + val eprime = vprime.map( v => (v,reachabilityBFS(v) & vprime) ).toMap + new DiGraph(eprime) + } + + def transformNodes[Q](f: (T) => Q): DiGraph[Q] = { + val eprime = edges.map({ case (k,v) => (f(k),v.map(f(_))) }) + new DiGraph(eprime) + } + +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/utils/FileUtils.scala b/tapeout/src/main/scala/transforms/utils/FileUtils.scala new file mode 100644 index 000000000..5fc358420 --- /dev/null +++ b/tapeout/src/main/scala/transforms/utils/FileUtils.scala @@ -0,0 +1,65 @@ +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.annotations._ +import firrtl.passes._ +import firrtl.ir._ + +object WriteConfig { + def apply(dir: String, file: String, contents: String): Unit = { + val writer = new java.io.PrintWriter(new java.io.File(s"$dir/$file")) + writer write contents + writer.close() + } +} + +object GetTargetDir { + def apply(state: CircuitState): String = { + val annos = state.annotations.getOrElse(AnnotationMap(Seq.empty)).annotations + val destDir = annos.map { + case Annotation(f, t, s) if t == classOf[transforms.BlackBoxSourceHelper] => + transforms.BlackBoxSource.parse(s) match { + case Some(transforms.BlackBoxTargetDir(dest)) => Some(dest) + case _ => None + } + case _ => None + }.flatten + val loc = { + if (destDir.isEmpty) "." 
+ else destDir.head + } + val targetDir = new java.io.File(loc) + if(!targetDir.exists()) FileUtils.makeDirectory(targetDir.getAbsolutePath) + loc + } +} + +// Fake transform just to track Technology information directory +object TechnologyLocation { + def apply(dir: String): Annotation = { + Annotation(CircuitName("All"), classOf[TechnologyLocation], dir) + } +} +class TechnologyLocation extends Transform { + def inputForm: CircuitForm = LowForm + def outputForm: CircuitForm = LowForm + def execute(state: CircuitState) = throw new Exception("Technology Location transform execution doesn't work!") + def get(state: CircuitState): String = { + val annos = state.annotations.getOrElse(AnnotationMap(Seq.empty)).annotations + val dir = annos.map { + case Annotation(f, t, s) if t == classOf[TechnologyLocation] => Some(s) + case _ => None + }.flatten + dir.length match { + case 0 => "" + case 1 => + val targetDir = new java.io.File(dir.head) + if(!targetDir.exists()) throw new Exception("Technology yaml directory doesn't exist!") + dir.head + case _ => throw new Exception("Only 1 tech directory annotation allowed!") + } + } +} + + + diff --git a/tapeout/src/main/scala/transforms/utils/InstanceGraph.scala b/tapeout/src/main/scala/transforms/utils/InstanceGraph.scala new file mode 100644 index 000000000..10b37ea83 --- /dev/null +++ b/tapeout/src/main/scala/transforms/utils/InstanceGraph.scala @@ -0,0 +1,51 @@ +package firrtl.analyses + +import scala.collection.mutable + +import firrtl._ +import firrtl.ir._ +import firrtl.Utils._ +import firrtl.Mappers._ + +class InstanceGraph(c: Circuit) { + + private def collectInstances(insts: mutable.Set[WDefInstance])(s: Statement): Statement = s match { + case i: WDefInstance => + insts += i + i + case _ => + s map collectInstances(insts) + s + } + + val moduleMap = c.modules.map({m => (m.name,m) }).toMap + val childInstances = + new mutable.HashMap[String,mutable.Set[WDefInstance]] + for (m <- c.modules) { + childInstances(m.name) = new mutable.HashSet[WDefInstance] + m map collectInstances(childInstances(m.name)) + } + val instanceGraph = new MutableDiGraph[WDefInstance] + val instanceQueue = new mutable.Queue[WDefInstance] + val topInstance = WDefInstance(c.main,c.main) // top instance + instanceQueue.enqueue(topInstance) + while (!instanceQueue.isEmpty) { + val current = instanceQueue.dequeue + for (child <- childInstances(current.module)) { + if (!instanceGraph.contains(child)) { + instanceQueue.enqueue(child) + } + instanceGraph.addEdge(current,child) + } + } + + val graph = DiGraph(instanceGraph) + + lazy val fullHierarchy = graph.pathsInDAG(topInstance) + + def findInstancesInHierarchy(module: String): List[List[WDefInstance]] = { + val instances = graph.getVertices.filter(_.module == module).toList + instances flatMap { i => fullHierarchy(i) } + } + +} diff --git a/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala b/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala new file mode 100644 index 000000000..a11bfa195 --- /dev/null +++ b/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala @@ -0,0 +1,5 @@ +package barstools.tapeout.transforms + +object LowerName { + def apply(s: String): String = s.replace(".", "_").replace("[", "_")replace("]", "") +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala b/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala new file mode 100644 index 000000000..7df199ba9 --- /dev/null +++ 
b/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala @@ -0,0 +1,24 @@ +package barstools.tapeout.transforms + +import chisel3._ +import scala.collection.immutable.ListMap + +final class CustomBundle(elts: (String, Data)*) extends Record { + val elements = ListMap(elts map { case (field, elt) => field -> elt.chiselCloneType }: _*) + def apply(elt: String): Data = elements(elt) + override def cloneType = (new CustomBundle(elements.toList: _*)).asInstanceOf[this.type] +} + +final class CustomIndexedBundle(elts: (Int, Data)*) extends Record { + // Must be String, Data + val elements = ListMap(elts map { case (field, elt) => field.toString -> elt.chiselCloneType }: _*) + def indexedElements = ListMap(elts map { case (field, elt) => field -> elt.chiselCloneType }: _*) + def apply(elt: Int): Data = elements(elt.toString) + override def cloneType = (new CustomIndexedBundle(indexedElements.toList: _*)).asInstanceOf[this.type] +} + +object CustomIndexedBundle { + def apply(gen: Data, idxs: Seq[Int]) = new CustomIndexedBundle(idxs.map(_ -> gen): _*) + // Allows Vecs of elements of different types/widths + def apply(gen: Seq[Data]) = new CustomIndexedBundle(gen.zipWithIndex.map{ case (elt, field) => field -> elt }: _*) +} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala b/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala new file mode 100644 index 000000000..6754136d6 --- /dev/null +++ b/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala @@ -0,0 +1,21 @@ +package barstools.tapeout.transforms + +import net.jcazevedo.moultingyaml._ +import java.io.File + +class YamlFileReader(resource: String) { + def parse[A](file: String = "")(implicit reader: YamlReader[A]) : Seq[A] = { + // If the user doesn't provide a Yaml file name, use defaults + val yamlString = file match { + case f if f.isEmpty => + // Use example config if no file is provided + val stream = getClass.getResourceAsStream(resource) + io.Source.fromInputStream(stream).mkString + case f if new File(f).exists => + scala.io.Source.fromFile(f).getLines.mkString("\n") + case _ => + throw new Exception("No valid Yaml file found!") + } + yamlString.parseYamls.map(x => reader.read(x)) + } +} \ No newline at end of file diff --git a/tapeout/src/test/resources/PadAnnotationVerilogPart.v b/tapeout/src/test/resources/PadAnnotationVerilogPart.v new file mode 100644 index 000000000..9e4b257f7 --- /dev/null +++ b/tapeout/src/test/resources/PadAnnotationVerilogPart.v @@ -0,0 +1,231 @@ +module ExampleTopModuleWithBB_PadFrame( + output clock_Int, + output reset_Int, + output [14:0] io_a_Int, + output [14:0] io_b_Int, + output [13:0] io_c_Int, + input [15:0] io_x_Int, + input [15:0] io_y_Int, + input [15:0] io_z_Int, + input [4:0] io_v_0_Int, + input [4:0] io_v_1_Int, + input [4:0] io_v_2_Int, + input clock_Ext, + input reset_Ext, + input [14:0] io_a_Ext, + input [14:0] io_b_Ext, + input [13:0] io_c_Ext, + output [15:0] io_x_Ext, + output [15:0] io_y_Ext, + output [15:0] io_z_Ext, + inout [2:0] io_analog1_Ext, + inout [2:0] io_analog2_Ext, + output [4:0] io_v_0_Ext, + output [4:0] io_v_1_Ext, + output [4:0] io_v_2_Ext +); + wire pad_digital_from_tristate_foundry_vertical_input_array_reset_in; + wire pad_digital_from_tristate_foundry_vertical_input_array_reset_out; + wire [14:0] pad_digital_from_tristate_foundry_horizontal_input_array_io_a_in; + wire [14:0] pad_digital_from_tristate_foundry_horizontal_input_array_io_a_out; + wire [14:0] 
pad_digital_from_tristate_foundry_horizontal_input_array_io_b_in; + wire [14:0] pad_digital_from_tristate_foundry_horizontal_input_array_io_b_out; + wire [13:0] pad_digital_from_tristate_foundry_horizontal_input_array_io_c_in; + wire [13:0] pad_digital_from_tristate_foundry_horizontal_input_array_io_c_out; + wire [15:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_x_in; + wire [15:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_x_out; + wire [15:0] pad_digital_from_tristate_foundry_vertical_output_array_io_z_in; + wire [15:0] pad_digital_from_tristate_foundry_vertical_output_array_io_z_out; + wire [4:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0_in; + wire [4:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0_out; + wire [4:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1_in; + wire [4:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1_out; + wire [4:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2_in; + wire [4:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2_out; + pad_digital_from_tristate_foundry_vertical_input_array #(.WIDTH(1)) pad_digital_from_tristate_foundry_vertical_input_array_reset ( + .in(pad_digital_from_tristate_foundry_vertical_input_array_reset_in), + .out(pad_digital_from_tristate_foundry_vertical_input_array_reset_out) + ); + pad_digital_from_tristate_foundry_horizontal_input_array #(.WIDTH(15)) pad_digital_from_tristate_foundry_horizontal_input_array_io_a ( + .in(pad_digital_from_tristate_foundry_horizontal_input_array_io_a_in), + .out(pad_digital_from_tristate_foundry_horizontal_input_array_io_a_out) + ); + pad_digital_from_tristate_foundry_horizontal_input_array #(.WIDTH(15)) pad_digital_from_tristate_foundry_horizontal_input_array_io_b ( + .in(pad_digital_from_tristate_foundry_horizontal_input_array_io_b_in), + .out(pad_digital_from_tristate_foundry_horizontal_input_array_io_b_out) + ); + pad_digital_from_tristate_foundry_horizontal_input_array #(.WIDTH(14)) pad_digital_from_tristate_foundry_horizontal_input_array_io_c ( + .in(pad_digital_from_tristate_foundry_horizontal_input_array_io_c_in), + .out(pad_digital_from_tristate_foundry_horizontal_input_array_io_c_out) + ); + pad_digital_from_tristate_foundry_horizontal_output_array #(.WIDTH(16)) pad_digital_from_tristate_foundry_horizontal_output_array_io_x ( + .in(pad_digital_from_tristate_foundry_horizontal_output_array_io_x_in), + .out(pad_digital_from_tristate_foundry_horizontal_output_array_io_x_out) + ); + pad_digital_from_tristate_foundry_vertical_output_array #(.WIDTH(16)) pad_digital_from_tristate_foundry_vertical_output_array_io_z ( + .in(pad_digital_from_tristate_foundry_vertical_output_array_io_z_in), + .out(pad_digital_from_tristate_foundry_vertical_output_array_io_z_out) + ); + pad_analog_fast_custom_horizontal_array #(.WIDTH(3)) pad_analog_fast_custom_horizontal_array_io_analog1 ( + .io(io_analog1_Ext) + ); + pad_analog_slow_foundry_vertical_array #(.WIDTH(3)) pad_analog_slow_foundry_vertical_array_io_analog2 ( + .io(io_analog2_Ext) + ); + pad_digital_from_tristate_foundry_horizontal_output_array #(.WIDTH(5)) pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0 ( + .in(pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0_in), + .out(pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0_out) + ); + pad_digital_from_tristate_foundry_horizontal_output_array #(.WIDTH(5)) 
pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1 ( + .in(pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1_in), + .out(pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1_out) + ); + pad_digital_from_tristate_foundry_horizontal_output_array #(.WIDTH(5)) pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2 ( + .in(pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2_in), + .out(pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2_out) + ); + pad_supply_vdd_horizontal pad_supply_vdd_horizontal_left_0 ( + ); + pad_supply_vdd_horizontal pad_supply_vdd_horizontal_left_1 ( + ); + pad_supply_vdd_horizontal pad_supply_vdd_horizontal_left_2 ( + ); + pad_supply_vdd_vertical pad_supply_vdd_vertical_bottom_0 ( + ); + pad_supply_vdd_vertical pad_supply_vdd_vertical_bottom_1 ( + ); + pad_supply_vss_horizontal pad_supply_vss_horizontal_right_0 ( + ); + assign clock_Int = clock_Ext; + assign reset_Int = pad_digital_from_tristate_foundry_vertical_input_array_reset_out; + assign io_a_Int = pad_digital_from_tristate_foundry_horizontal_input_array_io_a_out; + assign io_b_Int = pad_digital_from_tristate_foundry_horizontal_input_array_io_b_out; + assign io_c_Int = $signed(pad_digital_from_tristate_foundry_horizontal_input_array_io_c_out); + assign io_x_Ext = pad_digital_from_tristate_foundry_horizontal_output_array_io_x_out; + assign io_y_Ext = io_y_Int; + assign io_z_Ext = $signed(pad_digital_from_tristate_foundry_vertical_output_array_io_z_out); + assign io_v_0_Ext = pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0_out; + assign io_v_1_Ext = pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1_out; + assign io_v_2_Ext = pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2_out; + assign pad_digital_from_tristate_foundry_vertical_input_array_reset_in = reset_Ext; + assign pad_digital_from_tristate_foundry_horizontal_input_array_io_a_in = io_a_Ext; + assign pad_digital_from_tristate_foundry_horizontal_input_array_io_b_in = io_b_Ext; + assign pad_digital_from_tristate_foundry_horizontal_input_array_io_c_in = $unsigned(io_c_Ext); + assign pad_digital_from_tristate_foundry_horizontal_output_array_io_x_in = io_x_Int; + assign pad_digital_from_tristate_foundry_vertical_output_array_io_z_in = $unsigned(io_z_Int); + assign pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0_in = io_v_0_Int; + assign pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1_in = io_v_1_Int; + assign pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2_in = io_v_2_Int; +endmodule +module ExampleTopModuleWithBB( + input clock, + input reset, + input [14:0] io_a, + input [14:0] io_b, + input [13:0] io_c, + output [15:0] io_x, + output [15:0] io_y, + output [15:0] io_z, + inout [2:0] io_analog1, + inout [2:0] io_analog2, + output [4:0] io_v_0, + output [4:0] io_v_1, + output [4:0] io_v_2 +); + wire ExampleTopModuleWithBB_PadFrame_clock_Int; + wire ExampleTopModuleWithBB_PadFrame_reset_Int; + wire [14:0] ExampleTopModuleWithBB_PadFrame_io_a_Int; + wire [14:0] ExampleTopModuleWithBB_PadFrame_io_b_Int; + wire [13:0] ExampleTopModuleWithBB_PadFrame_io_c_Int; + wire [15:0] ExampleTopModuleWithBB_PadFrame_io_x_Int; + wire [15:0] ExampleTopModuleWithBB_PadFrame_io_y_Int; + wire [15:0] ExampleTopModuleWithBB_PadFrame_io_z_Int; + wire [4:0] ExampleTopModuleWithBB_PadFrame_io_v_0_Int; + wire [4:0] ExampleTopModuleWithBB_PadFrame_io_v_1_Int; + wire [4:0] ExampleTopModuleWithBB_PadFrame_io_v_2_Int; + 
wire ExampleTopModuleWithBB_PadFrame_clock_Ext; + wire ExampleTopModuleWithBB_PadFrame_reset_Ext; + wire [14:0] ExampleTopModuleWithBB_PadFrame_io_a_Ext; + wire [14:0] ExampleTopModuleWithBB_PadFrame_io_b_Ext; + wire [13:0] ExampleTopModuleWithBB_PadFrame_io_c_Ext; + wire [15:0] ExampleTopModuleWithBB_PadFrame_io_x_Ext; + wire [15:0] ExampleTopModuleWithBB_PadFrame_io_y_Ext; + wire [15:0] ExampleTopModuleWithBB_PadFrame_io_z_Ext; + wire [4:0] ExampleTopModuleWithBB_PadFrame_io_v_0_Ext; + wire [4:0] ExampleTopModuleWithBB_PadFrame_io_v_1_Ext; + wire [4:0] ExampleTopModuleWithBB_PadFrame_io_v_2_Ext; + wire ExampleTopModuleWithBB_Internal_clock; + wire ExampleTopModuleWithBB_Internal_reset; + wire [14:0] ExampleTopModuleWithBB_Internal_io_a; + wire [14:0] ExampleTopModuleWithBB_Internal_io_b; + wire [13:0] ExampleTopModuleWithBB_Internal_io_c; + wire [15:0] ExampleTopModuleWithBB_Internal_io_x; + wire [15:0] ExampleTopModuleWithBB_Internal_io_y; + wire [15:0] ExampleTopModuleWithBB_Internal_io_z; + wire [4:0] ExampleTopModuleWithBB_Internal_io_v_0; + wire [4:0] ExampleTopModuleWithBB_Internal_io_v_1; + wire [4:0] ExampleTopModuleWithBB_Internal_io_v_2; + ExampleTopModuleWithBB_PadFrame ExampleTopModuleWithBB_PadFrame ( + .clock_Int(ExampleTopModuleWithBB_PadFrame_clock_Int), + .reset_Int(ExampleTopModuleWithBB_PadFrame_reset_Int), + .io_a_Int(ExampleTopModuleWithBB_PadFrame_io_a_Int), + .io_b_Int(ExampleTopModuleWithBB_PadFrame_io_b_Int), + .io_c_Int(ExampleTopModuleWithBB_PadFrame_io_c_Int), + .io_x_Int(ExampleTopModuleWithBB_PadFrame_io_x_Int), + .io_y_Int(ExampleTopModuleWithBB_PadFrame_io_y_Int), + .io_z_Int(ExampleTopModuleWithBB_PadFrame_io_z_Int), + .io_v_0_Int(ExampleTopModuleWithBB_PadFrame_io_v_0_Int), + .io_v_1_Int(ExampleTopModuleWithBB_PadFrame_io_v_1_Int), + .io_v_2_Int(ExampleTopModuleWithBB_PadFrame_io_v_2_Int), + .clock_Ext(ExampleTopModuleWithBB_PadFrame_clock_Ext), + .reset_Ext(ExampleTopModuleWithBB_PadFrame_reset_Ext), + .io_a_Ext(ExampleTopModuleWithBB_PadFrame_io_a_Ext), + .io_b_Ext(ExampleTopModuleWithBB_PadFrame_io_b_Ext), + .io_c_Ext(ExampleTopModuleWithBB_PadFrame_io_c_Ext), + .io_x_Ext(ExampleTopModuleWithBB_PadFrame_io_x_Ext), + .io_y_Ext(ExampleTopModuleWithBB_PadFrame_io_y_Ext), + .io_z_Ext(ExampleTopModuleWithBB_PadFrame_io_z_Ext), + .io_analog1_Ext(io_analog1), + .io_analog2_Ext(io_analog2), + .io_v_0_Ext(ExampleTopModuleWithBB_PadFrame_io_v_0_Ext), + .io_v_1_Ext(ExampleTopModuleWithBB_PadFrame_io_v_1_Ext), + .io_v_2_Ext(ExampleTopModuleWithBB_PadFrame_io_v_2_Ext) + ); + ExampleTopModuleWithBB_Internal ExampleTopModuleWithBB_Internal ( + .clock(ExampleTopModuleWithBB_Internal_clock), + .reset(ExampleTopModuleWithBB_Internal_reset), + .io_a(ExampleTopModuleWithBB_Internal_io_a), + .io_b(ExampleTopModuleWithBB_Internal_io_b), + .io_c(ExampleTopModuleWithBB_Internal_io_c), + .io_x(ExampleTopModuleWithBB_Internal_io_x), + .io_y(ExampleTopModuleWithBB_Internal_io_y), + .io_z(ExampleTopModuleWithBB_Internal_io_z), + .io_analog1(io_analog1), + .io_analog2(io_analog2), + .io_v_0(ExampleTopModuleWithBB_Internal_io_v_0), + .io_v_1(ExampleTopModuleWithBB_Internal_io_v_1), + .io_v_2(ExampleTopModuleWithBB_Internal_io_v_2) + ); + assign io_x = ExampleTopModuleWithBB_PadFrame_io_x_Ext; + assign io_y = ExampleTopModuleWithBB_PadFrame_io_y_Ext; + assign io_z = ExampleTopModuleWithBB_PadFrame_io_z_Ext; + assign io_v_0 = ExampleTopModuleWithBB_PadFrame_io_v_0_Ext; + assign io_v_1 = ExampleTopModuleWithBB_PadFrame_io_v_1_Ext; + assign io_v_2 = 
ExampleTopModuleWithBB_PadFrame_io_v_2_Ext; + assign ExampleTopModuleWithBB_PadFrame_io_x_Int = ExampleTopModuleWithBB_Internal_io_x; + assign ExampleTopModuleWithBB_PadFrame_io_y_Int = ExampleTopModuleWithBB_Internal_io_y; + assign ExampleTopModuleWithBB_PadFrame_io_z_Int = ExampleTopModuleWithBB_Internal_io_z; + assign ExampleTopModuleWithBB_PadFrame_io_v_0_Int = ExampleTopModuleWithBB_Internal_io_v_0; + assign ExampleTopModuleWithBB_PadFrame_io_v_1_Int = ExampleTopModuleWithBB_Internal_io_v_1; + assign ExampleTopModuleWithBB_PadFrame_io_v_2_Int = ExampleTopModuleWithBB_Internal_io_v_2; + assign ExampleTopModuleWithBB_PadFrame_clock_Ext = clock; + assign ExampleTopModuleWithBB_PadFrame_reset_Ext = reset; + assign ExampleTopModuleWithBB_PadFrame_io_a_Ext = io_a; + assign ExampleTopModuleWithBB_PadFrame_io_b_Ext = io_b; + assign ExampleTopModuleWithBB_PadFrame_io_c_Ext = io_c; + assign ExampleTopModuleWithBB_Internal_clock = ExampleTopModuleWithBB_PadFrame_clock_Int; + assign ExampleTopModuleWithBB_Internal_reset = ExampleTopModuleWithBB_PadFrame_reset_Int; + assign ExampleTopModuleWithBB_Internal_io_a = ExampleTopModuleWithBB_PadFrame_io_a_Int; + assign ExampleTopModuleWithBB_Internal_io_b = ExampleTopModuleWithBB_PadFrame_io_b_Int; + assign ExampleTopModuleWithBB_Internal_io_c = ExampleTopModuleWithBB_PadFrame_io_c_Int; +endmodule \ No newline at end of file diff --git a/tapeout/src/test/resources/PadPlacement.io b/tapeout/src/test/resources/PadPlacement.io new file mode 100644 index 000000000..435ce274c --- /dev/null +++ b/tapeout/src/test/resources/PadPlacement.io @@ -0,0 +1,236 @@ +(globals + version = 3 + io_order = default +) +(iopad + (bottomleft + (inst name="corner_ll" cell="CORNER_EXAMPLE" ) + ) + (bottomright + (inst name="corner_lr" orientation=MY cell="CORNER_EXAMPLE" ) + ) + (topleft + (inst name="corner_ul" orientation=MX cell="CORNER_EXAMPLE" ) + ) + (topright + (inst name="corner_ur" cell="CORNER_EXAMPLE" ) + ) + (left + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vdd_horizontal_left_0/PAD[0]") # Side: 1, Order: 0 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vdd_horizontal_left_1/PAD[0]") # Side: 1, Order: 1 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vdd_horizontal_left_2/PAD[0]") # Side: 1, Order: 2 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[0]/PAD") # Side: 1, Order: 3 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[1]/PAD") # Side: 1, Order: 4 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[2]/PAD") # Side: 1, Order: 5 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[3]/PAD") # Side: 1, Order: 6 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[4]/PAD") # Side: 1, Order: 7 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[5]/PAD") # Side: 1, Order: 8 + + (inst name = 
"ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[6]/PAD") # Side: 1, Order: 9 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[7]/PAD") # Side: 1, Order: 10 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[8]/PAD") # Side: 1, Order: 11 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[9]/PAD") # Side: 1, Order: 12 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[10]/PAD") # Side: 1, Order: 13 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[11]/PAD") # Side: 1, Order: 14 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[12]/PAD") # Side: 1, Order: 15 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[13]/PAD") # Side: 1, Order: 16 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[14]/PAD") # Side: 1, Order: 17 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[0]/PAD") # Side: 1, Order: 18 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[1]/PAD") # Side: 1, Order: 19 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[2]/PAD") # Side: 1, Order: 20 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[3]/PAD") # Side: 1, Order: 21 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[4]/PAD") # Side: 1, Order: 22 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[5]/PAD") # Side: 1, Order: 23 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[6]/PAD") # Side: 1, Order: 24 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[7]/PAD") # Side: 1, Order: 25 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[8]/PAD") # Side: 1, Order: 26 + + (inst name = 
"ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[9]/PAD") # Side: 1, Order: 27 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[10]/PAD") # Side: 1, Order: 28 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[11]/PAD") # Side: 1, Order: 29 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[12]/PAD") # Side: 1, Order: 30 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[13]/PAD") # Side: 1, Order: 31 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[14]/PAD") # Side: 1, Order: 32 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[0]/PAD") # Side: 1, Order: 33 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[1]/PAD") # Side: 1, Order: 34 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[2]/PAD") # Side: 1, Order: 35 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[3]/PAD") # Side: 1, Order: 36 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[4]/PAD") # Side: 1, Order: 37 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[5]/PAD") # Side: 1, Order: 38 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[6]/PAD") # Side: 1, Order: 39 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[7]/PAD") # Side: 1, Order: 40 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[8]/PAD") # Side: 1, Order: 41 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[9]/PAD") # Side: 1, Order: 42 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[10]/PAD") # Side: 1, Order: 43 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[11]/PAD") # Side: 1, Order: 44 + + (inst name = 
"ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[12]/PAD") # Side: 1, Order: 45 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[13]/PAD") # Side: 1, Order: 46 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[0]/PAD") # Side: 1, Order: 47 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[1]/PAD") # Side: 1, Order: 48 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[2]/PAD") # Side: 1, Order: 49 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[3]/PAD") # Side: 1, Order: 50 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[4]/PAD") # Side: 1, Order: 51 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[5]/PAD") # Side: 1, Order: 52 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[6]/PAD") # Side: 1, Order: 53 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[7]/PAD") # Side: 1, Order: 54 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[8]/PAD") # Side: 1, Order: 55 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[9]/PAD") # Side: 1, Order: 56 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[10]/PAD") # Side: 1, Order: 57 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[11]/PAD") # Side: 1, Order: 58 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[12]/PAD") # Side: 1, Order: 59 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[13]/PAD") # Side: 1, Order: 60 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[14]/PAD") # Side: 1, Order: 61 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[15]/PAD") # Side: 1, Order: 62 + + (inst name = 
"ExampleTopModuleWithBB_PadFrame/pad_analog_fast_custom_horizontal_array_io_analog1/pad_analog_fast_custom_horizontal[0]/PAD") # Side: 1, Order: 63 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_analog_fast_custom_horizontal_array_io_analog1/pad_analog_fast_custom_horizontal[1]/PAD") # Side: 1, Order: 64 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_analog_fast_custom_horizontal_array_io_analog1/pad_analog_fast_custom_horizontal[2]/PAD") # Side: 1, Order: 65 + + ) + (right + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vss_horizontal_right_0/PAD[0]") # Side: 3, Order: 0 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vss_horizontal_right_0/PAD[1]") # Side: 3, Order: 1 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0/pad_digital_from_tristate_foundry_horizontal_output[0]/PAD") # Side: 3, Order: 2 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0/pad_digital_from_tristate_foundry_horizontal_output[1]/PAD") # Side: 3, Order: 3 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0/pad_digital_from_tristate_foundry_horizontal_output[2]/PAD") # Side: 3, Order: 4 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0/pad_digital_from_tristate_foundry_horizontal_output[3]/PAD") # Side: 3, Order: 5 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0/pad_digital_from_tristate_foundry_horizontal_output[4]/PAD") # Side: 3, Order: 6 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1/pad_digital_from_tristate_foundry_horizontal_output[0]/PAD") # Side: 3, Order: 7 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1/pad_digital_from_tristate_foundry_horizontal_output[1]/PAD") # Side: 3, Order: 8 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1/pad_digital_from_tristate_foundry_horizontal_output[2]/PAD") # Side: 3, Order: 9 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1/pad_digital_from_tristate_foundry_horizontal_output[3]/PAD") # Side: 3, Order: 10 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1/pad_digital_from_tristate_foundry_horizontal_output[4]/PAD") # Side: 3, Order: 11 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2/pad_digital_from_tristate_foundry_horizontal_output[0]/PAD") # Side: 3, Order: 12 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2/pad_digital_from_tristate_foundry_horizontal_output[1]/PAD") # Side: 3, Order: 13 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2/pad_digital_from_tristate_foundry_horizontal_output[2]/PAD") # Side: 3, Order: 14 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2/pad_digital_from_tristate_foundry_horizontal_output[3]/PAD") # Side: 3, Order: 15 + + (inst name = 
"ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2/pad_digital_from_tristate_foundry_horizontal_output[4]/PAD") # Side: 3, Order: 16 + + ) + (top + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_input_array_reset/pad_digital_from_tristate_foundry_vertical_input[0]/PAD") # Side: 2, Order: 0 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[0]/PAD") # Side: 2, Order: 1 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[1]/PAD") # Side: 2, Order: 2 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[2]/PAD") # Side: 2, Order: 3 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[3]/PAD") # Side: 2, Order: 4 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[4]/PAD") # Side: 2, Order: 5 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[5]/PAD") # Side: 2, Order: 6 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[6]/PAD") # Side: 2, Order: 7 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[7]/PAD") # Side: 2, Order: 8 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[8]/PAD") # Side: 2, Order: 9 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[9]/PAD") # Side: 2, Order: 10 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[10]/PAD") # Side: 2, Order: 11 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[11]/PAD") # Side: 2, Order: 12 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[12]/PAD") # Side: 2, Order: 13 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[13]/PAD") # Side: 2, Order: 14 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[14]/PAD") # Side: 2, Order: 15 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[15]/PAD") # Side: 2, Order: 16 + + ) + (bottom + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vdd_vertical_bottom_0/PAD[0]") # Side: 4, 
Order: 0 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vdd_vertical_bottom_1/PAD[0]") # Side: 4, Order: 1 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_analog_slow_foundry_vertical_array_io_analog2/pad_analog_slow_foundry_vertical[0]/PAD") # Side: 4, Order: 2 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_analog_slow_foundry_vertical_array_io_analog2/pad_analog_slow_foundry_vertical[1]/PAD") # Side: 4, Order: 3 + + (inst name = "ExampleTopModuleWithBB_PadFrame/pad_analog_slow_foundry_vertical_array_io_analog2/pad_analog_slow_foundry_vertical[2]/PAD") # Side: 4, Order: 4 + + ) +) \ No newline at end of file diff --git a/tapeout/src/test/scala/transforms/clkgen/ClkGenSpec.scala b/tapeout/src/test/scala/transforms/clkgen/ClkGenSpec.scala new file mode 100644 index 000000000..17ae1c764 --- /dev/null +++ b/tapeout/src/test/scala/transforms/clkgen/ClkGenSpec.scala @@ -0,0 +1,181 @@ +// See LICENSE for license details. + +package barstools.tapeout.transforms.clkgen + +import chisel3._ +import firrtl._ +import org.scalatest.{FlatSpec, Matchers} +import chisel3.experimental._ +import chisel3.iotesters._ +import chisel3.util.HasBlackBoxInline +import barstools.tapeout.transforms.pads.TopModule + +// Purely to see that clk src tagging works with BBs +class FakeBBClk extends BlackBox with HasBlackBoxInline with IsClkModule { + val io = IO(new Bundle { + val inClk = Input(Clock()) + val outClk = Output(Vec(3, Clock())) + }) + + annotateClkPort(io.inClk, Sink()) + val generatedClks = io.outClk.map { case elt => + val id = getIOName(elt) + val srcId = getIOName(io.inClk) + annotateClkPort(elt.asInstanceOf[Element]) + GeneratedClk(id, Seq(srcId), Seq(0, 1, 2)) + }.toSeq + + annotateDerivedClks(ClkDiv, generatedClks) + + // Generates a "FakeBB.v" file with the following Verilog module + setInline("FakeBBClk.v", + s""" + |module FakeBBClk( + | input inClk, + | output outClk_0, + | output outClk_1, + | output outClk_2 + |); + | always @* begin + | outClk_0 = inClk; + | outClk_1 = inClk; + | outClk_2 = inClk; + | end + |endmodule + """.stripMargin) +} + +class ModWithNestedClkIO(numPhases: Int) extends Bundle { + val inClk = Input(Clock()) + val bbOutClk = Output(Vec(3, Clock())) + val clkDivOut = Output(Vec(numPhases, Clock())) +} + +class TestModWithNestedClkIO(numPhases: Int) extends Bundle { + val bbOutClk = Output(Vec(3, Bool())) + val clkDivOut = Output(Vec(numPhases, Bool())) +} + +class ModWithNestedClk(divBy: Int, phases: Seq[Int], syncReset: Boolean) extends Module { + + val io = IO(new ModWithNestedClkIO(phases.length)) + + val bb = Module(new FakeBBClk) + bb.io.inClk := io.inClk + io.bbOutClk := bb.io.outClk + val clkDiv = Module(new SEClkDivider(divBy, phases, syncReset = syncReset)) + clkDiv.io.reset := reset + clkDiv.io.inClk := io.inClk + phases.zipWithIndex.foreach { case (phase, idx) => io.clkDivOut(idx) := clkDiv.io.outClks(phase) } + +} + +class TopModuleWithClks(val divBy: Int, val phases: Seq[Int]) extends TopModule(usePads = false) { + val io = IO(new Bundle { + val gen1 = new TestModWithNestedClkIO(phases.length) + val gen2 = new TestModWithNestedClkIO(phases.length) + val gen3 = new TestModWithNestedClkIO(phases.length) + val fakeClk1 = Input(Clock()) + val fakeClk2 = Input(Clock()) + }) + + // TODO: Don't have to type Some + annotateClkPort(clock, + id = "clock", // not in io bundle + sink = Sink(Some(ClkSrc(period = 5.0, async = Seq(getIOName(io.fakeClk1))))) + ) + annotateClkPort(io.fakeClk1, Sink(Some(ClkSrc(period = 4.0)))) + 
annotateClkPort(io.fakeClk2, Sink(Some(ClkSrc(period = 3.0)))) + + // Most complicated: test chain of clock generators + val gen1 = Module(new ModWithNestedClk(divBy, phases, syncReset = true)) + io.gen1.bbOutClk := Vec(gen1.io.bbOutClk.map(x => x.asUInt)) + io.gen1.clkDivOut := Vec(gen1.io.clkDivOut.map(x => x.asUInt)) + gen1.io.inClk := clock + // ClkDiv on generated clk -> reset occurs before first input clk edge + val gen2 = Module(new ModWithNestedClk(divBy, phases, syncReset = false)) + io.gen2.bbOutClk := Vec(gen2.io.bbOutClk.map(x => x.asUInt)) + io.gen2.clkDivOut := Vec(gen2.io.clkDivOut.map(x => x.asUInt)) + gen2.io.inClk := gen1.io.clkDivOut.last + val gen3 = Module(new ModWithNestedClk(divBy, phases, syncReset = false)) + io.gen3.bbOutClk := Vec(gen3.io.bbOutClk.map(x => x.asUInt)) + io.gen3.clkDivOut := Vec(gen3.io.clkDivOut.map(x => x.asUInt)) + gen3.io.inClk := gen1.io.clkDivOut.last +} + +class TopModuleWithClksTester(c: TopModuleWithClks) extends PeekPokeTester(c) { + val maxT = c.divBy * c.divBy * 4 + val numSubClkOutputs = c.io.gen1.clkDivOut.length + val gen1Out = Seq.fill(numSubClkOutputs)(scala.collection.mutable.ArrayBuffer[Int]()) + val gen2Out = Seq.fill(numSubClkOutputs)(scala.collection.mutable.ArrayBuffer[Int]()) + val gen3Out = Seq.fill(numSubClkOutputs)(scala.collection.mutable.ArrayBuffer[Int]()) + reset(10) + for (t <- 0 until maxT) { + for (k <- 0 until numSubClkOutputs) { + gen1Out(k) += peek(c.io.gen1.clkDivOut(k)).intValue + gen2Out(k) += peek(c.io.gen2.clkDivOut(k)).intValue + gen3Out(k) += peek(c.io.gen3.clkDivOut(k)).intValue + } + step(1) + } + + val clkCounts = (0 until maxT) + val clkCountsModDiv = clkCounts.map(_ % c.divBy) + for (k <- 0 until numSubClkOutputs) { + val expected = clkCountsModDiv.map(x => if (x == c.phases(k)) 1 else 0) + expect(gen1Out(k) == expected, s"gen1Out($k) incorrect!") + println(s"gen1Out($k): \t${gen1Out(k).mkString("")}") + } + + val gen1ClkCounts = (0 until maxT/c.divBy).map(i => Seq.fill(c.divBy)(i)).flatten + val gen1ClkCountsModDiv = gen1ClkCounts.map(_ % c.divBy) + + for (k <- 0 until numSubClkOutputs) { + // Handle initial transient + val fillVal = if (c.phases.last == c.divBy - 1 && k == numSubClkOutputs - 1) 1 else 0 + val expected = Seq.fill(c.phases.last)(fillVal) ++ + gen1ClkCountsModDiv.map(x => if (x == c.phases(k)) 1 else 0).dropRight(c.phases.last) + expect(gen2Out(k) == expected, s"gen1Out($k) incorrect!") + println(s"gen2Out($k): \t${gen2Out(k).mkString("")}") + println(s"expected: \t${expected.mkString("")}") + } + + expect(gen2Out == gen3Out, "gen2Out should equal gen3Out") + +} + +class ClkGenSpec extends FlatSpec with Matchers { + + def readOutputFile(dir: String, f: String): String = + scala.io.Source.fromFile(Seq(dir, f).mkString("/")).getLines.mkString("\n") + def readResource(resource: String): String = { + val stream = getClass.getResourceAsStream(resource) + scala.io.Source.fromInputStream(stream).mkString + } + + def checkOutputs(dir: String) = { + } + + behavior of "top module with clk gens" + + it should "pass simple testbench" in { + val optionsManager = new TesterOptionsManager { + firrtlOptions = firrtlOptions.copy( + compilerName = "verilog" + /*annotations = List(passes.clocklist.ClockListAnnotation( + s"-c:TopModuleWithClks:-m:TopModuleWithClks:-o:test.clk" + )), + customTransforms = Seq(new passes.clocklist.ClockListTransform())*/ + ) + testerOptions = testerOptions.copy(isVerbose = false, backendName = "verilator", displayBase = 10) + commonOptions = commonOptions.copy(targetDirName 
= "test_run_dir/ClkTB") + } + // WARNING: TB requires that phase divBy - 1 should be at the end of the Seq to be OK during initial transient + iotesters.Driver.execute(() => new TopModuleWithClks(4, Seq(0, 1, 3)), optionsManager) { c => + val dir = optionsManager.commonOptions.targetDirName + checkOutputs(dir) + new TopModuleWithClksTester(c) + } should be (true) + } + +} \ No newline at end of file diff --git a/tapeout/src/test/scala/transforms/pads/AddIOPadsSpec.scala b/tapeout/src/test/scala/transforms/pads/AddIOPadsSpec.scala new file mode 100644 index 000000000..b578be974 --- /dev/null +++ b/tapeout/src/test/scala/transforms/pads/AddIOPadsSpec.scala @@ -0,0 +1,226 @@ +// See LICENSE for license details. + +package barstools.tapeout.transforms.pads + +import chisel3._ +import firrtl._ +import org.scalatest.{FlatSpec, Matchers} +import chisel3.experimental._ +import chisel3.util.HasBlackBoxInline +import chisel3.iotesters._ + +class BB extends BlackBox with HasBlackBoxInline { + val io = IO(new Bundle { + val c = Input(SInt(14.W)) + val z = Output(SInt(16.W)) + val analog1 = Analog(3.W) + val analog2 = analog1.chiselCloneType + }) + // Generates a "FakeBB.v" file with the following Verilog module + setInline("FakeBB.v", + s""" + |module BB( + | input [15:0] c, + | output [15:0] z, + | inout [2:0] analog1, + | inout [2:0] analog2 + |); + | always @* begin + | z = 2 * c; + | analog2 = analog1 + 1; + | end + |endmodule + """.stripMargin) +} + +// If no template file is provided, it'll use the default one (example) in the resource folder +// Default pad side is Top if no side is specified for a given IO +// You can designate the number of different supply pads on each chip side +class ExampleTopModuleWithBB extends TopModule( + supplyAnnos = Seq( + SupplyAnnotation(padName = "vdd", leftSide = 3, bottomSide = 2), + SupplyAnnotation(padName = "vss", rightSide = 1) + )) { + val io = IO(new Bundle { + val a = Input(UInt(15.W)) + val b = a.chiselCloneType + val c = Input(SInt(14.W)) + val x = Output(UInt(16.W)) + val y = x.chiselCloneType + val z = Output(SInt(16.W)) + val analog1 = Analog(3.W) + val analog2 = analog1.chiselCloneType + val v = Output(Vec(3, UInt(5.W))) + }) + + // Can annotate aggregates with pad side location + pad name (should be a name in the yaml template) + annotatePad(io.v, Right, "from_tristate_foundry") + // Can annotate individual elements + annotatePad(io.analog1, Left, "fast_custom") + annotatePad(io.analog2, Bottom, "slow_foundry") + // Looks for a pad that matches the IO type (digital in, digital out, analog) if no name is specified + Seq(io.a, io.b, io.c, io.x) foreach { x => annotatePad(x, Left) } + // Some signals might not want pads associated with them + noPad(io.y) + // Clk might come directly from bump + noPad(clock) + + val bb = Module(new BB()) + bb.io.c := io.c + io.z := bb.io.z + bb.io.analog1 <> io.analog1 + bb.io.analog2 <> io.analog2 + + io.x := io.a + 1.U + io.y := io.b - 1.U + + io.v foreach { lhs => lhs := io.a } + +} + +class SimpleTopModuleTester(c: ExampleTopModuleWithBB) extends PeekPokeTester(c) { + val ax = Seq(5, 3) + val bx = Seq(8, 2) + val cx = Seq(-11, -9) + for (i <- 0 until ax.length) { + poke(c.io.a, ax(i)) + poke(c.io.b, bx(i)) + poke(c.io.c, cx(i)) + expect(c.io.x, ax(i) + 1) + expect(c.io.y, bx(i) - 1) + expect(c.io.z, 2 * cx(i)) + c.io.v foreach { out => expect(out, ax(i)) } + } + // Analog can't be peeked + poked +} + +// Notes: Annotations +// a in 15: left, default digital +// b in 15: left, default digital +// c in 14: left, 
default digital ; signed +// x out 16: left, default digital +// y out: NOPAD +// clk in: NOPAD +// analog1 3: left, fast_custom +// analog2 3: bottom, slow_foundry +// v (vec of 3 with 5, out): right, from_tristate_foundry +// reset in: UNSPECIFIED: top, default digital +// z out 16: UNSPECIFIED: top, default digital ; signed +// vdd, left: 3, group of 1 +// vdd, bottom: 2, group of 1 +// vss, right: 1, group of 2 +// Notes: Used pads +// digital horizontal (from_tristate_foundry) +// in + out +// analog fast_custom horizontal +// analog slow_foundry vertical +// digital vertical (from_tristate_foundry) +// in + out +// vdd horizontal +// vdd vertical +// vss horizontal + +class IOPadSpec extends FlatSpec with Matchers { + + def readOutputFile(dir: String, f: String): String = + scala.io.Source.fromFile(Seq(dir, f).mkString("/")).getLines.mkString("\n") + def readResource(resource: String): String = { + val stream = getClass.getResourceAsStream(resource) + scala.io.Source.fromInputStream(stream).mkString + } + + def checkOutputs(dir: String) = { + // Show that black box source helper is run + //readOutputFile(dir, "black_box_verilog_files.f") should include ("pad_supply_vdd_horizontal.v") + + val padBBEx = s"""// Digital Pad Example + |// Signal Direction: Input + |// Pad Orientation: Horizontal + |// Call your instance PAD + |module pad_digital_from_tristate_foundry_horizontal_input( + | input in, + | output reg out + |); + | // Where you would normally dump your pad instance + | always @* begin + | out = in; + | end + |endmodule + | + |module pad_digital_from_tristate_foundry_horizontal_input_array #( + | parameter int WIDTH=1 + |)( + | input [WIDTH-1:0] in, + | output reg [WIDTH-1:0] out + |); + | pad_digital_from_tristate_foundry_horizontal_input pad_digital_from_tristate_foundry_horizontal_input[WIDTH-1:0]( + | .in(in), + | .out(out) + | );""".stripMargin + // Make sure black box templating is OK + readOutputFile(dir, "pad_digital_from_tristate_foundry_horizontal_input_array.v") should include (padBBEx) + + val verilog = readOutputFile(dir, "ExampleTopModuleWithBB.v") + // Pad frame + top should be exact + verilog should include (readResource("/PadAnnotationVerilogPart.v")) + // Pad Placement IO file should be exact + val padIO = readOutputFile(dir, "pads.io") + padIO should include(readResource("/PadPlacement.io")) + } + + behavior of "top module with blackbox" + + import barstools.tapeout.transforms._ + + it should "pass simple testbench" in { + val optionsManager = new TesterOptionsManager { + firrtlOptions = firrtlOptions.copy( + compilerName = "verilog" + // annotations = List(TechnologyLocation("./RealTech")) + ) + testerOptions = testerOptions.copy(isVerbose = true, backendName = "verilator", displayBase = 10) + commonOptions = commonOptions.copy(targetDirName = "test_run_dir/PadsTB") + } + iotesters.Driver.execute(() => new ExampleTopModuleWithBB, optionsManager) { c => + val dir = optionsManager.commonOptions.targetDirName + checkOutputs(dir) + new SimpleTopModuleTester(c) + } should be (true) + } +/* + it should "create proper IO pads + black box in low firrtl" in { + val optionsManager = new ExecutionOptionsManager("barstools") with HasChiselExecutionOptions with HasFirrtlOptions { + firrtlOptions = firrtlOptions.copy(compilerName = "low") + commonOptions = commonOptions.copy(targetDirName = "test_run_dir/LoFirrtl") + //commonOptions = commonOptions.copy(globalLogLevel = logger.LogLevel.Info) + } + val success = chisel3.Driver.execute(optionsManager, () => new 
ExampleTopModuleWithBB) match { + case ChiselExecutionSuccess(_, chirrtl, Some(FirrtlExecutionSuccess(_, firrtl))) => + firrtl should include ("ExampleTopModuleWithBB_PadFrame") + firrtl should include ("ExampleTopModuleWithBB_Internal") + firrtl should not include ("FakeBBPlaceholder") + true + case _ => false + } + success should be (true) + } +*/ + it should "create proper IO pads + black box in verilog" in { + val optionsManager = new ExecutionOptionsManager("barstools") with HasChiselExecutionOptions with HasFirrtlOptions { + firrtlOptions = firrtlOptions.copy( + compilerName = "verilog" + ) + commonOptions = commonOptions.copy(targetDirName = "test_run_dir/PadsVerilog") + //commonOptions = commonOptions.copy(globalLogLevel = logger.LogLevel.Info) + } + val success = chisel3.Driver.execute(optionsManager, () => new ExampleTopModuleWithBB) match { + case ChiselExecutionSuccess(_, chirrtl, Some(FirrtlExecutionSuccess(_, verilog))) => + true + case _ => false + } + success should be (true) + val dir = optionsManager.commonOptions.targetDirName + checkOutputs(dir) + } + +} \ No newline at end of file From e8dc1035bf6e11b036ded0a77d4220a354319d74 Mon Sep 17 00:00:00 2001 From: Adam Izraelevitz Date: Thu, 23 Feb 2017 13:41:17 -0800 Subject: [PATCH 010/273] Fix for firrtl issue 459, reworking annotation API --- tapeout/src/main/scala/transforms/ConvertToExtModPass.scala | 2 +- tapeout/src/main/scala/transforms/EnumerateModules.scala | 2 +- tapeout/src/main/scala/transforms/ReParentCircuit.scala | 2 +- tapeout/src/main/scala/transforms/RemoveUnusedModules.scala | 2 +- .../src/main/scala/transforms/RenameModulesAndInstances.scala | 2 +- tapeout/src/main/scala/transforms/ResetInverter.scala | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala index 98425fd0c..22e6bda2f 100644 --- a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala +++ b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala @@ -31,6 +31,6 @@ class ConvertToExtMod(classify: (Module) => Boolean) extends Transform with Pass def passSeq = Seq(new ConvertToExtModPass(classify)) def execute(state: CircuitState): CircuitState = { - CircuitState(runPasses(state.circuit), state.form) + state.copy(circuit = runPasses(state.circuit)) } } diff --git a/tapeout/src/main/scala/transforms/EnumerateModules.scala b/tapeout/src/main/scala/transforms/EnumerateModules.scala index ec4389c62..27d3e9e80 100644 --- a/tapeout/src/main/scala/transforms/EnumerateModules.scala +++ b/tapeout/src/main/scala/transforms/EnumerateModules.scala @@ -27,6 +27,6 @@ class EnumerateModules(enumerate: (Module) => Unit) extends Transform with PassB def passSeq = Seq(new EnumerateModulesPass(enumerate)) def execute(state: CircuitState): CircuitState = { - CircuitState(runPasses(state.circuit), state.form) + state.copy(circuit = runPasses(state.circuit)) } } diff --git a/tapeout/src/main/scala/transforms/ReParentCircuit.scala b/tapeout/src/main/scala/transforms/ReParentCircuit.scala index da3f079a6..acf26c98e 100644 --- a/tapeout/src/main/scala/transforms/ReParentCircuit.scala +++ b/tapeout/src/main/scala/transforms/ReParentCircuit.scala @@ -21,6 +21,6 @@ class ReParentCircuit(newTopName: String) extends Transform with PassBased { def passSeq = Seq(new ReParentCircuitPass(newTopName)) def execute(state: CircuitState): CircuitState = { - CircuitState(runPasses(state.circuit), state.form) + state.copy(circuit = 
runPasses(state.circuit)) } } diff --git a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala index d68edbea5..72a7aaf31 100644 --- a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala +++ b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala @@ -54,6 +54,6 @@ class RemoveUnusedModules extends Transform with PassBased { def passSeq = Seq(new RemoveUnusedModulesPass) def execute(state: CircuitState): CircuitState = { - CircuitState(runPasses(state.circuit), state.form) + state.copy(circuit = runPasses(state.circuit)) } } diff --git a/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala b/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala index 2a940563d..6adeacf09 100644 --- a/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala +++ b/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala @@ -37,6 +37,6 @@ class RenameModulesAndInstances(rename: (String) => String) extends Transform wi def passSeq = Seq(new RenameModulesAndInstancesPass(rename)) def execute(state: CircuitState): CircuitState = { - CircuitState(runPasses(state.circuit), state.form) + state.copy(circuit = runPasses(state.circuit)) } } diff --git a/tapeout/src/main/scala/transforms/ResetInverter.scala b/tapeout/src/main/scala/transforms/ResetInverter.scala index f0bd34498..c699499e5 100644 --- a/tapeout/src/main/scala/transforms/ResetInverter.scala +++ b/tapeout/src/main/scala/transforms/ResetInverter.scala @@ -48,9 +48,9 @@ class ResetInverterTransform extends Transform { override def execute(state: CircuitState): CircuitState = { getMyAnnotations(state) match { - case Nil => CircuitState(state.circuit, LowForm) + case Nil => state case Seq(ResetInverterAnnotation(ModuleName(state.circuit.main, CircuitName(_)))) => - CircuitState(ResetN.run(state.circuit), LowForm) + state.copy(circuit = ResetN.run(state.circuit)) case annotations => throw new Exception(s"There should be only one InvertReset annotation: got ${annotations.mkString(" -- ")}") } From 4745d299120fbd66529f8098d4a46d1f8b12a206 Mon Sep 17 00:00:00 2001 From: Adam Izraelevitz Date: Tue, 14 Mar 2017 23:00:49 -0700 Subject: [PATCH 011/273] Fix transforms for firrtl/#459 issue. 
(#13) --- .../src/main/scala/transforms/clkgen/ClkSrcTransform.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala b/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala index a003abf8b..d5e887c6d 100644 --- a/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala +++ b/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala @@ -21,7 +21,7 @@ class ClkSrcTransform extends Transform with SimpleRun { InferTypes, new CreateClkConstraints(clkModAnnos, clkPortAnnos, targetDir) ) - CircuitState(runPasses(state.circuit, passSeq), LowForm) + state.copy(state = runPasses(state.circuit, passSeq), outputForm = outputForm) } } -} \ No newline at end of file +} From 164bf2152c37430bcbc1b4c39c66920d6896ed66 Mon Sep 17 00:00:00 2001 From: edwardcwang Date: Tue, 14 Mar 2017 23:24:31 -0700 Subject: [PATCH 012/273] RegInit is no longer in util (#14) --- tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala b/tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala index 755a66aaa..3f41a1285 100644 --- a/tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala +++ b/tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala @@ -2,7 +2,6 @@ package barstools.tapeout.transforms.clkgen import chisel3.experimental.{withClockAndReset, withClock, withReset} import chisel3._ -import chisel3.util.RegInit import barstools.tapeout.transforms._ import chisel3.util.HasBlackBoxInline @@ -125,4 +124,4 @@ class SEClkDivider(divBy: Int, phases: Seq[Int], analogFile: String = "", syncRe } else throw new Exception("Clock divider Verilog file invalid!") } -} \ No newline at end of file +} From f7056f3529e34489b223993e975da0b2d66068a8 Mon Sep 17 00:00:00 2001 From: Angie Wang Date: Tue, 14 Mar 2017 23:59:57 -0700 Subject: [PATCH 013/273] Fft changes (#15) * modified CustomBundle to also apply on Int * programmatic bundle should take T <: Data instead of Data * turns out indexedElements doesn't synthesize * had to change a bunch of files to get clk/pads compiling again with recent firrtl mods --- .../transforms/clkgen/ClkSrcTransform.scala | 2 +- .../transforms/clkgen/CreateClkConstraints.scala | 2 +- .../transforms/pads/AddIOPadsTransform.scala | 11 +++++------ .../transforms/utils/ProgrammaticBundle.scala | 16 +++++++++------- .../scala/transforms/ResetInverterSpec.scala | 1 - 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala b/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala index d5e887c6d..4a447cb78 100644 --- a/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala +++ b/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala @@ -21,7 +21,7 @@ class ClkSrcTransform extends Transform with SimpleRun { InferTypes, new CreateClkConstraints(clkModAnnos, clkPortAnnos, targetDir) ) - state.copy(state = runPasses(state.circuit, passSeq), outputForm = outputForm) + state.copy(circuit = runPasses(state.circuit, passSeq)) } } } diff --git a/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala b/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala index 2e5021de5..ea2ba22f3 100644 --- a/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala +++ b/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala @@ -89,7 +89,7 @@ class 
CreateClkConstraints( }).toSet val inlineTransform = new InlineInstances - val inlinedCircuit = inlineTransform.run(onlyClockCircuit, modulesToInline, Set()).circuit + val inlinedCircuit = inlineTransform.run(onlyClockCircuit, modulesToInline, Set(), None).circuit val topModule = inlinedCircuit.modules.find(_.name == top).getOrElse(throwInternalError) diff --git a/tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala b/tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala index 2531a6321..df4536a53 100644 --- a/tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala +++ b/tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala @@ -16,7 +16,7 @@ class AddIOPadsTransform extends Transform with SimpleRun { val collectedAnnos = HasPadAnnotation(getMyAnnotations(state)) collectedAnnos match { // Transform not used - case None => CircuitState(state.circuit, LowForm) + case None => state case Some(x) => val techLoc = (new TechnologyLocation).get(state) // Get foundry pad templates from yaml @@ -45,11 +45,10 @@ class AddIOPadsTransform extends Transform with SimpleRun { ) // Expects BlackBox helper to be run after to inline pad Verilog! val prevAnnos = state.annotations.getOrElse(AnnotationMap(Seq.empty)).annotations - val cs = CircuitState( - runPasses(circuitWithBBs, passSeq), - LowForm, - Some(AnnotationMap(prevAnnos ++ bbAnnotations)) - ) + val cs = state.copy( + circuit = runPasses(circuitWithBBs, passSeq), + annotations = Some(AnnotationMap(prevAnnos ++ bbAnnotations))) + // TODO: *.f file is overwritten on subsequent executions, but it doesn't seem to be used anywhere? (new firrtl.transforms.BlackBoxSourceHelper).execute(cs) } diff --git a/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala b/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala index 7df199ba9..17197c805 100644 --- a/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala +++ b/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala @@ -3,22 +3,24 @@ package barstools.tapeout.transforms import chisel3._ import scala.collection.immutable.ListMap -final class CustomBundle(elts: (String, Data)*) extends Record { +final class CustomBundle[T <: Data](elts: (String, T)*) extends Record { val elements = ListMap(elts map { case (field, elt) => field -> elt.chiselCloneType }: _*) - def apply(elt: String): Data = elements(elt) + def apply(elt: String): T = elements(elt) + def apply(elt: Int): T = elements(elt.toString) override def cloneType = (new CustomBundle(elements.toList: _*)).asInstanceOf[this.type] } -final class CustomIndexedBundle(elts: (Int, Data)*) extends Record { +final class CustomIndexedBundle[T <: Data](elts: (Int, T)*) extends Record { // Must be String, Data val elements = ListMap(elts map { case (field, elt) => field.toString -> elt.chiselCloneType }: _*) - def indexedElements = ListMap(elts map { case (field, elt) => field -> elt.chiselCloneType }: _*) - def apply(elt: Int): Data = elements(elt.toString) + // TODO: Make an equivalent to the below work publicly + private def indexedElements = ListMap(elts map { case (field, elt) => field -> elt.chiselCloneType }: _*) + def apply(elt: Int): T = elements(elt.toString) override def cloneType = (new CustomIndexedBundle(indexedElements.toList: _*)).asInstanceOf[this.type] } object CustomIndexedBundle { - def apply(gen: Data, idxs: Seq[Int]) = new CustomIndexedBundle(idxs.map(_ -> gen): _*) + def apply[T <: Data](gen: T, idxs: Seq[Int]) = new CustomIndexedBundle(idxs.map(_ -> gen): _*) // 
Allows Vecs of elements of different types/widths - def apply(gen: Seq[Data]) = new CustomIndexedBundle(gen.zipWithIndex.map{ case (elt, field) => field -> elt }: _*) + def apply[T <: Data](gen: Seq[T]) = new CustomIndexedBundle(gen.zipWithIndex.map{ case (elt, field) => field -> elt }: _*) } \ No newline at end of file diff --git a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala index fd49435ed..0ae41dc1e 100644 --- a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala +++ b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala @@ -3,7 +3,6 @@ package barstools.tapeout.transforms import chisel3._ -import chisel3.util.RegInit import firrtl._ import org.scalatest.{FreeSpec, Matchers} From d039935642f4537aebe70de4a6b305cb9484480c Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 15 Mar 2017 00:28:30 -0700 Subject: [PATCH 014/273] Typo --- .../src/main/scala/transforms/RenameModulesAndInstances.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala b/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala index 6adeacf09..f0a4dd80f 100644 --- a/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala +++ b/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala @@ -7,7 +7,7 @@ import firrtl.ir._ import firrtl.passes.Pass // This doesn't rename ExtModules under the assumption that they're some -// Verilog black box and therefor can't be renamed. Since the point is to +// Verilog black box and therefore can't be renamed. Since the point is to // allow FIRRTL to be linked together using "cat" and ExtModules don't get // emitted, this should be safe. class RenameModulesAndInstancesPass(rename: (String) => String) extends Pass { From 35b325dc8194fb24c76a3ab447fa9758fcbd5e5c Mon Sep 17 00:00:00 2001 From: Adam Izraelevitz Date: Wed, 15 Mar 2017 12:16:22 -0700 Subject: [PATCH 015/273] Update README.md with example invocation (#16) --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index 6429c26af..bc6f3d9b9 100644 --- a/README.md +++ b/README.md @@ -3,3 +3,15 @@ Useful utilities for BAR projects Passes/Transforms that could be useful if added here: * Check that a module was de-duplicated. Useful for MIM CAD flows and currently done in python. + +Be sure to publish-local the following repositories: +* ucb-bar/chisel-testers (requires ucb-bar/firrtl-interpreter) +* ucb-bar/firrtl + +Example Usage: +``` +sbt +> compile +> project tapeout +> run-main barstools.tapeout.transforms.GenerateTop -i .fir -o .v --syn-top --harness-top +``` From 2d7806ca798f64d17fcc8b7aa69a717eaa3e1b1f Mon Sep 17 00:00:00 2001 From: chick Date: Thu, 16 Mar 2017 11:48:53 -0700 Subject: [PATCH 016/273] I would like to take the scalatest version here back to 2.2.5 because it causes problems with IntelliJ right now. I don't see any specific features of 3.0.0 that are being used here. 
--- project/Dependencies.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index f4423b4db..52c074b1a 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -2,7 +2,7 @@ import sbt._ import Keys._ object Dependencies { - val scalatestVersion = "3.0.0" + val scalatestVersion = "2.2.5" val scalatest = "org.scalatest" %% "scalatest" % scalatestVersion % "test" val scalacheckVersion = "1.12.4" val scalacheck = "org.scalacheck" %% "scalacheck" % scalacheckVersion % "test" From f4a8715fa4c7a96dabb380f29cf843bb01ad0dd0 Mon Sep 17 00:00:00 2001 From: Stevo Date: Wed, 22 Mar 2017 14:37:26 -0700 Subject: [PATCH 017/273] Combine generates, make it a trait (#11) * [stevo]: combine generates, make it a trait * [stevo]: add Generator ala rocket-chip, some other cleanup * [stevo]: remove Generator, since that generates firrtl... * [stevo]: still debugging * [stevo]: okay i think it works now * [stevo]: oops * Refactor new generate code. Mostly just style stuff. --- .../src/main/scala/transforms/Generate.scala | 217 ++++++++++++++++++ .../scala/transforms/GenerateHarness.scala | 79 ------- .../main/scala/transforms/GenerateTop.scala | 77 ------- .../transforms/GenerateTopAndHarness.scala | 120 ---------- 4 files changed, 217 insertions(+), 276 deletions(-) create mode 100644 tapeout/src/main/scala/transforms/Generate.scala delete mode 100644 tapeout/src/main/scala/transforms/GenerateHarness.scala delete mode 100644 tapeout/src/main/scala/transforms/GenerateTop.scala delete mode 100644 tapeout/src/main/scala/transforms/GenerateTopAndHarness.scala diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala new file mode 100644 index 000000000..79bbd3b0f --- /dev/null +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -0,0 +1,217 @@ +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.ir._ +import firrtl.annotations._ +import firrtl.passes.Pass + +import java.io.File +import firrtl.annotations.AnnotationYamlProtocol._ +import net.jcazevedo.moultingyaml._ +import com.typesafe.scalalogging.LazyLogging + +object AllModules { + private var modules = Set[String]() + def add(module: String) = { + modules = modules | Set(module) + } + def rename(module: String) = { + var new_name = module + while (modules.contains(new_name)) + new_name = new_name + "_inTestHarness" + new_name + } +} + +case class ParsedInput(args: Seq[String]) extends LazyLogging { + var input: Option[String] = None + var output: Option[String] = None + var topOutput: Option[String] = None + var harnessOutput: Option[String] = None + var annoFile: Option[String] = None + var synTop: Option[String] = None + var harnessTop: Option[String] = None + var seqMemFlags: Option[String] = Some("-o:unused.confg") + var listClocks: Option[String] = Some("-o:unused.clocks") + + var usedOptions = Set.empty[Integer] + args.zipWithIndex.foreach{ case (arg, i) => + arg match { + case "-i" => { + input = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "-o" => { + output = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--top-o" => { + topOutput = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--harness-o" => { + harnessOutput = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--anno-file" => { + annoFile = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--syn-top" => { + synTop = Some(args(i+1)) + 
usedOptions = usedOptions | Set(i+1) + } + case "--harness-top" => { + harnessTop = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--seq-mem-flags" => { + seqMemFlags = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case "--list-clocks" => { + listClocks = Some(args(i+1)) + usedOptions = usedOptions | Set(i+1) + } + case _ => { + if (! (usedOptions contains i)) { + logger.error("Unknown option " + arg) + } + } + } + } + +} + +// Requires two phases, one to collect modules below synTop in the hierarchy +// and a second to remove those modules to generate the test harness +sealed trait GenerateTopAndHarnessApp extends App with LazyLogging { + lazy val options: ParsedInput = ParsedInput(args) + lazy val input = options.input + lazy val output = options.output + lazy val topOutput = options.topOutput + lazy val harnessOutput = options.harnessOutput + lazy val annoFile = options.annoFile + lazy val synTop = options.synTop + lazy val harnessTop = options.harnessTop + lazy val seqMemFlags = options.seqMemFlags + lazy val listClocks = options.listClocks + + private def getFirstPhasePasses(top: Boolean, harness: Boolean): Seq[Transform] = { + val pre = Seq( + new ReParentCircuit(synTop.get), + new RemoveUnusedModules + ) + + val enumerate = if (harness) { Seq( + new EnumerateModules( { m => if (m.name != options.synTop.get) { AllModules.add(m.name) } } ) + ) } else Seq() + + val post = if (top) { Seq( + new passes.memlib.InferReadWrite(), + new passes.memlib.ReplSeqMem(), + new passes.clocklist.ClockListTransform() + ) } else Seq() + + pre ++ enumerate ++ post + } + + private def getFirstPhaseAnnotations(top: Boolean): AnnotationMap = { + if (top) { + //Load annotations from file + val annotationArray = annoFile match { + case None => Array[Annotation]() + case Some(fileName) => { + val annotations = new File(fileName) + if(annotations.exists) { + val annotationsYaml = io.Source.fromFile(annotations).getLines().mkString("\n").parseYaml + annotationsYaml.convertTo[Array[Annotation]] + } else { + Array[Annotation]() + } + } + } + // add new annotations + AnnotationMap(Seq( + passes.memlib.InferReadWriteAnnotation( + s"${synTop.get}" + ), + passes.clocklist.ClockListAnnotation( + s"-c:${synTop.get}:-m:${synTop.get}:${listClocks.get}" + ), + passes.memlib.ReplSeqMemAnnotation( + s"-c:${synTop.get}:${seqMemFlags.get}" + ) + ) ++ annotationArray) + } else { AnnotationMap(Seq.empty) } + } + + private def getSecondPhasePasses: Seq[Transform] = { + // always the same for now + Seq( + new ConvertToExtMod((m) => m.name == synTop.get), + new RemoveUnusedModules, + new RenameModulesAndInstances((m) => AllModules.rename(m)) + ) + } + + // always the same for now + private def getSecondPhaseAnnotations: AnnotationMap = AnnotationMap(Seq.empty) + + // Top Generation + protected def firstPhase(top: Boolean, harness: Boolean): Unit = { + require(top || harness, "Must specify either top or harness") + firrtl.Driver.compile( + input.get, + topOutput.getOrElse(output.get), + new VerilogCompiler(), + Parser.UseInfo, + getFirstPhasePasses(top, harness), + getFirstPhaseAnnotations(top) + ) + } + + // Harness Generation + protected def secondPhase: Unit = { + firrtl.Driver.compile( + input.get, + harnessOutput.getOrElse(output.get), + new VerilogCompiler(), + Parser.UseInfo, + getSecondPhasePasses, + getSecondPhaseAnnotations + ) + } +} + +object GenerateTop extends GenerateTopAndHarnessApp { + // warn about unused options + harnessOutput.foreach(n => logger.warn(s"Not using harness output 
filename $n since you asked for just a top-level output.")) + topOutput.foreach(_.foreach{ + n => logger.warn(s"Not using generic output filename $n since you asked for just a top-level output and also specified a generic output.")}) + // Only need a single phase to generate the top module + firstPhase(top = true, harness = false) +} + +object GenerateHarness extends GenerateTopAndHarnessApp { + // warn about unused options + topOutput.foreach(n => logger.warn(s"Not using top-level output filename $n since you asked for just a test harness.")) + annoFile.foreach(n => logger.warn(s"Not using annotations file $n since you asked for just a test harness.")) + seqMemFlags.filter(_ != "-o:unused.confg").foreach { + n => logger.warn(s"Not using SeqMem flags $n since you asked for just a test harness.") } + listClocks.filter(_ != "-o:unused.clocks").foreach { + n => logger.warn(s"Not using clocks list $n since you asked for just a test harness.") } + harnessOutput.foreach(_.foreach{ + n => logger.warn(s"Not using generic output filename $n since you asked for just a test harness and also specified a generic output.")}) + // Do minimal work for the first phase to generate test harness + firstPhase(top = false, harness = true) + secondPhase +} + +object GenerateTopAndHarness extends GenerateTopAndHarnessApp { + // warn about unused options + output.foreach(n => logger.warn(s"Not using generic output filename $n since you asked for both a top-level output and a test harness.")) + // Do everything, top and harness generation + firstPhase(top = true, harness = true) + secondPhase +} diff --git a/tapeout/src/main/scala/transforms/GenerateHarness.scala b/tapeout/src/main/scala/transforms/GenerateHarness.scala deleted file mode 100644 index eea7960ee..000000000 --- a/tapeout/src/main/scala/transforms/GenerateHarness.scala +++ /dev/null @@ -1,79 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms - -import firrtl._ -import firrtl.ir._ -import firrtl.annotations._ -import firrtl.passes.Pass - -object AllModules { - private var modules = Set[String]() - def add(module: String) = { - modules = modules | Set(module) - } - def rename(module: String) = { - var new_name = module - while (modules.contains(new_name)) - new_name = new_name + "_inTestHarness" - new_name - } -} - -object GenerateHarness extends App { - var input: Option[String] = None - var output: Option[String] = None - var synTop: Option[String] = None - var harnessTop: Option[String] = None - - var usedOptions = Set.empty[Integer] - args.zipWithIndex.foreach{ case (arg, i) => - arg match { - case "-i" => { - input = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "-o" => { - output = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--syn-top" => { - synTop = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--harness-top" => { - harnessTop = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case _ => { - if (! 
(usedOptions contains i)) { - error("Unknown option " + arg) - } - } - } - } - - firrtl.Driver.compile( - input.get, - output.get, - new VerilogCompiler(), - Parser.UseInfo, - Seq( - new ReParentCircuit(synTop.get), - new RemoveUnusedModules, - new EnumerateModules( { m => if (m.name != synTop.get) { AllModules.add(m.name) } } ) - ) - ) - - firrtl.Driver.compile( - input.get, - output.get, - new VerilogCompiler(), - Parser.UseInfo, - Seq( - new ConvertToExtMod((m) => m.name == synTop.get), - new RemoveUnusedModules, - new RenameModulesAndInstances((m) => AllModules.rename(m)) - ) - ) -} diff --git a/tapeout/src/main/scala/transforms/GenerateTop.scala b/tapeout/src/main/scala/transforms/GenerateTop.scala deleted file mode 100644 index dc069a5b1..000000000 --- a/tapeout/src/main/scala/transforms/GenerateTop.scala +++ /dev/null @@ -1,77 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms - -import firrtl._ -import firrtl.ir._ -import firrtl.annotations._ -import firrtl.passes.Pass - -object GenerateTop extends App { - var input: Option[String] = None - var output: Option[String] = None - var synTop: Option[String] = None - var harnessTop: Option[String] = None - var seqMemFlags: Option[String] = Some("-o:unused.confg") - var listClocks: Option[String] = Some("-o:unused.clocks") - - var usedOptions = Set.empty[Integer] - args.zipWithIndex.foreach{ case (arg, i) => - arg match { - case "-i" => { - input = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "-o" => { - output = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--syn-top" => { - synTop = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--harness-top" => { - harnessTop = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--seq-mem-flags" => { - seqMemFlags = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--list-clocks" => { - listClocks = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case _ => { - if (! (usedOptions contains i)) { - error("Unknown option " + arg) - } - } - } - } - - firrtl.Driver.compile( - input.get, - output.get, - new VerilogCompiler(), - Parser.UseInfo, - Seq( - new ReParentCircuit(synTop.get), - new RemoveUnusedModules, - new passes.memlib.InferReadWrite(), - new passes.memlib.ReplSeqMem(), - new passes.clocklist.ClockListTransform() - ), - AnnotationMap(Seq( - passes.memlib.InferReadWriteAnnotation( - s"${synTop.get}" - ), - passes.clocklist.ClockListAnnotation( - s"-c:${synTop.get}:-m:${synTop.get}:${listClocks.get}" - ), - passes.memlib.ReplSeqMemAnnotation( - s"-c:${synTop.get}:${seqMemFlags.get}" - ) - )) - ) -} diff --git a/tapeout/src/main/scala/transforms/GenerateTopAndHarness.scala b/tapeout/src/main/scala/transforms/GenerateTopAndHarness.scala deleted file mode 100644 index 06dbe1554..000000000 --- a/tapeout/src/main/scala/transforms/GenerateTopAndHarness.scala +++ /dev/null @@ -1,120 +0,0 @@ -// See LICENSE for license details. 
- -package barstools.tapeout.transforms - -import firrtl._ -import firrtl.ir._ -import firrtl.annotations._ -import firrtl.passes.Pass - -import java.io.File -import firrtl.annotations.AnnotationYamlProtocol._ -import net.jcazevedo.moultingyaml._ - -object GenerateTopAndHarness extends App { - var input: Option[String] = None - var topOutput: Option[String] = None - var harnessOutput: Option[String] = None - var annoFile: Option[String] = None - var synTop: Option[String] = None - var harnessTop: Option[String] = None - var seqMemFlags: Option[String] = Some("-o:unused.confg") - var listClocks: Option[String] = Some("-o:unused.clocks") - - var usedOptions = Set.empty[Integer] - args.zipWithIndex.foreach{ case (arg, i) => - arg match { - case "-i" => { - input = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--top-o" => { - topOutput = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--harness-o" => { - harnessOutput = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--anno-file" => { - annoFile = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--syn-top" => { - synTop = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--harness-top" => { - harnessTop = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--seq-mem-flags" => { - seqMemFlags = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--list-clocks" => { - listClocks = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case _ => { - if (! (usedOptions contains i)) { - error("Unknown option " + arg) - } - } - } - } - //Load annotations from file - val annotationArray = annoFile match { - case None => Array[Annotation]() - case Some(fileName) => { - val annotations = new File(fileName) - if(annotations.exists) { - val annotationsYaml = io.Source.fromFile(annotations).getLines().mkString("\n").parseYaml - annotationsYaml.convertTo[Array[Annotation]] - } else { - Array[Annotation]() - } - } - } - - //Top Generation - firrtl.Driver.compile( - input.get, - topOutput.get, - new VerilogCompiler(), - Parser.UseInfo, - Seq( - new ReParentCircuit(synTop.get), - new RemoveUnusedModules, - new EnumerateModules( { m => if (m.name != synTop.get) { AllModules.add(m.name) } } ), - new passes.memlib.InferReadWrite(), - new passes.memlib.ReplSeqMem(), - new passes.clocklist.ClockListTransform() - ), - AnnotationMap(Seq( - passes.memlib.InferReadWriteAnnotation( - s"${synTop.get}" - ), - passes.clocklist.ClockListAnnotation( - s"-c:${synTop.get}:-m:${synTop.get}:${listClocks.get}" - ), - passes.memlib.ReplSeqMemAnnotation( - s"-c:${synTop.get}:${seqMemFlags.get}" - ) - ) ++ annotationArray) - ) - - //Harness Generation - firrtl.Driver.compile( - input.get, - harnessOutput.get, - new VerilogCompiler(), - Parser.UseInfo, - Seq( - new ConvertToExtMod((m) => m.name == synTop.get), - new RemoveUnusedModules, - new RenameModulesAndInstances((m) => AllModules.rename(m)) - ) - ) -} - From 5574354f5502df2c0271e5bc92095ab98ebe3eec Mon Sep 17 00:00:00 2001 From: Angie Wang Date: Sun, 2 Apr 2017 03:49:49 -0700 Subject: [PATCH 018/273] Fft changes (#17) * modified CustomBundle to also apply on Int * programmatic bundle should take T <: Data instead of Data * turns out indexedElements doesn't synthesize * had to change a bunch of files to get clk/pads compiling again with recent firrtl mods * modified CustomBundle to also apply on Int * programmatic bundle should take T <: Data instead of Data * turns out indexedElements doesn't 
synthesize * had to change a bunch of files to get clk/pads compiling again with recent firrtl mods * clk phases should be less than divby amount * make clkconstraint error more descriptive * don't make custom*bundle final * nevermind. bundles need to be final. * turns out making the bundle non-final was ok... * removed infertypes from clksrctransform. seems like it doesn't work @ low firrtl? --- tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala | 3 +++ .../main/scala/transforms/clkgen/ClkSrcTransform.scala | 3 ++- .../scala/transforms/clkgen/CreateClkConstraints.scala | 5 +++-- .../main/scala/transforms/utils/ProgrammaticBundle.scala | 8 ++++---- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala b/tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala index 3f41a1285..23402982f 100644 --- a/tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala +++ b/tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala @@ -72,6 +72,9 @@ class SEClkDivider(divBy: Int, phases: Seq[Int], analogFile: String = "", syncRe extends Module with IsClkModule { require(phases.distinct.length == phases.length, "Phases should be distinct!") + phases foreach { p => + require(p < divBy, "Phases must be < divBy") + } val io = IO(new SEClkDividerIO(phases)) diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala b/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala index 4a447cb78..a98086d18 100644 --- a/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala +++ b/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala @@ -18,7 +18,8 @@ class ClkSrcTransform extends Transform with SimpleRun { case Some((clkModAnnos, clkPortAnnos)) => val targetDir = barstools.tapeout.transforms.GetTargetDir(state) val passSeq = Seq( - InferTypes, + // TODO: Enable when it's legal? + // InferTypes, new CreateClkConstraints(clkModAnnos, clkPortAnnos, targetDir) ) state.copy(circuit = runPasses(state.circuit, passSeq)) diff --git a/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala b/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala index ea2ba22f3..0c6e88238 100644 --- a/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala +++ b/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala @@ -117,7 +117,7 @@ class CreateClkConstraints( // sources of sinks are generated clks or top level clk inputs if (clkSrcsFlip.contains(sourceAbsPath)) clkSrcsFlip(sourceAbsPath) else if (topClksFlip.contains(sourceAbsPath)) topClksFlip(sourceAbsPath) - else throw new Exception(s"Absolute path of clk source for $sinkId not found!") + else throw new Exception(s"Absolute path $sourceAbsPath of clk source for $sinkId not found!") } sinkId -> sourceId } @@ -131,7 +131,8 @@ class CreateClkConstraints( clkPortAnnos.find(x => // TODO: Not sufficiently general for output clks? Might have forgotten to label a clk module... LowerName(x.target.name) == n && x.target.module.name == mod.name).getOrElse( - throw new Exception("All top module input clks/clk module output clocks must be sinks/sources!")) + throw new Exception( + s"All top module input clks/clk module output clocks must be sinks/sources! 
$n not annotated!"))
      case _ =>
    }
  }
diff --git a/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala b/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala
index 17197c805..c3eec670a 100644
--- a/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala
+++ b/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala
@@ -3,18 +3,18 @@ package barstools.tapeout.transforms
 import chisel3._
 import scala.collection.immutable.ListMap
-final class CustomBundle[T <: Data](elts: (String, T)*) extends Record {
+class CustomBundle[T <: Data](elts: (String, T)*) extends Record {
   val elements = ListMap(elts map { case (field, elt) => field -> elt.chiselCloneType }: _*)
   def apply(elt: String): T = elements(elt)
   def apply(elt: Int): T = elements(elt.toString)
   override def cloneType = (new CustomBundle(elements.toList: _*)).asInstanceOf[this.type]
 }
-final class CustomIndexedBundle[T <: Data](elts: (Int, T)*) extends Record {
+class CustomIndexedBundle[T <: Data](elts: (Int, T)*) extends Record {
   // Must be String, Data
   val elements = ListMap(elts map { case (field, elt) => field.toString -> elt.chiselCloneType }: _*)
-  // TODO: Make an equivalent to the below work publicly
-  private def indexedElements = ListMap(elts map { case (field, elt) => field -> elt.chiselCloneType }: _*)
+  // TODO: Make an equivalent to the below work publicly (or only on subclasses?)
+  def indexedElements = ListMap(elts map { case (field, elt) => field -> elt.chiselCloneType }: _*)
   def apply(elt: Int): T = elements(elt.toString)
   override def cloneType = (new CustomIndexedBundle(indexedElements.toList: _*)).asInstanceOf[this.type]
 }
From a13869b6aa5b0c5da00d974c4de7229ad400834a Mon Sep 17 00:00:00 2001
From: Angie Wang
Date: Sun, 2 Apr 2017 04:10:46 -0700
Subject: [PATCH 019/273] Refactor repo for latest changes to firrtl transform api changes (#19)
---
 .../transforms/ConvertToExtModPass.scala | 10 +++----
 .../scala/transforms/EnumerateModules.scala | 8 +++---
 .../scala/transforms/ReParentCircuit.scala | 9 +++----
 .../transforms/RemoveUnusedModules.scala | 8 +++---
 .../RenameModulesAndInstances.scala | 8 +++---
 .../main/scala/transforms/ResetInverter.scala | 1 -
 .../transforms/clkgen/ClkSrcTransform.scala | 22 +++++++++------
 .../clkgen/CreateClkConstraints.scala | 2 --
 .../transforms/pads/AddIOPadsTransform.scala | 27 ++++++++++++-------
 .../scala/transforms/pads/AddPadFrame.scala | 4 +--
 .../transforms/pads/AnnotatePortPads.scala | 1 -
 11 files changed, 53 insertions(+), 47 deletions(-)
diff --git a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala
index 22e6bda2f..46e12ed0d 100644
--- a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala
+++ b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala
@@ -3,6 +3,7 @@
 package barstools.tapeout.transforms
 import firrtl._
+import firrtl.annotations.CircuitName
 import firrtl.ir._
 import firrtl.passes.Pass
@@ -10,8 +11,6 @@ import firrtl.passes.Pass
 // that function returns "true" then the module is converted into an ExtModule,
 // otherwise it's left alone.
class ConvertToExtModPass(classify: (Module) => Boolean) extends Pass { - def name = "Convert to External Modules" - def run(c: Circuit): Circuit = { val modulesx = c.modules.map { case m: ExtModule => m @@ -25,12 +24,13 @@ class ConvertToExtModPass(classify: (Module) => Boolean) extends Pass { Circuit(c.info, modulesx, c.main) } } -class ConvertToExtMod(classify: (Module) => Boolean) extends Transform with PassBased { +class ConvertToExtMod(classify: (Module) => Boolean) extends Transform with SeqTransformBased { def inputForm = MidForm def outputForm = MidForm - def passSeq = Seq(new ConvertToExtModPass(classify)) + def transforms = Seq(new ConvertToExtModPass(classify)) def execute(state: CircuitState): CircuitState = { - state.copy(circuit = runPasses(state.circuit)) + val ret = runTransforms(state) + CircuitState(ret.circuit, outputForm, ret.annotations, ret.renames) } } diff --git a/tapeout/src/main/scala/transforms/EnumerateModules.scala b/tapeout/src/main/scala/transforms/EnumerateModules.scala index 27d3e9e80..11da911e3 100644 --- a/tapeout/src/main/scala/transforms/EnumerateModules.scala +++ b/tapeout/src/main/scala/transforms/EnumerateModules.scala @@ -7,7 +7,6 @@ import firrtl.ir._ import firrtl.passes.Pass class EnumerateModulesPass(enumerate: (Module) => Unit) extends Pass { - def name = "Enumurate Modules" def run(c: Circuit): Circuit = { val modulesx = c.modules.map { @@ -21,12 +20,13 @@ class EnumerateModulesPass(enumerate: (Module) => Unit) extends Pass { } } -class EnumerateModules(enumerate: (Module) => Unit) extends Transform with PassBased { +class EnumerateModules(enumerate: (Module) => Unit) extends Transform with SeqTransformBased { def inputForm = LowForm def outputForm = LowForm - def passSeq = Seq(new EnumerateModulesPass(enumerate)) + def transforms = Seq(new EnumerateModulesPass(enumerate)) def execute(state: CircuitState): CircuitState = { - state.copy(circuit = runPasses(state.circuit)) + val ret = runTransforms(state) + CircuitState(ret.circuit, outputForm, ret.annotations, ret.renames) } } diff --git a/tapeout/src/main/scala/transforms/ReParentCircuit.scala b/tapeout/src/main/scala/transforms/ReParentCircuit.scala index acf26c98e..bee7a6b35 100644 --- a/tapeout/src/main/scala/transforms/ReParentCircuit.scala +++ b/tapeout/src/main/scala/transforms/ReParentCircuit.scala @@ -8,19 +8,18 @@ import firrtl.passes.Pass // "Re-Parents" a circuit, which changes the top module to something else. 
class ReParentCircuitPass(newTopName: String) extends Pass { - def name = "Re-Parent Circuit" - def run(c: Circuit): Circuit = { Circuit(c.info, c.modules, newTopName) } } -class ReParentCircuit(newTopName: String) extends Transform with PassBased { +class ReParentCircuit(newTopName: String) extends Transform with SeqTransformBased { def inputForm = HighForm def outputForm = HighForm - def passSeq = Seq(new ReParentCircuitPass(newTopName)) + def transforms = Seq(new ReParentCircuitPass(newTopName)) def execute(state: CircuitState): CircuitState = { - state.copy(circuit = runPasses(state.circuit)) + val ret = runTransforms(state) + CircuitState(ret.circuit, outputForm, ret.annotations, ret.renames) } } diff --git a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala index 72a7aaf31..848b7a2c8 100644 --- a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala +++ b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala @@ -9,7 +9,6 @@ import firrtl.passes.Pass // Removes all the unused modules in a circuit by recursing through every // instance (starting at the main module) class RemoveUnusedModulesPass extends Pass { - def name = "Remove Unused Modules" def run(c: Circuit): Circuit = { val modulesByName = c.modules.map{ @@ -48,12 +47,13 @@ class RemoveUnusedModulesPass extends Pass { } } -class RemoveUnusedModules extends Transform with PassBased { +class RemoveUnusedModules extends Transform with SeqTransformBased { def inputForm = MidForm def outputForm = MidForm - def passSeq = Seq(new RemoveUnusedModulesPass) + def transforms = Seq(new RemoveUnusedModulesPass) def execute(state: CircuitState): CircuitState = { - state.copy(circuit = runPasses(state.circuit)) + val ret = runTransforms(state) + CircuitState(ret.circuit, outputForm, ret.annotations, ret.renames) } } diff --git a/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala b/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala index f0a4dd80f..83c3dd719 100644 --- a/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala +++ b/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala @@ -11,7 +11,6 @@ import firrtl.passes.Pass // allow FIRRTL to be linked together using "cat" and ExtModules don't get // emitted, this should be safe. 
class RenameModulesAndInstancesPass(rename: (String) => String) extends Pass { - def name = "Rename Modules and Instances" def renameInstances(body: Statement): Statement = { body match { @@ -31,12 +30,13 @@ class RenameModulesAndInstancesPass(rename: (String) => String) extends Pass { } } -class RenameModulesAndInstances(rename: (String) => String) extends Transform with PassBased { +class RenameModulesAndInstances(rename: (String) => String) extends Transform with SeqTransformBased { def inputForm = LowForm def outputForm = LowForm - def passSeq = Seq(new RenameModulesAndInstancesPass(rename)) + def transforms = Seq(new RenameModulesAndInstancesPass(rename)) def execute(state: CircuitState): CircuitState = { - state.copy(circuit = runPasses(state.circuit)) + val ret = runTransforms(state) + CircuitState(ret.circuit, outputForm, ret.annotations, ret.renames) } } diff --git a/tapeout/src/main/scala/transforms/ResetInverter.scala b/tapeout/src/main/scala/transforms/ResetInverter.scala index c699499e5..d2f756f04 100644 --- a/tapeout/src/main/scala/transforms/ResetInverter.scala +++ b/tapeout/src/main/scala/transforms/ResetInverter.scala @@ -18,7 +18,6 @@ object ResetInverterAnnotation { } object ResetN extends Pass { - def name: String = "ResetN" private val Bool = UIntType(IntWidth(1)) // Only works on Modules with a Bool port named reset def invertReset(mod: Module): Module = { diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala b/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala index a98086d18..f727b1cb9 100644 --- a/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala +++ b/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala @@ -1,28 +1,34 @@ +// See LICENSE for license details. + package barstools.tapeout.transforms.clkgen import firrtl._ -import firrtl.annotations._ import firrtl.passes._ -import firrtl.ir._ -class ClkSrcTransform extends Transform with SimpleRun { +import scala.collection.mutable + +class ClkSrcTransform extends Transform with SeqTransformBased { override def inputForm: CircuitForm = LowForm override def outputForm: CircuitForm = LowForm + val transformList = new mutable.ArrayBuffer[Transform] + def transforms = transformList + override def execute(state: CircuitState): CircuitState = { val collectedAnnos = HasClkAnnotation(getMyAnnotations(state)) collectedAnnos match { // Transform not used case None => CircuitState(state.circuit, LowForm) - case Some((clkModAnnos, clkPortAnnos)) => + case Some((clkModAnnos, clkPortAnnos)) => val targetDir = barstools.tapeout.transforms.GetTargetDir(state) - val passSeq = Seq( - // TODO: Enable when it's legal? - // InferTypes, + + transformList ++= Seq( + InferTypes, new CreateClkConstraints(clkModAnnos, clkPortAnnos, targetDir) ) - state.copy(circuit = runPasses(state.circuit, passSeq)) + val ret = runTransforms(state) + CircuitState(ret.circuit, outputForm, ret.annotations, ret.renames) } } } diff --git a/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala b/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala index 0c6e88238..1915bdbf7 100644 --- a/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala +++ b/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala @@ -21,8 +21,6 @@ class CreateClkConstraints( clkPortAnnos: Seq[TargetClkPortAnnoF], targetDir: String) extends Pass { - def name = "Create clock constraints" - // TODO: Are annotations only valid on ports? 
def run(c: Circuit): Circuit = { diff --git a/tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala b/tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala index df4536a53..f9501f8f8 100644 --- a/tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala +++ b/tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala @@ -1,3 +1,5 @@ +// See LICENSE for license details. + package barstools.tapeout.transforms.pads import firrtl._ @@ -6,22 +8,27 @@ import firrtl.passes._ import firrtl.ir._ import barstools.tapeout.transforms._ +import scala.collection.mutable + // Main Add IO Pad transform operates on low Firrtl -class AddIOPadsTransform extends Transform with SimpleRun { +class AddIOPadsTransform extends Transform with SeqTransformBased { override def inputForm: CircuitForm = LowForm override def outputForm: CircuitForm = LowForm + val transformList = new mutable.ArrayBuffer[Transform] + def transforms: Seq[Transform] = transformList + override def execute(state: CircuitState): CircuitState = { val collectedAnnos = HasPadAnnotation(getMyAnnotations(state)) collectedAnnos match { // Transform not used case None => state - case Some(x) => + case Some(x) => val techLoc = (new TechnologyLocation).get(state) // Get foundry pad templates from yaml val foundryPads = FoundryPadsYaml.parse(techLoc) - val portPads = AnnotatePortPads(state.circuit, x.topModName, foundryPads, x.componentAnnos, + val portPads = AnnotatePortPads(state.circuit, x.topModName, foundryPads, x.componentAnnos, HasPadAnnotation.getSide(x.defaultPadSide)) val supplyPads = AnnotateSupplyPads(foundryPads, x.supplyAnnos) val (circuitWithBBs, bbAnnotations) = CreatePadBBs(state.circuit, portPads, supplyPads) @@ -30,7 +37,7 @@ class AddIOPadsTransform extends Transform with SimpleRun { val topInternalName = namespace newName s"${x.topModName}_Internal" val targetDir = barstools.tapeout.transforms.GetTargetDir(state) PadPlacementFile.generate(techLoc, targetDir, padFrameName, portPads, supplyPads) - val passSeq = Seq( + transformList ++= Seq( Legalize, ResolveGenders, // Types really need to be known... @@ -44,13 +51,13 @@ class AddIOPadsTransform extends Transform with SimpleRun { ResolveGenders ) // Expects BlackBox helper to be run after to inline pad Verilog! - val prevAnnos = state.annotations.getOrElse(AnnotationMap(Seq.empty)).annotations - val cs = state.copy( - circuit = runPasses(circuitWithBBs, passSeq), - annotations = Some(AnnotationMap(prevAnnos ++ bbAnnotations))) - + val ret = runTransforms(state) + val currentAnnos = ret.annotations.getOrElse(AnnotationMap(Seq.empty)).annotations + val newAnnoMap = AnnotationMap(currentAnnos ++ bbAnnotations) + val newState = CircuitState(ret.circuit, outputForm, Some(newAnnoMap), ret.renames) + // TODO: *.f file is overwritten on subsequent executions, but it doesn't seem to be used anywhere? 
- (new firrtl.transforms.BlackBoxSourceHelper).execute(cs) + (new firrtl.transforms.BlackBoxSourceHelper).execute(newState) } } } \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/pads/AddPadFrame.scala b/tapeout/src/main/scala/transforms/pads/AddPadFrame.scala index 17b22fbb9..853cfced4 100644 --- a/tapeout/src/main/scala/transforms/pads/AddPadFrame.scala +++ b/tapeout/src/main/scala/transforms/pads/AddPadFrame.scala @@ -5,7 +5,7 @@ package barstools.tapeout.transforms.pads import firrtl.annotations._ import firrtl.ir._ import firrtl._ -import firrtl.passes._ +import firrtl.passes.Pass // Analog is like UInt, SInt; it's not a direction (which is kind of weird) // WARNING: Analog type is associated with Verilog InOut! i.e. even if digital pads are tri-statable, b/c tristate @@ -19,8 +19,6 @@ class AddPadFrame( ioPads: Seq[PortIOPad], supplyPads: Seq[TopSupplyPad]) extends Pass { - def name: String = "Add Padframe" - def run(c: Circuit): Circuit = { // New modules consist of old modules (with top renamed to internal) + padFrame + newTop val newMods = c.modules.map { diff --git a/tapeout/src/main/scala/transforms/pads/AnnotatePortPads.scala b/tapeout/src/main/scala/transforms/pads/AnnotatePortPads.scala index 1f6911a07..8164463e2 100644 --- a/tapeout/src/main/scala/transforms/pads/AnnotatePortPads.scala +++ b/tapeout/src/main/scala/transforms/pads/AnnotatePortPads.scala @@ -3,7 +3,6 @@ package barstools.tapeout.transforms.pads import firrtl.annotations._ import firrtl._ import firrtl.ir._ -import firrtl.passes._ import barstools.tapeout.transforms._ // TODO: Make some trait with commonalities between IO Pad + supply pad From 7ad088503f1080962561bee5e4ce954af7330888 Mon Sep 17 00:00:00 2001 From: Angie Wang Date: Sun, 2 Apr 2017 04:12:31 -0700 Subject: [PATCH 020/273] [stevo]: add custom analog annotation (#20) --- .../scala/transforms/AnalogAnnotation.scala | 81 +++++++++++++++++++ .../src/main/scala/transforms/Generate.scala | 21 ++++- 2 files changed, 100 insertions(+), 2 deletions(-) create mode 100644 tapeout/src/main/scala/transforms/AnalogAnnotation.scala diff --git a/tapeout/src/main/scala/transforms/AnalogAnnotation.scala b/tapeout/src/main/scala/transforms/AnalogAnnotation.scala new file mode 100644 index 000000000..5c3ba63a0 --- /dev/null +++ b/tapeout/src/main/scala/transforms/AnalogAnnotation.scala @@ -0,0 +1,81 @@ +// See LICENSE for license details + +package barstools.tapeout.transforms + +import chisel3._ +import chisel3.experimental.ChiselAnnotation +import chisel3.util._ +import chisel3.testers.BasicTester +import chisel3.experimental.{Analog, attach} +import firrtl.ir.{AnalogType, Circuit, DefModule, Expression, HasName, Port, Statement, Type} +import firrtl.{CircuitForm, CircuitState, LowForm, Transform} +import firrtl.annotations.{Annotation, ModuleName, Named, ComponentName} +import firrtl.Mappers._ + +object AnalogRenamerAnnotation { + def apply(target: Named, value: String): Annotation = + Annotation(target, classOf[AnalogRenamer], value) + + def unapply(a: Annotation): Option[(ComponentName, String)] = a match { + case Annotation(named, t, value) if t == classOf[AnalogRenamer] => named match { + case c: ComponentName => Some((c, value)) + case _ => None + } + case _ => None + } +} + +class AnalogRenamer extends Transform { + override def inputForm: CircuitForm = LowForm + override def outputForm: CircuitForm = LowForm + + override def execute(state: CircuitState): CircuitState = { + getMyAnnotations(state) match { + case Nil => state + case 
annos => + val analogs = annos.collect { case AnalogRenamerAnnotation(ana, name) => (ana, name) } + state.copy(circuit = run(state.circuit, analogs)) + } + } + + def run(circuit: Circuit, annos: Seq[(ComponentName, String)]): Circuit = { + circuit map walkModule(annos) + } + def walkModule(annos: Seq[(ComponentName, String)])(m: DefModule): DefModule = { + val filteredAnnos = Map(annos.filter(a => a._1.module.name == m.name).map { + case (c, s) => c.name.replace(".", "_") -> s + }: _*) + m map walkStatement(filteredAnnos) map walkPort(filteredAnnos) + } + def walkStatement(annos: Map[String, String])(s: Statement): Statement = { + s map walkExpression(annos) + } + def walkPort(annos: Map[String, String])(p: Port): Port = { + if (annos.contains(p.name)) { + updateAnalogVerilog(annos(p.name))(p.tpe) + } + p + } + def walkExpression(annos: Map[String, String])(e: Expression): Expression = { + e match { + case h: HasName => + if (annos.contains(h.name)) e mapType updateAnalogVerilog(annos(h.name)) + case _ => + } + e + } + def updateAnalogVerilog(value: String)(tpe: Type): Type = { + tpe match { + case a: AnalogType => + a.verilogTpe = value + a + case t => t + } + } +} + +trait AnalogAnnotator { self: Module => + def renameAnalog(component: Analog, value: String): Unit = { + annotate(ChiselAnnotation(component, classOf[AnalogRenamer], value)) + } +} diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 79bbd3b0f..536ad7ae2 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -110,6 +110,7 @@ sealed trait GenerateTopAndHarnessApp extends App with LazyLogging { val post = if (top) { Seq( new passes.memlib.InferReadWrite(), new passes.memlib.ReplSeqMem(), + new AnalogRenamer(), new passes.clocklist.ClockListTransform() ) } else Seq() @@ -151,12 +152,28 @@ sealed trait GenerateTopAndHarnessApp extends App with LazyLogging { Seq( new ConvertToExtMod((m) => m.name == synTop.get), new RemoveUnusedModules, - new RenameModulesAndInstances((m) => AllModules.rename(m)) + new RenameModulesAndInstances((m) => AllModules.rename(m)), + new AnalogRenamer() ) } // always the same for now - private def getSecondPhaseAnnotations: AnnotationMap = AnnotationMap(Seq.empty) + private def getSecondPhaseAnnotations: AnnotationMap = { + //Load annotations from file + val annotationArray = annoFile match { + case None => Array[Annotation]() + case Some(fileName) => { + val annotations = new File(fileName) + if(annotations.exists) { + val annotationsYaml = io.Source.fromFile(annotations).getLines().mkString("\n").parseYaml + annotationsYaml.convertTo[Array[Annotation]] + } else { + Array[Annotation]() + } + } + } + AnnotationMap(annotationArray) + } // Top Generation protected def firstPhase(top: Boolean, harness: Boolean): Unit = { From 7c0e6c89d2470eefbcfdb757eccd29e3ebd55765 Mon Sep 17 00:00:00 2001 From: Angie Date: Sun, 2 Apr 2017 04:26:27 -0700 Subject: [PATCH 021/273] firrtl still hasn't fixed the problem with wir primops --- tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala b/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala index f727b1cb9..0f5dc5712 100644 --- a/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala +++ b/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala @@ -24,7 +24,7 @@ class 
ClkSrcTransform extends Transform with SeqTransformBased { val targetDir = barstools.tapeout.transforms.GetTargetDir(state) transformList ++= Seq( - InferTypes, + // InferTypes, new CreateClkConstraints(clkModAnnos, clkPortAnnos, targetDir) ) val ret = runTransforms(state) From 9305dd08eb1af0be90e80b3d8a552f01e774f738 Mon Sep 17 00:00:00 2001 From: Angie Date: Sun, 2 Apr 2017 04:34:38 -0700 Subject: [PATCH 022/273] remove functionality from clkgen pass due to compatibility issue with latest firrtl --- .../main/scala/transforms/clkgen/CreateClkConstraints.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala b/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala index 1915bdbf7..6975eb735 100644 --- a/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala +++ b/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala @@ -24,7 +24,7 @@ class CreateClkConstraints( // TODO: Are annotations only valid on ports? def run(c: Circuit): Circuit = { - +/* val top = c.main // Remove everything from the circuit, unless it has a clock type @@ -146,6 +146,7 @@ class CreateClkConstraints( clkSrcs.foreach { x => println(s"gen clk: $x")} clkModSinkToSourceMap.foreach { x => println(s"sink -> src: $x")} clkModSourceToSinkMap.foreach { x => println(s"src -> dependent sinks: $x")} +*/ c } } \ No newline at end of file From 5b5c8c82db910a9a3222aa53cd006b58b334df3a Mon Sep 17 00:00:00 2001 From: Angie Wang Date: Sun, 2 Apr 2017 13:12:51 -0700 Subject: [PATCH 023/273] Revert "[stevo]: add custom analog annotation" (#21) * Revert "[stevo]: add custom analog annotation (#20)" This reverts commit 7ad088503f1080962561bee5e4ce954af7330888. --- .../scala/transforms/AnalogAnnotation.scala | 81 ------------------- .../src/main/scala/transforms/Generate.scala | 21 +---- 2 files changed, 2 insertions(+), 100 deletions(-) delete mode 100644 tapeout/src/main/scala/transforms/AnalogAnnotation.scala diff --git a/tapeout/src/main/scala/transforms/AnalogAnnotation.scala b/tapeout/src/main/scala/transforms/AnalogAnnotation.scala deleted file mode 100644 index 5c3ba63a0..000000000 --- a/tapeout/src/main/scala/transforms/AnalogAnnotation.scala +++ /dev/null @@ -1,81 +0,0 @@ -// See LICENSE for license details - -package barstools.tapeout.transforms - -import chisel3._ -import chisel3.experimental.ChiselAnnotation -import chisel3.util._ -import chisel3.testers.BasicTester -import chisel3.experimental.{Analog, attach} -import firrtl.ir.{AnalogType, Circuit, DefModule, Expression, HasName, Port, Statement, Type} -import firrtl.{CircuitForm, CircuitState, LowForm, Transform} -import firrtl.annotations.{Annotation, ModuleName, Named, ComponentName} -import firrtl.Mappers._ - -object AnalogRenamerAnnotation { - def apply(target: Named, value: String): Annotation = - Annotation(target, classOf[AnalogRenamer], value) - - def unapply(a: Annotation): Option[(ComponentName, String)] = a match { - case Annotation(named, t, value) if t == classOf[AnalogRenamer] => named match { - case c: ComponentName => Some((c, value)) - case _ => None - } - case _ => None - } -} - -class AnalogRenamer extends Transform { - override def inputForm: CircuitForm = LowForm - override def outputForm: CircuitForm = LowForm - - override def execute(state: CircuitState): CircuitState = { - getMyAnnotations(state) match { - case Nil => state - case annos => - val analogs = annos.collect { case AnalogRenamerAnnotation(ana, name) => (ana, name) 
} - state.copy(circuit = run(state.circuit, analogs)) - } - } - - def run(circuit: Circuit, annos: Seq[(ComponentName, String)]): Circuit = { - circuit map walkModule(annos) - } - def walkModule(annos: Seq[(ComponentName, String)])(m: DefModule): DefModule = { - val filteredAnnos = Map(annos.filter(a => a._1.module.name == m.name).map { - case (c, s) => c.name.replace(".", "_") -> s - }: _*) - m map walkStatement(filteredAnnos) map walkPort(filteredAnnos) - } - def walkStatement(annos: Map[String, String])(s: Statement): Statement = { - s map walkExpression(annos) - } - def walkPort(annos: Map[String, String])(p: Port): Port = { - if (annos.contains(p.name)) { - updateAnalogVerilog(annos(p.name))(p.tpe) - } - p - } - def walkExpression(annos: Map[String, String])(e: Expression): Expression = { - e match { - case h: HasName => - if (annos.contains(h.name)) e mapType updateAnalogVerilog(annos(h.name)) - case _ => - } - e - } - def updateAnalogVerilog(value: String)(tpe: Type): Type = { - tpe match { - case a: AnalogType => - a.verilogTpe = value - a - case t => t - } - } -} - -trait AnalogAnnotator { self: Module => - def renameAnalog(component: Analog, value: String): Unit = { - annotate(ChiselAnnotation(component, classOf[AnalogRenamer], value)) - } -} diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 536ad7ae2..79bbd3b0f 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -110,7 +110,6 @@ sealed trait GenerateTopAndHarnessApp extends App with LazyLogging { val post = if (top) { Seq( new passes.memlib.InferReadWrite(), new passes.memlib.ReplSeqMem(), - new AnalogRenamer(), new passes.clocklist.ClockListTransform() ) } else Seq() @@ -152,28 +151,12 @@ sealed trait GenerateTopAndHarnessApp extends App with LazyLogging { Seq( new ConvertToExtMod((m) => m.name == synTop.get), new RemoveUnusedModules, - new RenameModulesAndInstances((m) => AllModules.rename(m)), - new AnalogRenamer() + new RenameModulesAndInstances((m) => AllModules.rename(m)) ) } // always the same for now - private def getSecondPhaseAnnotations: AnnotationMap = { - //Load annotations from file - val annotationArray = annoFile match { - case None => Array[Annotation]() - case Some(fileName) => { - val annotations = new File(fileName) - if(annotations.exists) { - val annotationsYaml = io.Source.fromFile(annotations).getLines().mkString("\n").parseYaml - annotationsYaml.convertTo[Array[Annotation]] - } else { - Array[Annotation]() - } - } - } - AnnotationMap(annotationArray) - } + private def getSecondPhaseAnnotations: AnnotationMap = AnnotationMap(Seq.empty) // Top Generation protected def firstPhase(top: Boolean, harness: Boolean): Unit = { From 16846b86fd6ee03514b4629a1f86dfe65e74d1b4 Mon Sep 17 00:00:00 2001 From: Chick Markley Date: Tue, 4 Apr 2017 10:47:59 -0700 Subject: [PATCH 024/273] DiGraph was being being confused with the DigGraph in firrtl. This led to pathological exceptions (#22) No such method error on accessing a lazy val. InstanceGraph seemed also to be a duplicate of firrtl code --- IOPadSpec fails no two tests but these seem to be at least an ordinary error. 
And should be debugged separately --- .../main/scala/transforms/utils/DiGraph.scala | 158 ------------------ .../transforms/utils/InstanceGraph.scala | 51 ------ 2 files changed, 209 deletions(-) delete mode 100644 tapeout/src/main/scala/transforms/utils/DiGraph.scala delete mode 100644 tapeout/src/main/scala/transforms/utils/InstanceGraph.scala diff --git a/tapeout/src/main/scala/transforms/utils/DiGraph.scala b/tapeout/src/main/scala/transforms/utils/DiGraph.scala deleted file mode 100644 index 8e0db0787..000000000 --- a/tapeout/src/main/scala/transforms/utils/DiGraph.scala +++ /dev/null @@ -1,158 +0,0 @@ -package firrtl - -import scala.collection.immutable.{HashSet, HashMap} -import scala.collection.mutable -import scala.collection.mutable.MultiMap - -class MutableDiGraph[T]( - val edgeData: MultiMap[T,T] = new mutable.HashMap[T, mutable.Set[T]] with MultiMap[T, T]) { - def contains(v: T) = edgeData.contains(v) - def getVertices = edgeData.keys - def getEdges(v: T) = edgeData(v) - def addVertex(v: T): T = { - edgeData.getOrElseUpdate(v,new mutable.HashSet[T]) - v - } - // Add v to keys to maintain invariant - def addEdge(u: T, v: T) = { - edgeData.getOrElseUpdate(v, new mutable.HashSet[T]) - edgeData.addBinding(u,v) - } -} - -object DiGraph { - def apply[T](mdg: MutableDiGraph[T]) = new DiGraph((mdg.edgeData mapValues { _.toSet }).toMap[T, Set[T]]) - def apply[T](edgeData: MultiMap[T,T]) = new DiGraph((edgeData mapValues { _.toSet }).toMap[T, Set[T]]) -} - -class DiGraph[T] (val edges: Map[T, Set[T]]) { - - def getVertices = edges.keys - def getEdges(v: T) = edges.getOrElse(v, new HashSet[T]) - - // Graph must be acyclic for valid linearization - def linearize(root: T) = { - val order = new mutable.ArrayBuffer[T] - val visited = new mutable.HashSet[T] - def explore(v: T): Unit = { - visited += v - for (u <- getEdges(v)) { - if (!visited.contains(u)) { - explore(u) - } - } - order.append(v) - } - explore(root) - order.reverse.toList - } - - def doBFS(root: T) = { - val prev = new mutable.HashMap[T,T] - val queue = new mutable.Queue[T] - queue.enqueue(root) - while (!queue.isEmpty) { - val u = queue.dequeue - for (v <- getEdges(u)) { - if (!prev.contains(v)) { - prev(v) = u - queue.enqueue(v) - } - } - } - prev - } - - def reachabilityBFS(root: T) = doBFS(root).keys.toSet - - def path(start: T, end: T) = { - val nodePath = new mutable.ArrayBuffer[T] - val prev = doBFS(start) - nodePath += end - while (nodePath.last != start) { - nodePath += prev(nodePath.last) - } - nodePath.toList.reverse - } - - def findSCCs = { - var counter: BigInt = 0 - val stack = new mutable.Stack[T] - val onstack = new mutable.HashSet[T] - val indices = new mutable.HashMap[T, BigInt] - val lowlinks = new mutable.HashMap[T, BigInt] - val sccs = new mutable.ArrayBuffer[List[T]] - - def strongConnect(v: T): Unit = { - indices(v) = counter - lowlinks(v) = counter - counter = counter + 1 - stack.push(v) - onstack += v - for (w <- getEdges(v)) { - if (!indices.contains(w)) { - strongConnect(w) - lowlinks(v) = lowlinks(v).min(lowlinks(w)) - } else if (onstack.contains(w)) { - lowlinks(v) = lowlinks(v).min(indices(w)) - } - } - if (lowlinks(v) == indices(v)) { - val scc = new mutable.ArrayBuffer[T] - do { - val w = stack.pop - onstack -= w - scc += w - } - while (scc.last != v); - sccs.append(scc.toList) - } - } - - for (v <- getVertices) { - strongConnect(v) - } - - sccs.toList - } - - def pathsInDAG(start: T): Map[T,List[List[T]]] = { - // paths(v) holds the set of paths from start to v - val paths = new 
mutable.HashMap[T,mutable.Set[List[T]]] with mutable.MultiMap[T,List[T]] - val queue = new mutable.Queue[T] - val visited = new mutable.HashSet[T] - paths.addBinding(start,List(start)) - queue.enqueue(start) - visited += start - while (!queue.isEmpty) { - val current = queue.dequeue - for (v <- getEdges(current)) { - if (!visited.contains(v)) { - queue.enqueue(v) - visited += v - } - for (p <- paths(current)) { - paths.addBinding(v, p :+ v) - } - } - } - (paths map { case (k,v) => (k,v.toList) }).toMap - } - - def reverse = { - val mdg = new MutableDiGraph[T] - edges foreach { case (u,edges) => edges.foreach({ v => mdg.addEdge(v,u) }) } - DiGraph(mdg) - } - - def simplify(vprime: Set[T]) = { - val eprime = vprime.map( v => (v,reachabilityBFS(v) & vprime) ).toMap - new DiGraph(eprime) - } - - def transformNodes[Q](f: (T) => Q): DiGraph[Q] = { - val eprime = edges.map({ case (k,v) => (f(k),v.map(f(_))) }) - new DiGraph(eprime) - } - -} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/utils/InstanceGraph.scala b/tapeout/src/main/scala/transforms/utils/InstanceGraph.scala deleted file mode 100644 index 10b37ea83..000000000 --- a/tapeout/src/main/scala/transforms/utils/InstanceGraph.scala +++ /dev/null @@ -1,51 +0,0 @@ -package firrtl.analyses - -import scala.collection.mutable - -import firrtl._ -import firrtl.ir._ -import firrtl.Utils._ -import firrtl.Mappers._ - -class InstanceGraph(c: Circuit) { - - private def collectInstances(insts: mutable.Set[WDefInstance])(s: Statement): Statement = s match { - case i: WDefInstance => - insts += i - i - case _ => - s map collectInstances(insts) - s - } - - val moduleMap = c.modules.map({m => (m.name,m) }).toMap - val childInstances = - new mutable.HashMap[String,mutable.Set[WDefInstance]] - for (m <- c.modules) { - childInstances(m.name) = new mutable.HashSet[WDefInstance] - m map collectInstances(childInstances(m.name)) - } - val instanceGraph = new MutableDiGraph[WDefInstance] - val instanceQueue = new mutable.Queue[WDefInstance] - val topInstance = WDefInstance(c.main,c.main) // top instance - instanceQueue.enqueue(topInstance) - while (!instanceQueue.isEmpty) { - val current = instanceQueue.dequeue - for (child <- childInstances(current.module)) { - if (!instanceGraph.contains(child)) { - instanceQueue.enqueue(child) - } - instanceGraph.addEdge(current,child) - } - } - - val graph = DiGraph(instanceGraph) - - lazy val fullHierarchy = graph.pathsInDAG(topInstance) - - def findInstancesInHierarchy(module: String): List[List[WDefInstance]] = { - val instances = graph.getVertices.filter(_.module == module).toList - instances flatMap { i => fullHierarchy(i) } - } - -} From 96939c9ab62779c0cee6ea1fbeb79c550cd3c75f Mon Sep 17 00:00:00 2001 From: Adam Izraelevitz Date: Wed, 6 Sep 2017 12:36:01 -0700 Subject: [PATCH 025/273] Moved clkgen -> .clkgen and pads -> .pads They no longer compile with the latest Chisel/FIRRTL, and may not be supported. However, future work will need them, so this keeps the files around but are ignored by sbt. 
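The dot prefix works because sbt's default excludeFilter is HiddenFileFilter, so any path component that starts with "." is skipped when unmanaged sources are collected. If the clkgen and pads passes are resurrected later, one possible way to pull them back into the build is to widen that filter in build.sbt. This is only a sketch and assumes the stock filter is what hides them; it is not part of this change:

    // Hypothetical build.sbt override, not part of this change:
    // NothingFilter admits every file, so the dot-prefixed source directories compile again.
    excludeFilter in unmanagedSources := NothingFilter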
--- .../scala/transforms/{clkgen => .clkgen}/ClkAnnotations.scala | 0 .../main/scala/transforms/{clkgen => .clkgen}/ClkDivider.scala | 0 .../scala/transforms/{clkgen => .clkgen}/ClkSrcTransform.scala | 0 .../transforms/{clkgen => .clkgen}/CreateClkConstraints.scala | 0 .../scala/transforms/{pads => .pads}/AddIOPadsTransform.scala | 0 .../src/main/scala/transforms/{pads => .pads}/AddPadFrame.scala | 0 .../main/scala/transforms/{pads => .pads}/AnnotatePortPads.scala | 0 .../scala/transforms/{pads => .pads}/AnnotateSupplyPads.scala | 0 .../main/scala/transforms/{pads => .pads}/ChiselTopModule.scala | 0 .../src/main/scala/transforms/{pads => .pads}/CreatePadBBs.scala | 0 .../main/scala/transforms/{pads => .pads}/FoundryPadsYaml.scala | 0 .../main/scala/transforms/{pads => .pads}/PadAnnotations.scala | 0 .../main/scala/transforms/{pads => .pads}/PadDescriptors.scala | 0 .../src/main/scala/transforms/{pads => .pads}/PadPlacement.scala | 0 .../test/scala/transforms/{clkgen => .clkgen}/ClkGenSpec.scala | 0 .../src/test/scala/transforms/{pads => .pads}/AddIOPadsSpec.scala | 0 16 files changed, 0 insertions(+), 0 deletions(-) rename tapeout/src/main/scala/transforms/{clkgen => .clkgen}/ClkAnnotations.scala (100%) rename tapeout/src/main/scala/transforms/{clkgen => .clkgen}/ClkDivider.scala (100%) rename tapeout/src/main/scala/transforms/{clkgen => .clkgen}/ClkSrcTransform.scala (100%) rename tapeout/src/main/scala/transforms/{clkgen => .clkgen}/CreateClkConstraints.scala (100%) rename tapeout/src/main/scala/transforms/{pads => .pads}/AddIOPadsTransform.scala (100%) rename tapeout/src/main/scala/transforms/{pads => .pads}/AddPadFrame.scala (100%) rename tapeout/src/main/scala/transforms/{pads => .pads}/AnnotatePortPads.scala (100%) rename tapeout/src/main/scala/transforms/{pads => .pads}/AnnotateSupplyPads.scala (100%) rename tapeout/src/main/scala/transforms/{pads => .pads}/ChiselTopModule.scala (100%) rename tapeout/src/main/scala/transforms/{pads => .pads}/CreatePadBBs.scala (100%) rename tapeout/src/main/scala/transforms/{pads => .pads}/FoundryPadsYaml.scala (100%) rename tapeout/src/main/scala/transforms/{pads => .pads}/PadAnnotations.scala (100%) rename tapeout/src/main/scala/transforms/{pads => .pads}/PadDescriptors.scala (100%) rename tapeout/src/main/scala/transforms/{pads => .pads}/PadPlacement.scala (100%) rename tapeout/src/test/scala/transforms/{clkgen => .clkgen}/ClkGenSpec.scala (100%) rename tapeout/src/test/scala/transforms/{pads => .pads}/AddIOPadsSpec.scala (100%) diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkAnnotations.scala b/tapeout/src/main/scala/transforms/.clkgen/ClkAnnotations.scala similarity index 100% rename from tapeout/src/main/scala/transforms/clkgen/ClkAnnotations.scala rename to tapeout/src/main/scala/transforms/.clkgen/ClkAnnotations.scala diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala b/tapeout/src/main/scala/transforms/.clkgen/ClkDivider.scala similarity index 100% rename from tapeout/src/main/scala/transforms/clkgen/ClkDivider.scala rename to tapeout/src/main/scala/transforms/.clkgen/ClkDivider.scala diff --git a/tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala b/tapeout/src/main/scala/transforms/.clkgen/ClkSrcTransform.scala similarity index 100% rename from tapeout/src/main/scala/transforms/clkgen/ClkSrcTransform.scala rename to tapeout/src/main/scala/transforms/.clkgen/ClkSrcTransform.scala diff --git a/tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala 
b/tapeout/src/main/scala/transforms/.clkgen/CreateClkConstraints.scala similarity index 100% rename from tapeout/src/main/scala/transforms/clkgen/CreateClkConstraints.scala rename to tapeout/src/main/scala/transforms/.clkgen/CreateClkConstraints.scala diff --git a/tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala b/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala similarity index 100% rename from tapeout/src/main/scala/transforms/pads/AddIOPadsTransform.scala rename to tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala diff --git a/tapeout/src/main/scala/transforms/pads/AddPadFrame.scala b/tapeout/src/main/scala/transforms/.pads/AddPadFrame.scala similarity index 100% rename from tapeout/src/main/scala/transforms/pads/AddPadFrame.scala rename to tapeout/src/main/scala/transforms/.pads/AddPadFrame.scala diff --git a/tapeout/src/main/scala/transforms/pads/AnnotatePortPads.scala b/tapeout/src/main/scala/transforms/.pads/AnnotatePortPads.scala similarity index 100% rename from tapeout/src/main/scala/transforms/pads/AnnotatePortPads.scala rename to tapeout/src/main/scala/transforms/.pads/AnnotatePortPads.scala diff --git a/tapeout/src/main/scala/transforms/pads/AnnotateSupplyPads.scala b/tapeout/src/main/scala/transforms/.pads/AnnotateSupplyPads.scala similarity index 100% rename from tapeout/src/main/scala/transforms/pads/AnnotateSupplyPads.scala rename to tapeout/src/main/scala/transforms/.pads/AnnotateSupplyPads.scala diff --git a/tapeout/src/main/scala/transforms/pads/ChiselTopModule.scala b/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala similarity index 100% rename from tapeout/src/main/scala/transforms/pads/ChiselTopModule.scala rename to tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala diff --git a/tapeout/src/main/scala/transforms/pads/CreatePadBBs.scala b/tapeout/src/main/scala/transforms/.pads/CreatePadBBs.scala similarity index 100% rename from tapeout/src/main/scala/transforms/pads/CreatePadBBs.scala rename to tapeout/src/main/scala/transforms/.pads/CreatePadBBs.scala diff --git a/tapeout/src/main/scala/transforms/pads/FoundryPadsYaml.scala b/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala similarity index 100% rename from tapeout/src/main/scala/transforms/pads/FoundryPadsYaml.scala rename to tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala diff --git a/tapeout/src/main/scala/transforms/pads/PadAnnotations.scala b/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala similarity index 100% rename from tapeout/src/main/scala/transforms/pads/PadAnnotations.scala rename to tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala diff --git a/tapeout/src/main/scala/transforms/pads/PadDescriptors.scala b/tapeout/src/main/scala/transforms/.pads/PadDescriptors.scala similarity index 100% rename from tapeout/src/main/scala/transforms/pads/PadDescriptors.scala rename to tapeout/src/main/scala/transforms/.pads/PadDescriptors.scala diff --git a/tapeout/src/main/scala/transforms/pads/PadPlacement.scala b/tapeout/src/main/scala/transforms/.pads/PadPlacement.scala similarity index 100% rename from tapeout/src/main/scala/transforms/pads/PadPlacement.scala rename to tapeout/src/main/scala/transforms/.pads/PadPlacement.scala diff --git a/tapeout/src/test/scala/transforms/clkgen/ClkGenSpec.scala b/tapeout/src/test/scala/transforms/.clkgen/ClkGenSpec.scala similarity index 100% rename from tapeout/src/test/scala/transforms/clkgen/ClkGenSpec.scala rename to 
tapeout/src/test/scala/transforms/.clkgen/ClkGenSpec.scala diff --git a/tapeout/src/test/scala/transforms/pads/AddIOPadsSpec.scala b/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala similarity index 100% rename from tapeout/src/test/scala/transforms/pads/AddIOPadsSpec.scala rename to tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala From c5d01ba19cf220747f2b0264aa2fa56f45252aa8 Mon Sep 17 00:00:00 2001 From: Adam Izraelevitz Date: Wed, 6 Sep 2017 13:22:44 -0700 Subject: [PATCH 026/273] Added retime annotation --- .../main/scala/transforms/retime/Retime.scala | 45 ++++++++++++ .../scala/transforms/retime/RetimeSpec.scala | 71 +++++++++++++++++++ 2 files changed, 116 insertions(+) create mode 100644 tapeout/src/main/scala/transforms/retime/Retime.scala create mode 100644 tapeout/src/test/scala/transforms/retime/RetimeSpec.scala diff --git a/tapeout/src/main/scala/transforms/retime/Retime.scala b/tapeout/src/main/scala/transforms/retime/Retime.scala new file mode 100644 index 000000000..0f67adeae --- /dev/null +++ b/tapeout/src/main/scala/transforms/retime/Retime.scala @@ -0,0 +1,45 @@ +// See LICENSE for license details. + +package barstools.tapeout.transforms.retime + +import chisel3.internal.InstanceId +import firrtl.PrimOps.Not +import firrtl.annotations.{Annotation, CircuitName, ModuleName, Named, ComponentName} +import firrtl.ir.{Input, UIntType, IntWidth, Module, Port, DefNode, NoInfo, Reference, DoPrim, Block, Circuit} +import firrtl.passes.Pass +import firrtl.{CircuitForm, CircuitState, LowForm, Transform} + +object RetimeAnnotation { + def apply(target: ModuleName): Annotation = Annotation(target, classOf[RetimeTransform], "retime") + def unapply(a: Annotation): Option[Named] = a match { + case Annotation(m, t, "retime") if t == classOf[RetimeTransform] => Some(m) + case _ => None + } +} + +class RetimeTransform extends Transform { + override def inputForm: CircuitForm = LowForm + override def outputForm: CircuitForm = LowForm + + override def execute(state: CircuitState): CircuitState = { + getMyAnnotations(state) match { + case Nil => state + case seq => seq.foreach { + case RetimeAnnotation(ModuleName(module, CircuitName(_))) => + logger.info(s"Retiming module $module") + case RetimeAnnotation(ComponentName(name, ModuleName(module, CircuitName(_)))) => + logger.info(s"Retiming instance $module.$name") + case _ => + throw new Exception(s"There should be RetimeAnnotations, got ${seq.mkString(" -- ")}") + } + state + } + } +} + +trait RetimeLib { + self: chisel3.Module => + def retime(component: InstanceId): Unit = { + annotate(chisel3.experimental.ChiselAnnotation(component, classOf[RetimeTransform], "retime")) + } +} diff --git a/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala b/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala new file mode 100644 index 000000000..bd52b5bc0 --- /dev/null +++ b/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala @@ -0,0 +1,71 @@ +// See LICENSE for license details. 
+ +package barstools.tapeout.transforms.retime.test + +import chisel3._ +import firrtl._ +import org.scalatest.{FlatSpec, Matchers} +import chisel3.experimental._ +import chisel3.util.HasBlackBoxInline +import chisel3.iotesters._ +import barstools.tapeout.transforms.retime._ + +class RetimeSpec extends FlatSpec with Matchers { + def normalized(s: String): String = { + require(!s.contains("\n")) + s.replaceAll("\\s+", " ").trim + } + def uniqueDirName[T](gen: => T, name: String): String = { + val genClassName = gen.getClass.getName + name + genClassName.hashCode.abs + } + + behavior of "retime library" + + it should "pass simple retime module annotation" in { + val gen = () => new RetimeModule() + val dir = uniqueDirName(gen, "RetimeModule") + chisel3.Driver.execute(Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final.anno"), gen) shouldBe a [ChiselExecutionSuccess] + + val lines = io.Source.fromFile(s"test_run_dir/$dir/final.anno").getLines().map(normalized).toSeq + lines should contain ("Annotation(ModuleName(RetimeModule,CircuitName(RetimeModule)),class barstools.tapeout.transforms.retime.RetimeTransform,retime)") + } + + // TODO(azidar): need to fix/add instance annotations + ignore should "pass simple retime instance annotation" in { + val gen = () => new RetimeInstance() + val dir = uniqueDirName(gen, "RetimeInstance") + chisel3.Driver.execute(Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final.anno"), gen) shouldBe a [ChiselExecutionSuccess] + + val lines = io.Source.fromFile(s"test_run_dir/$dir/final.anno").getLines().map(normalized).toSeq + lines should contain ("Annotation(ComponentName(instance, ModuleName(RetimeInstance,CircuitName(RetimeInstance))),class barstools.tapeout.transforms.retime.RetimeTransform,retime)") + } +} + +class RetimeModule extends Module with RetimeLib { + val io = IO(new Bundle { + val in = Input(UInt(15.W)) + val out = Output(UInt(15.W)) + }) + io.out := io.in + retime(this) +} + +class MyModule extends Module with RetimeLib { + val io = IO(new Bundle { + val in = Input(UInt(15.W)) + val out = Output(UInt(15.W)) + }) + io.out := io.in +} + +class RetimeInstance extends Module with RetimeLib { + val io = IO(new Bundle { + val in = Input(UInt(15.W)) + val out = Output(UInt(15.W)) + }) + val instance = Module(new MyModule) + retime(instance) + instance.io.in := io.in + io.out := instance.io.out +} From f3d39ad08f6a80d9a3d5eba6b5b89a495157a4b2 Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Sat, 1 Jul 2017 17:33:15 -0700 Subject: [PATCH 027/273] initial port attempt for macro compiler --- .../transforms/macros/MacroCompiler.scala | 410 ++++++++++++++++++ .../main/scala/transforms/macros/Utils.scala | 132 ++++++ 2 files changed, 542 insertions(+) create mode 100644 tapeout/src/main/scala/transforms/macros/MacroCompiler.scala create mode 100644 tapeout/src/main/scala/transforms/macros/Utils.scala diff --git a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala new file mode 100644 index 000000000..b8b821c72 --- /dev/null +++ b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala @@ -0,0 +1,410 @@ +// See LICENSE for license details. 
+ +package barstools.tapeout.transforms.macros + +import firrtl._ +import firrtl.ir._ +import firrtl.PrimOps +import firrtl.Utils._ +import firrtl.annotations._ +import firrtl.CompilerUtils.getLoweringTransforms +import scala.collection.mutable.{ArrayBuffer, HashMap} +import java.io.{File, FileWriter} +import Utils._ + +object MacroCompilerAnnotation { + def apply(c: String, mem: File, lib: Option[File], synflops: Boolean) = { + Annotation(CircuitName(c), classOf[MacroCompilerTransform], + s"${mem} %s ${synflops}".format(lib map (_.toString) getOrElse "")) + } + private val matcher = "([^ ]+) ([^ ]*) (true|false)".r + def unapply(a: Annotation) = a match { + case Annotation(CircuitName(c), t, matcher(mem, lib, synflops)) if t == classOf[MacroCompilerTransform] => + Some((c, Some(new File(mem)), if (lib.isEmpty) None else Some(new File(lib)), synflops.toBoolean)) + case _ => None + } +} + +class MacroCompilerPass(memFile: Option[File], + libFile: Option[File]) extends firrtl.passes.Pass { + require(memFile.isDefined) + private val mems: Option[Seq[Macro]] = readJSON(memFile) map (_ map (x => new Macro(x))) + private val libs: Option[Seq[Macro]] = readJSON(libFile) map (_ map (x => new Macro(x))) + + def compile(mem: Macro, lib: Macro): Option[(Module, ExtModule)] = { + val pairedPorts = ( + (mem.ports filter (p => p.inputName.isDefined && !p.outputName.isDefined)) ++ // write + (mem.ports filter (p => !p.inputName.isDefined && p.outputName.isDefined)) ++ // read + (mem.ports filter (p => p.inputName.isDefined && p.outputName.isDefined)) // read writers + ) zip ( + (lib.ports filter (p => p.inputName.isDefined && !p.outputName.isDefined)) ++ // write + (lib.ports filter (p => !p.inputName.isDefined && p.outputName.isDefined)) ++ // read + (lib.ports filter (p => p.inputName.isDefined && p.outputName.isDefined)) // read writers + ) + + // Parallel mapping + val pairs = ArrayBuffer[(BigInt, BigInt)]() + var last = 0 + for (i <- 0 until mem.width.toInt) { + if (i <= last + 1) { + /* Palmer: Every memory is going to have to fit at least a single bit. */ + // coninue + } else if ((i - last) % lib.width.toInt == 0) { + /* Palmer: It's possible that we rolled over a memory's width here, + if so generate one. */ + pairs += ((last, i-1)) + last = i + } else { + /* Palmer: FIXME: This is a mess, I must just be super confused. */ + for ((memPort, libPort) <- pairedPorts) { + (memPort.maskGran, libPort.maskGran) match { + case (_, Some(p)) if p == 1 => // continue + case (Some(p), _) if i % p == 0 => + pairs += ((last, i-1)) + last = i + case (_, None) => // continue + case (_, Some(p)) if p == lib.width => // continue + case _ => + System.err println "Bit-mask (or unmasked) target memories are suppored only" + return None + } + } + } + } + pairs += ((last, mem.width.toInt - 1)) + + // Serial mapping + val instType = BundleType(lib.ports flatMap (_.tpe.fields)) + val stmts = ArrayBuffer[Statement]() + val selects = HashMap[String, Expression]() + val outputs = HashMap[String, ArrayBuffer[(Expression, Expression)]]() + /* Palmer: If we've got a parallel memory then we've got to take the + * address bits into account. 
*/ + if (mem.depth > lib.depth) { + mem.ports foreach { port => + val high = ceilLog2(mem.depth) + val low = ceilLog2(lib.depth) + val ref = WRef(port.addressName) + val name = s"${ref.name}_sel" + selects(ref.name) = WRef(name, UIntType(IntWidth(high-low))) + stmts += DefNode(NoInfo, name, bits(ref, high-1, low)) + } + } + for ((off, i) <- (0 until mem.depth.toInt by lib.depth.toInt).zipWithIndex) { + for (j <- pairs.indices) { + stmts += WDefInstance(NoInfo, s"mem_${i}_${j}", lib.name, instType) + } + for ((memPort, libPort) <- pairedPorts) { + val addrMatch = selects get memPort.addressName match { + case None => one + case Some(addr) => + val index = UIntLiteral(i, IntWidth(bitWidth(addr.tpe))) + DoPrim(PrimOps.Eq, Seq(addr, index), Nil, index.tpe) + } + def andAddrMatch(e: Expression) = and(e, addrMatch) + val cats = ArrayBuffer[Expression]() + for (((low, high), j) <- pairs.zipWithIndex) { + val inst = WRef(s"mem_${i}_${j}", instType) + def invert(exp: Expression, polarity: Option[PortPolarity]) = + polarity match { + case Some(ActiveLow) | Some(NegativeEdge) => not(exp) + case _ => exp + } + + def connectPorts(mem: Expression, + lib: String, + polarity: Option[PortPolarity]): Statement = + Connect(NoInfo, WSubField(inst, lib), invert(mem, polarity)) + + // Clock port mapping + /* Palmer: FIXME: I don't handle memories with read/write clocks yet. */ + stmts += connectPorts(WRef(memPort.clockName), + libPort.clockName, + libPort.clockPolarity) + + // Adress port mapping + /* Palmer: The address port to a memory is just the low-order bits of + * the top address. */ + stmts += connectPorts(WRef(memPort.addressName), + libPort.addressName, + libPort.addressPolarity) + + // Output port mapping + (memPort.outputName, libPort.outputName) match { + case (Some(mem), Some(lib)) => + /* Palmer: In order to produce the output of a memory we need to cat + * together a bunch of narrower memories, which can only be + * done after generating all the memories. This saves up the + * output statements for later. */ + val name = s"${mem}_${i}_${j}" + val exp = invert(bits(WSubField(inst, lib), high-low, 0), libPort.outputPolarity) + stmts += DefNode(NoInfo, name, exp) + cats += WRef(name) + case (None, Some(lib)) => + /* Palmer: If the inner memory has an output port but the outer + * one doesn't then it's safe to just leave the outer + * port floating. */ + case (None, None) => + /* Palmer: If there's no output ports at all (ie, read-only + * port on the memory) then just don't worry about it, + * there's nothing to do. */ + case (Some(mem), None) => + System.err println "WARNING: Unable to match output ports on memory" + System.err println s" outer output port: ${mem}" + return None + } + + // Input port mapping + (memPort.inputName, libPort.inputName) match { + case (Some(mem), Some(lib)) => + /* Palmer: The input port to a memory just needs to happen in parallel, + * this does a part select to narrow the memory down. */ + stmts += connectPorts(bits(WRef(mem), high, low), lib, libPort.inputPolarity) + case (None, Some(lib)) => + /* Palmer: If the inner memory has an input port but the other + * one doesn't then it's safe to just leave the inner + * port floating. This should be handled by the + * default value of the write enable, so nothing should + * every make it into the memory. */ + case (None, None) => + /* Palmer: If there's no input ports at all (ie, read-only + * port on the memory) then just don't worry about it, + * there's nothing to do. 
*/ + case (Some(mem), None) => + System.err println "WARNING: Unable to match input ports on memory" + System.err println s" outer input port: ${mem}" + return None + } + + // Mask port mapping + val memMask = memPort.maskName match { + case Some(mem) => + /* Palmer: The bits from the outer memory's write mask that will be + * used as the write mask for this inner memory. */ + if (libPort.effectiveMaskGran == libPort.width) { + bits(WRef(mem), low / memPort.effectiveMaskGran) + } else { + if (libPort.effectiveMaskGran != 1) { + // TODO + System.err println "only single-bit mask supported" + return None + } + cat(((low to high) map (i => bits(WRef(mem), i / memPort.effectiveMaskGran))).reverse) + } + case None => + /* Palmer: If there is no input port on the source memory port + * then we don't ever want to turn on this write + * enable. Otherwise, we just _always_ turn on the + * write enable port on the inner memory. */ + if (!libPort.maskName.isDefined) one + else { + val width = libPort.width / libPort.effectiveMaskGran + val value = (BigInt(1) << width.toInt) - 1 + UIntLiteral(value, IntWidth(width)) + } + } + + // Write enable port mapping + val memWriteEnable = memPort.writeEnableName match { + case Some(mem) => + /* Palmer: The outer memory's write enable port, or a constant 1 if + * there isn't a write enable port. */ + WRef(mem) + case None => + /* Palemr: If there is no input port on the source memory port + * then we don't ever want to turn on this write + * enable. Otherwise, we just _always_ turn on the + * write enable port on the inner memory. */ + if (!memPort.inputName.isDefined) zero else one + } + + // Chip enable port mapping + val memChipEnable = memPort.chipEnableName match { + case Some(mem) => WRef(mem) + case None => one + } + + // Read enable port mapping + /* Palmer: It's safe to ignore read enables, but we pass them through + * to the vendor memory if there's a port on there that + * implements the read enables. */ + (memPort.readEnableName, libPort.readEnableName) match { + case (_, None) => + case (Some(mem), Some(lib)) => + stmts += connectPorts(andAddrMatch(WRef(mem)), lib, libPort.readEnablePolarity) + case (None, Some(lib)) => + stmts += connectPorts(andAddrMatch(not(memWriteEnable)), lib, libPort.readEnablePolarity) + } + + /* Palmer: This is actually the memory compiler: it figures out how to + * implement the outer memory's collection of ports using what + * the inner memory has availiable. */ + ((libPort.maskName, libPort.writeEnableName, libPort.chipEnableName): @unchecked) match { + case (Some(mask), Some(we), Some(en)) => + /* Palmer: This is the simple option: every port exists. */ + stmts += connectPorts(memMask, mask, libPort.maskPolarity) + stmts += connectPorts(andAddrMatch(memWriteEnable), we, libPort.writeEnablePolarity) + stmts += connectPorts(andAddrMatch(memChipEnable), en, libPort.chipEnablePolarity) + case (Some(mask), Some(we), None) => + /* Palmer: If we don't have a chip enable but do have */ + stmts += connectPorts(memMask, mask, libPort.maskPolarity) + stmts += connectPorts(andAddrMatch(and(memWriteEnable, memChipEnable)), + we, libPort.writeEnablePolarity) + case (None, Some(we), Some(en)) if bitWidth(memMask.tpe) == 1 => + /* Palmer: If we're expected to provide mask ports without a + * memory that actually has them then we can use the + * write enable port instead of the mask port. 
*/ + stmts += connectPorts(andAddrMatch(and(memWriteEnable, memMask)), + we, libPort.writeEnablePolarity) + stmts += connectPorts(andAddrMatch(memChipEnable), en, libPort.chipEnablePolarity) + case (None, Some(we), Some(en)) => + // TODO + System.err println "cannot emulate multi-bit mask ports with write enable" + return None + case (None, None, None) => + /* Palmer: There's nothing to do here since there aren't any + * ports to match up. */ + } + } + // Cat macro outputs for selection + memPort.outputName match { + case Some(mem) if cats.nonEmpty => + val name = s"${mem}_${i}" + stmts += DefNode(NoInfo, name, cat(cats.toSeq.reverse)) + (outputs getOrElseUpdate (mem, ArrayBuffer[(Expression, Expression)]())) += + (addrMatch -> WRef(name)) + case _ => + } + } + } + // Connect mem outputs + mem.ports foreach { port => + port.outputName match { + case Some(mem) => outputs get mem match { + case Some(select) => + val output = (select foldRight (zero: Expression)) { + case ((cond, tval), fval) => Mux(cond, tval, fval, fval.tpe) } + stmts += Connect(NoInfo, WRef(mem), output) + case None => + } + case None => + } + } + + Some((mem.module(Block(stmts.toSeq)), lib.blackbox)) + } + + def run(c: Circuit): Circuit = { + val modules = (mems, libs) match { + case (Some(mems), Some(libs)) => (mems foldLeft c.modules){ (modules, mem) => + val (best, cost) = (libs foldLeft (None: Option[(Module, ExtModule)], BigInt(Long.MaxValue))){ + case ((best, area), lib) if mem.ports.size != lib.ports.size => + /* Palmer: FIXME: This just assumes the Chisel and vendor ports are in the same + * order, but I'm starting with what actually gets generated. */ + System.err println s"INFO: unable to compile ${mem.name} using ${lib.name} port count must match" + (best, area) + case ((best, area), lib) => + /* Palmer: A quick cost function (that must be kept in sync with + * memory_cost()) that attempts to avoid compiling unncessary + * memories. This is a lower bound on the cost of compiling a + * memory: it assumes 100% bit-cell utilization when mapping. */ + // val cost = 100 * (mem.depth * mem.width) / (lib.depth * lib.width) + + // (mem.depth * mem.width) + // Donggyu: I re-define cost + val cost = max(1, mem.depth / lib.depth) * + max(1, mem.width / lib.width) * + (lib.depth * lib.width + 1) // weights on # cells + System.err println s"Cost of ${lib.name} for ${mem.name}: ${cost}" + if (cost > area) (best, area) + else compile(mem, lib) match { + case None => (best, area) + case Some(p) => (Some(p), cost) + } + } + best match { + case None => modules + case Some((mod, bb)) => + (modules filterNot (m => m.name == mod.name || m.name == bb.name)) ++ Seq(mod, bb) + } + } + case _ => c.modules + } + val circuit = c.copy(modules = modules) + // print(circuit.serialize) + circuit + } +} + +class MacroCompilerTransform extends Transform { + def inputForm = HighForm + def outputForm = HighForm + def execute(state: CircuitState) = getMyAnnotations(state) match { + case Seq(MacroCompilerAnnotation(state.circuit.main, mem, lib, synflops)) => + val transforms = Seq( + new MacroCompilerPass(mem, lib), + // TODO: Syn flops + firrtl.passes.SplitExpressions + ) + ((transforms foldLeft state)((s, xform) => xform runTransform s)) + } +} + +class MacroCompiler extends Compiler { + def emitter = new VerilogEmitter + def transforms = + Seq(new MacroCompilerTransform) ++ + getLoweringTransforms(firrtl.HighForm, firrtl.LowForm) // ++ + // Seq(new LowFirrtlOptimization) // Todo: This is dangerous... 
+} + +object MacroCompiler extends App { + sealed trait MacroParam + case object Macros extends MacroParam + case object Library extends MacroParam + case object Verilog extends MacroParam + type MacroParamMap = Map[MacroParam, File] + val usage = Seq( + "Options:", + " -m, --macro-list: The set of macros to compile", + " -l, --library: The set of macros that have blackbox instances", + " -v, --verilog: Verilog output", + " --syn-flop: Produces synthesizable flop-based memories") mkString "\n" + + def parseArgs(map: MacroParamMap, synflops: Boolean, args: List[String]): (MacroParamMap, Boolean) = + args match { + case Nil => (map, synflops) + case ("-m" | "--macro-list") :: value :: tail => + parseArgs(map + (Macros -> new File(value)), synflops, tail) + case ("-l" | "--library") :: value :: tail => + parseArgs(map + (Library -> new File(value)), synflops, tail) + case ("-v" | "--verilog") :: value :: tail => + parseArgs(map + (Verilog -> new File(value)), synflops, tail) + case "--syn-flops" :: tail => + parseArgs(map, true, tail) + case arg :: tail => + println(s"Unknown field $arg\n") + throw new Exception(usage) + } + + def run(args: List[String]) = { + val (params, synflops) = parseArgs(Map[MacroParam, File](), false, args) + try { + val macros = readJSON(params get Macros).get map (x => (new Macro(x)).blackbox) + val circuit = Circuit(NoInfo, macros, macros.last.name) + val annotations = AnnotationMap(Seq(MacroCompilerAnnotation( + circuit.main, params(Macros), params get Library, synflops))) + val state = CircuitState(circuit, HighForm, Some(annotations)) + val verilog = new FileWriter(params(Verilog)) + val result = new MacroCompiler compile (state, verilog) + verilog.close + result + } catch { + case e: java.util.NoSuchElementException => + throw new Exception(usage) + case e: Throwable => + throw e + } + } + + run(args.toList) +} diff --git a/tapeout/src/main/scala/transforms/macros/Utils.scala b/tapeout/src/main/scala/transforms/macros/Utils.scala new file mode 100644 index 000000000..7c3977c6a --- /dev/null +++ b/tapeout/src/main/scala/transforms/macros/Utils.scala @@ -0,0 +1,132 @@ +// See LICENSE for license details. 
+ +package barstools.tapeout.transforms.macros + +import firrtl._ +import firrtl.ir._ +import firrtl.PrimOps +import firrtl.Utils.{ceilLog2, BoolType} +import scala.util.parsing.json.JSON // Todo: this will be gone +import java.io.File +import scala.language.implicitConversions + +trait PortPolarity +case object ActiveLow extends PortPolarity +case object ActiveHigh extends PortPolarity +case object NegativeEdge extends PortPolarity +case object PositiveEdge extends PortPolarity +object PortPolarity { + implicit def toPortPolarity(s: Any): PortPolarity = + (s: @unchecked) match { + case "active low" => ActiveLow + case "active high" => ActiveHigh + case "negative edge" => NegativeEdge + case "positive edge" => PositiveEdge + } + implicit def toPortPolarity(s: Option[Any]): Option[PortPolarity] = + s map toPortPolarity +} + +case class MacroPort( + clockName: String, + clockPolarity: Option[PortPolarity], + addressName: String, + addressPolarity: Option[PortPolarity], + inputName: Option[String], + inputPolarity: Option[PortPolarity], + outputName: Option[String], + outputPolarity: Option[PortPolarity], + chipEnableName: Option[String], + chipEnablePolarity: Option[PortPolarity], + readEnableName: Option[String], + readEnablePolarity: Option[PortPolarity], + writeEnableName: Option[String], + writeEnablePolarity: Option[PortPolarity], + maskName: Option[String], + maskPolarity: Option[PortPolarity], + maskGran: Option[BigInt], + width: BigInt, + depth: BigInt) { + val effectiveMaskGran = maskGran.getOrElse(width) + private val AddrType = UIntType(IntWidth(ceilLog2(depth) max 1)) + private val DataType = UIntType(IntWidth(width)) + private val MaskType = UIntType(IntWidth(width / effectiveMaskGran)) + val tpe = BundleType(Seq( + Field(clockName, Flip, ClockType), + Field(addressName, Flip, AddrType)) ++ + (inputName map (Field(_, Flip, DataType))) ++ + (outputName map (Field(_, Default, DataType))) ++ + (chipEnableName map (Field(_, Flip, BoolType))) ++ + (readEnableName map (Field(_, Flip, BoolType))) ++ + (writeEnableName map (Field(_, Flip, BoolType))) ++ + (maskName map (Field(_, Flip, MaskType))) + ) + val ports = tpe.fields map (f => Port( + NoInfo, f.name, f.flip match { case Default => Output case Flip => Input }, f.tpe)) +} + +class Macro(lib: Map[String, Any]) { + val name = lib("name").asInstanceOf[String] + val width = BigInt(lib("width").asInstanceOf[Double].toInt) + val depth = BigInt(lib("depth").asInstanceOf[Double].toInt) + val ports = lib("ports").asInstanceOf[List[_]] map { x => + val map = x.asInstanceOf[Map[String, Any]] + MacroPort( + map("clock port name").asInstanceOf[String], + map get "clock port polarity", + map("address port name").asInstanceOf[String], + map get "address port polarity", + map get "input port name" map (_.asInstanceOf[String]), + map get "input port polarity", + map get "output port name" map (_.asInstanceOf[String]), + map get "output port polarity", + map get "chip enable port name" map (_.asInstanceOf[String]), + map get "chip enable port polarity", + map get "read enable port name" map (_.asInstanceOf[String]), + map get "read enable port polarity", + map get "write enable port name" map (_.asInstanceOf[String]), + map get "write enable port polarity", + map get "mask port name" map (_.asInstanceOf[String]), + map get "mask port polarity", + map get "mask granularity" map (x => BigInt(x.asInstanceOf[Double].toInt)), + width, + depth + ) + } + private val modPorts = ports flatMap (_.ports) + val blackbox = ExtModule(NoInfo, name, modPorts, 
name, Nil) + def module(body: Statement) = Module(NoInfo, name, modPorts, body) + +} + +object Utils { + def readJSON(file: Option[File]): Option[Seq[Map[String, Any]]] = file match { + case None => None + case Some(f) => try { + (JSON parseFull io.Source.fromFile(f).mkString) match { + case Some(p: List[Any]) => Some( + (p foldLeft Seq[Map[String, Any]]()){ + case (res, x: Map[_, _]) => + val map = x.asInstanceOf[Map[String, Any]] + if (map("type").asInstanceOf[String] == "sram") res :+ map else res + case (res, _) => res + } + ) + case _ => None + } + } catch { + case _: Throwable => None + } + } + + def and(e1: Expression, e2: Expression) = + DoPrim(PrimOps.And, Seq(e1, e2), Nil, e1.tpe) + def bits(e: Expression, high: BigInt, low: BigInt): Expression = + DoPrim(PrimOps.Bits, Seq(e), Seq(high, low), UIntType(IntWidth(high-low+1))) + def bits(e: Expression, idx: BigInt): Expression = bits(e, idx, idx) + def cat(es: Seq[Expression]): Expression = + if (es.size == 1) es.head + else DoPrim(PrimOps.Cat, Seq(es.head, cat(es.tail)), Nil, UnknownType) + def not(e: Expression) = + DoPrim(PrimOps.Not, Seq(e), Nil, e.tpe) +} From 98155dd83172c3e71042cde853478e77b3cc6ccd Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Tue, 4 Jul 2017 16:30:23 -0700 Subject: [PATCH 028/273] tests for macro compiler --- .../test/resources/macros/lib-1024x8-mrw.json | 35 ++ .../test/resources/macros/lib-1024x8-n28.json | 27 + .../resources/macros/lib-1024x8-r-mw.json | 34 ++ .../test/resources/macros/lib-2048x10-rw.json | 24 + .../resources/macros/lib-2048x16-n28.json | 52 ++ .../resources/macros/lib-2048x8-mrw-re.json | 29 ++ .../test/resources/macros/lib-2048x8-mrw.json | 27 + .../test/resources/macros/lib-32x32-2rw.json | 43 ++ .../test/resources/macros/lib-32x80-mrw.json | 27 + .../test/resources/macros/mem-2000x8-mrw.json | 27 + .../resources/macros/mem-2048x16-mrw-2.json | 27 + .../resources/macros/mem-2048x16-mrw.json | 27 + .../resources/macros/mem-2048x20-mrw.json | 28 ++ .../test/resources/macros/mem-2048x8-mrw.json | 27 + .../resources/macros/mem-2048x8-r-mw.json | 31 ++ .../test/resources/macros/mem-24x52-r-w.json | 22 + .../test/resources/macros/mem-32x160-mrw.json | 27 + .../src/test/resources/macros/rocketchip.json | 76 +++ tapeout/src/test/resources/macros/saed32.json | 186 +++++++ .../transforms/macros/MacroCompilerSpec.scala | 296 +++++++++++ .../scala/transforms/macros/SplitDepth.scala | 219 ++++++++ .../scala/transforms/macros/SplitWidth.scala | 468 ++++++++++++++++++ 22 files changed, 1759 insertions(+) create mode 100644 tapeout/src/test/resources/macros/lib-1024x8-mrw.json create mode 100644 tapeout/src/test/resources/macros/lib-1024x8-n28.json create mode 100644 tapeout/src/test/resources/macros/lib-1024x8-r-mw.json create mode 100644 tapeout/src/test/resources/macros/lib-2048x10-rw.json create mode 100644 tapeout/src/test/resources/macros/lib-2048x16-n28.json create mode 100644 tapeout/src/test/resources/macros/lib-2048x8-mrw-re.json create mode 100644 tapeout/src/test/resources/macros/lib-2048x8-mrw.json create mode 100644 tapeout/src/test/resources/macros/lib-32x32-2rw.json create mode 100644 tapeout/src/test/resources/macros/lib-32x80-mrw.json create mode 100644 tapeout/src/test/resources/macros/mem-2000x8-mrw.json create mode 100644 tapeout/src/test/resources/macros/mem-2048x16-mrw-2.json create mode 100644 tapeout/src/test/resources/macros/mem-2048x16-mrw.json create mode 100644 tapeout/src/test/resources/macros/mem-2048x20-mrw.json create mode 100644 
tapeout/src/test/resources/macros/mem-2048x8-mrw.json create mode 100644 tapeout/src/test/resources/macros/mem-2048x8-r-mw.json create mode 100644 tapeout/src/test/resources/macros/mem-24x52-r-w.json create mode 100644 tapeout/src/test/resources/macros/mem-32x160-mrw.json create mode 100644 tapeout/src/test/resources/macros/rocketchip.json create mode 100644 tapeout/src/test/resources/macros/saed32.json create mode 100644 tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala create mode 100644 tapeout/src/test/scala/transforms/macros/SplitDepth.scala create mode 100644 tapeout/src/test/scala/transforms/macros/SplitWidth.scala diff --git a/tapeout/src/test/resources/macros/lib-1024x8-mrw.json b/tapeout/src/test/resources/macros/lib-1024x8-mrw.json new file mode 100644 index 000000000..b85f45dcb --- /dev/null +++ b/tapeout/src/test/resources/macros/lib-1024x8-mrw.json @@ -0,0 +1,35 @@ +[ + { + "type": "sram", + "name": "vendor_sram", + "depth": 1024, + "width": 8, + "ports": [ + { + "clock port name": "clock", + "mask granularity": 8, + "output port name": "RW0O", + "input port name": "RW0I", + "address port name": "RW0A", + "mask port name": "RW0M", + "chip enable port name": "RW0E", + "write enable port name": "RW0W", + "clock port polarity": "positive edge", + "output port polarity": "active high", + "input port polarity": "active high", + "address port polarity": "active high", + "mask port polarity": "active high", + "chip enable port polarity": "active high", + "write enable port polarity": "active high" + } + ] + }, + { + "type": "metal filler cell", + "name": "vender_dcap" + }, + { + "type": "filler cell", + "name": "vender_fill" + } +] diff --git a/tapeout/src/test/resources/macros/lib-1024x8-n28.json b/tapeout/src/test/resources/macros/lib-1024x8-n28.json new file mode 100644 index 000000000..7db92ecf9 --- /dev/null +++ b/tapeout/src/test/resources/macros/lib-1024x8-n28.json @@ -0,0 +1,27 @@ +[ + { + "type": "sram", + "name": "vendor_sram", + "depth": 1024, + "width": 8, + "ports": [ + { + "clock port name": "clock", + "mask granularity": 1, + "output port name": "RW0O", + "input port name": "RW0I", + "address port name": "RW0A", + "mask port name": "RW0M", + "chip enable port name": "RW0E", + "write enable port name": "RW0W", + "clock port polarity": "positive edge", + "output port polarity": "active high", + "input port polarity": "active high", + "address port polarity": "active high", + "mask port polarity": "active high", + "chip enable port polarity": "active high", + "write enable port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/lib-1024x8-r-mw.json b/tapeout/src/test/resources/macros/lib-1024x8-r-mw.json new file mode 100644 index 000000000..869468a47 --- /dev/null +++ b/tapeout/src/test/resources/macros/lib-1024x8-r-mw.json @@ -0,0 +1,34 @@ +[ + { + "type": "sram", + "name": "vendor_sram", + "depth": 1024, + "width": 8, + "ports": [ + { + "clock port name": "clock", + "mask granularity": 8, + "output port name": "R0O", + "address port name": "R0A", + "clock port polarity": "positive edge", + "output port polarity": "active high", + "address port polarity": "active high" + }, + { + "clock port name": "clock", + "mask granularity": 8, + "input port name": "W0I", + "address port name": "W0A", + "mask port name": "W0M", + "chip enable port name": "W0E", + "write enable port name": "W0W", + "clock port polarity": "positive edge", + "input port polarity": "active high", + "address port polarity": "active high", + "mask port 
polarity": "active high", + "chip enable port polarity": "active high", + "write enable port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/lib-2048x10-rw.json b/tapeout/src/test/resources/macros/lib-2048x10-rw.json new file mode 100644 index 000000000..75640ae56 --- /dev/null +++ b/tapeout/src/test/resources/macros/lib-2048x10-rw.json @@ -0,0 +1,24 @@ +[ + { + "type": "sram", + "name": "vendor_sram", + "depth": 2048, + "width": 10, + "ports": [ + { + "clock port name": "clock", + "output port name": "RW0O", + "input port name": "RW0I", + "address port name": "RW0A", + "chip enable port name": "RW0E", + "write enable port name": "RW0W", + "clock port polarity": "positive edge", + "output port polarity": "active high", + "input port polarity": "active high", + "address port polarity": "active high", + "chip enable port polarity": "active high", + "write enable port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/lib-2048x16-n28.json b/tapeout/src/test/resources/macros/lib-2048x16-n28.json new file mode 100644 index 000000000..2f549a27f --- /dev/null +++ b/tapeout/src/test/resources/macros/lib-2048x16-n28.json @@ -0,0 +1,52 @@ +[ + { + "type": "sram", + "name": "vendor_sram_16", + "depth": 2048, + "width": 16, + "ports": [ + { + "clock port name": "clock", + "mask granularity": 1, + "output port name": "RW0O", + "input port name": "RW0I", + "address port name": "RW0A", + "mask port name": "RW0M", + "chip enable port name": "RW0E", + "write enable port name": "RW0W", + "clock port polarity": "positive edge", + "output port polarity": "active high", + "input port polarity": "active high", + "address port polarity": "active high", + "mask port polarity": "active high", + "chip enable port polarity": "active high", + "write enable port polarity": "active high" + } + ] + }, + { + "type": "sram", + "name": "vendor_sram_4", + "depth": 2048, + "width": 4, + "ports": [ + { + "clock port name": "clock", + "mask granularity": 1, + "output port name": "RW0O", + "input port name": "RW0I", + "address port name": "RW0A", + "mask port name": "RW0M", + "chip enable port name": "RW0E", + "write enable port name": "RW0W", + "clock port polarity": "positive edge", + "output port polarity": "active high", + "input port polarity": "active high", + "address port polarity": "active high", + "mask port polarity": "active high", + "chip enable port polarity": "active high", + "write enable port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/lib-2048x8-mrw-re.json b/tapeout/src/test/resources/macros/lib-2048x8-mrw-re.json new file mode 100644 index 000000000..5766aa78d --- /dev/null +++ b/tapeout/src/test/resources/macros/lib-2048x8-mrw-re.json @@ -0,0 +1,29 @@ +[ + { + "type": "sram", + "name": "vendor_sram", + "depth": 2048, + "width": 8, + "ports": [ + { + "clock port name": "clock", + "mask granularity": 8, + "output port name": "RW0O", + "input port name": "RW0I", + "address port name": "RW0A", + "mask port name": "RW0M", + "chip enable port name": "RW0E", + "write enable port name": "RW0W", + "read enable port name": "RW0R", + "clock port polarity": "positive edge", + "output port polarity": "active high", + "input port polarity": "active high", + "address port polarity": "active high", + "mask port polarity": "active high", + "chip enable port polarity": "active high", + "write enable port polarity": "active high", + "read enable port polarity": "active low" + } + ] + } +] diff --git 
a/tapeout/src/test/resources/macros/lib-2048x8-mrw.json b/tapeout/src/test/resources/macros/lib-2048x8-mrw.json new file mode 100644 index 000000000..1d4ee508a --- /dev/null +++ b/tapeout/src/test/resources/macros/lib-2048x8-mrw.json @@ -0,0 +1,27 @@ +[ + { + "type": "sram", + "name": "vendor_sram", + "depth": 2048, + "width": 8, + "ports": [ + { + "clock port name": "clock", + "mask granularity": 8, + "output port name": "RW0O", + "input port name": "RW0I", + "address port name": "RW0A", + "mask port name": "RW0M", + "chip enable port name": "RW0E", + "write enable port name": "RW0W", + "clock port polarity": "positive edge", + "output port polarity": "active high", + "input port polarity": "active high", + "address port polarity": "active high", + "mask port polarity": "active high", + "chip enable port polarity": "active high", + "write enable port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/lib-32x32-2rw.json b/tapeout/src/test/resources/macros/lib-32x32-2rw.json new file mode 100644 index 000000000..f90848b2f --- /dev/null +++ b/tapeout/src/test/resources/macros/lib-32x32-2rw.json @@ -0,0 +1,43 @@ +[ + { + "name": "SRAM2RW32x32", + "type": "sram", + "family": "2rw", + "depth": 32, + "width": 32, + "ports": [ + { + "clock port name": "CE1", + "clock port polarity": "positive edge", + "address port name": "A1", + "address port polarity": "active high", + "input port name": "I1", + "input port polarity": "active high", + "output port name": "O1", + "output port polarity": "active high", + "read enable port name": "OEB1", + "read enable port polarity": "active low", + "write enable port name": "WEB1", + "write enable port polarity": "active low", + "chip enable port name": "CSB1", + "chip enable port polarity": "active low" + }, + { + "clock port name": "CE2", + "clock port polarity": "positive edge", + "address port name": "A2", + "address port polarity": "active high", + "input port name": "I2", + "input port polarity": "active high", + "output port name": "O2", + "output port polarity": "active high", + "read enable port name": "OEB2", + "read enable port polarity": "active low", + "write enable port name": "WEB2", + "write enable port polarity": "active low", + "chip enable port name": "CSB2", + "chip enable port polarity": "active low" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/lib-32x80-mrw.json b/tapeout/src/test/resources/macros/lib-32x80-mrw.json new file mode 100644 index 000000000..bdf0581bb --- /dev/null +++ b/tapeout/src/test/resources/macros/lib-32x80-mrw.json @@ -0,0 +1,27 @@ +[ + { + "type": "sram", + "name": "vendor_sram", + "depth": 32, + "width": 80, + "ports": [ + { + "clock port name": "clock", + "mask granularity": 1, + "output port name": "RW0O", + "input port name": "RW0I", + "address port name": "RW0A", + "mask port name": "RW0M", + "chip enable port name": "RW0E", + "write enable port name": "RW0W", + "clock port polarity": "positive edge", + "output port polarity": "active high", + "input port polarity": "active high", + "address port polarity": "active high", + "mask port polarity": "active high", + "chip enable port polarity": "active high", + "write enable port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/mem-2000x8-mrw.json b/tapeout/src/test/resources/macros/mem-2000x8-mrw.json new file mode 100644 index 000000000..cbb5887a8 --- /dev/null +++ b/tapeout/src/test/resources/macros/mem-2000x8-mrw.json @@ -0,0 +1,27 @@ +[ + { + "type": "sram", + "name": 
"name_of_sram_module", + "depth": 2000, + "width": 8, + "ports": [ + { + "clock port name": "clock", + "clock port polarity": "positive edge", + "mask granularity": 8, + "output port name": "RW0O", + "output port polarity": "active high", + "input port name": "RW0I", + "input port polarity": "active high", + "address port name": "RW0A", + "address port polarity": "active high", + "mask port name": "RW0M", + "mask port polarity": "active high", + "chip enable port name": "RW0E", + "chip enable port polarity": "active high", + "write enable port name": "RW0W", + "write enable port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/mem-2048x16-mrw-2.json b/tapeout/src/test/resources/macros/mem-2048x16-mrw-2.json new file mode 100644 index 000000000..dcd4aa536 --- /dev/null +++ b/tapeout/src/test/resources/macros/mem-2048x16-mrw-2.json @@ -0,0 +1,27 @@ +[ + { + "type": "sram", + "name": "name_of_sram_module", + "depth": 2048, + "width": 16, + "ports": [ + { + "clock port name": "clock", + "clock port polarity": "positive edge", + "mask granularity": 2, + "output port name": "RW0O", + "output port polarity": "active high", + "input port name": "RW0I", + "input port polarity": "active high", + "address port name": "RW0A", + "address port polarity": "active high", + "mask port name": "RW0M", + "mask port polarity": "active high", + "chip enable port name": "RW0E", + "chip enable port polarity": "active high", + "write enable port name": "RW0W", + "write enable port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/mem-2048x16-mrw.json b/tapeout/src/test/resources/macros/mem-2048x16-mrw.json new file mode 100644 index 000000000..2bf003fe6 --- /dev/null +++ b/tapeout/src/test/resources/macros/mem-2048x16-mrw.json @@ -0,0 +1,27 @@ +[ + { + "type": "sram", + "name": "name_of_sram_module", + "depth": 2048, + "width": 16, + "ports": [ + { + "clock port name": "clock", + "clock port polarity": "positive edge", + "mask granularity": 8, + "output port name": "RW0O", + "output port polarity": "active high", + "input port name": "RW0I", + "input port polarity": "active high", + "address port name": "RW0A", + "address port polarity": "active high", + "mask port name": "RW0M", + "mask port polarity": "active high", + "chip enable port name": "RW0E", + "chip enable port polarity": "active high", + "write enable port name": "RW0W", + "write enable port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/mem-2048x20-mrw.json b/tapeout/src/test/resources/macros/mem-2048x20-mrw.json new file mode 100644 index 000000000..740325066 --- /dev/null +++ b/tapeout/src/test/resources/macros/mem-2048x20-mrw.json @@ -0,0 +1,28 @@ +[ + { + "type": "sram", + "name": "name_of_sram_module", + "depth": 2048, + "width": 20, + "ports": [ + { + "clock port name": "clock", + "clock port polarity": "positive edge", + "mask granularity": 10, + "output port name": "RW0O", + "output port polarity": "active high", + "input port name": "RW0I", + "input port polarity": "active high", + "address port name": "RW0A", + "address port polarity": "active high", + "mask port name": "RW0M", + "mask port polarity": "active high", + "chip enable port name": "RW0E", + "chip enable port polarity": "active high", + "write enable port name": "RW0W", + "write enable port polarity": "active high" + } + ] + } +] + diff --git a/tapeout/src/test/resources/macros/mem-2048x8-mrw.json b/tapeout/src/test/resources/macros/mem-2048x8-mrw.json new file mode 
100644 index 000000000..0873fbdb6 --- /dev/null +++ b/tapeout/src/test/resources/macros/mem-2048x8-mrw.json @@ -0,0 +1,27 @@ +[ + { + "type": "sram", + "name": "name_of_sram_module", + "depth": 2048, + "width": 8, + "ports": [ + { + "clock port name": "clock", + "clock port polarity": "positive edge", + "mask granularity": 8, + "output port name": "RW0O", + "output port polarity": "active high", + "input port name": "RW0I", + "input port polarity": "active high", + "address port name": "RW0A", + "address port polarity": "active high", + "mask port name": "RW0M", + "mask port polarity": "active high", + "chip enable port name": "RW0E", + "chip enable port polarity": "active high", + "write enable port name": "RW0W", + "write enable port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/mem-2048x8-r-mw.json b/tapeout/src/test/resources/macros/mem-2048x8-r-mw.json new file mode 100644 index 000000000..e5fd13d11 --- /dev/null +++ b/tapeout/src/test/resources/macros/mem-2048x8-r-mw.json @@ -0,0 +1,31 @@ +[ + { + "type": "sram", + "name": "name_of_sram_module", + "depth": 2048, + "width": 8, + "ports": [ + { + "clock port name": "clock", + "clock port polarity": "positive edge", + "mask granularity": 8, + "input port name": "W0I", + "input port polarity": "active high", + "address port name": "W0A", + "address port polarity": "active high", + "mask port name": "W0M", + "mask port polarity": "active high", + "chip enable port name": "W0E", + "chip enable port polarity": "active high" + }, + { + "clock port name": "clock", + "clock port polarity": "positive edge", + "output port name": "R0O", + "output port polarity": "active high", + "address port name": "R0A", + "address port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/mem-24x52-r-w.json b/tapeout/src/test/resources/macros/mem-24x52-r-w.json new file mode 100644 index 000000000..e4bf66302 --- /dev/null +++ b/tapeout/src/test/resources/macros/mem-24x52-r-w.json @@ -0,0 +1,22 @@ +[ + { + "type": "sram", + "name": "entries_info_ext", + "depth": 24, + "width": 52, + "ports": [ + { + "clock port name": "R0_clk", + "output port name": "R0_data", + "address port name": "R0_addr", + "chip enable port name": "R0_en" + }, + { + "clock port name": "W0_clk", + "input port name": "W0_data", + "address port name": "W0_addr", + "chip enable port name": "W0_en" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/mem-32x160-mrw.json b/tapeout/src/test/resources/macros/mem-32x160-mrw.json new file mode 100644 index 000000000..a01a6d6c1 --- /dev/null +++ b/tapeout/src/test/resources/macros/mem-32x160-mrw.json @@ -0,0 +1,27 @@ +[ + { + "type": "sram", + "name": "name_of_sram_module", + "depth": 32, + "width": 160, + "ports": [ + { + "clock port name": "clock", + "clock port polarity": "positive edge", + "mask granularity": 20, + "output port name": "RW0O", + "output port polarity": "active high", + "input port name": "RW0I", + "input port polarity": "active high", + "address port name": "RW0A", + "address port polarity": "active high", + "mask port name": "RW0M", + "mask port polarity": "active high", + "chip enable port name": "RW0E", + "chip enable port polarity": "active high", + "write enable port name": "RW0W", + "write enable port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/rocketchip.json b/tapeout/src/test/resources/macros/rocketchip.json new file mode 100644 index 000000000..9fe0d2c42 --- /dev/null +++ 
b/tapeout/src/test/resources/macros/rocketchip.json @@ -0,0 +1,76 @@ +[ + { + "type": "sram", + "name": "tag_array_ext", + "depth": 64, + "width": 80, + "ports": [ + { + "clock port name": "RW0_clk", + "mask granularity": 20, + "output port name": "RW0_rdata", + "input port name": "RW0_wdata", + "address port name": "RW0_addr", + "mask port name": "RW0_wmask", + "chip enable port name": "RW0_en", + "write enable port name": "RW0_wmode" + } + ] + }, + { + "type": "sram", + "name": "T_1090_ext", + "depth": 512, + "width": 64, + "ports": [ + { + "clock port name": "RW0_clk", + "output port name": "RW0_rdata", + "input port name": "RW0_wdata", + "address port name": "RW0_addr", + "chip enable port name": "RW0_en", + "write enable port name": "RW0_wmode" + } + ] + }, + { + "type": "sram", + "name": "T_406_ext", + "depth": 512, + "width": 64, + "ports": [ + { + "clock port name": "RW0_clk", + "mask granularity": 8, + "output port name": "RW0_rdata", + "input port name": "RW0_wdata", + "address port name": "RW0_addr", + "mask port name": "RW0_wmask", + "chip enable port name": "RW0_en", + "write enable port name": "RW0_wmode" + } + ] + }, + { + "type": "sram", + "name": "T_2172_ext", + "depth": 64, + "width": 88, + "ports": [ + { + "clock port name": "W0_clk", + "mask granularity": 22, + "input port name": "W0_data", + "address port name": "W0_addr", + "chip enable port name": "W0_en", + "mask port name": "W0_mask" + }, + { + "clock port name": "R0_clk", + "output port name": "R0_data", + "address port name": "R0_addr", + "chip enable port name": "R0_en" + } + ] + } +] diff --git a/tapeout/src/test/resources/macros/saed32.json b/tapeout/src/test/resources/macros/saed32.json new file mode 100644 index 000000000..de71d89b7 --- /dev/null +++ b/tapeout/src/test/resources/macros/saed32.json @@ -0,0 +1,186 @@ +[ + { + "type": "sram", + "name": "SRAM1RW1024x8", + "width": 8, + "depth": 1024, + "ports": [ + { + "address port name": "A", + "address port polarity": "active high", + "clock port name": "CE", + "clock port polarity": "positive edge", + "write enable port name": "WEB", + "write enable port polarity": "active low", + "read enable port name": "OEB", + "read enable port polarity": "active low", + "chip enable port name": "CEB", + "chip enable port polarity": "active low", + "output port name": "O", + "output port polarity": "active high", + "input port name": "I", + "input port polarity": "active high" + } + ] + }, + { + "type": "sram", + "name": "SRAM1RW512x32", + "width": 32, + "depth": 512, + "ports": [ + { + "address port name": "A", + "address port polarity": "active high", + "clock port name": "CE", + "clock port polarity": "positive edge", + "write enable port name": "WEB", + "write enable port polarity": "active low", + "read enable port name": "OEB", + "read enable port polarity": "active low", + "chip enable port name": "CEB", + "chip enable port polarity": "active low", + "output port name": "O", + "output port polarity": "active high", + "input port name": "I", + "input port polarity": "active high" + } + ] + }, + { + "type": "sram", + "name": "SRAM1RW64x128", + "width": 128, + "depth": 64, + "ports": [ + { + "address port name": "A", + "address port polarity": "active high", + "clock port name": "CE", + "clock port polarity": "positive edge", + "write enable port name": "WEB", + "write enable port polarity": "active low", + "read enable port name": "OEB", + "read enable port polarity": "active low", + "chip enable port name": "CEB", + "chip enable port polarity": "active low", + 
"output port name": "O", + "output port polarity": "active high", + "input port name": "I", + "input port polarity": "active high" + } + ] + }, + { + "type": "sram", + "name": "SRAM1RW64x32", + "width": 32, + "depth": 64, + "ports": [ + { + "address port name": "A", + "address port polarity": "active high", + "clock port name": "CE", + "clock port polarity": "positive edge", + "write enable port name": "WEB", + "write enable port polarity": "active low", + "read enable port name": "OEB", + "read enable port polarity": "active low", + "chip enable port name": "CEB", + "chip enable port polarity": "active low", + "output port name": "O", + "output port polarity": "active high", + "input port name": "I", + "input port polarity": "active high" + } + ] + }, + { + "type": "sram", + "name": "SRAM1RW64x8", + "width": 8, + "depth": 64, + "ports": [ + { + "address port name": "A", + "address port polarity": "active high", + "clock port name": "CE", + "clock port polarity": "positive edge", + "write enable port name": "WEB", + "write enable port polarity": "active low", + "read enable port name": "OEB", + "read enable port polarity": "active low", + "chip enable port name": "CEB", + "chip enable port polarity": "active low", + "output port name": "O", + "output port polarity": "active high", + "input port name": "I", + "input port polarity": "active high" + } + ] + }, + { + "type": "sram", + "name": "SRAM1RW512x8", + "width": 8, + "depth": 512, + "ports": [ + { + "address port name": "A", + "address port polarity": "active high", + "clock port name": "CE", + "clock port polarity": "positive edge", + "write enable port name": "WEB", + "write enable port polarity": "active low", + "read enable port name": "OEB", + "read enable port polarity": "active low", + "chip enable port name": "CEB", + "chip enable port polarity": "active low", + "output port name": "O", + "output port polarity": "active high", + "input port name": "I", + "input port polarity": "active high" + } + ] + }, + { + "type": "sram", + "name": "SRAM2RW64x32", + "width": 32, + "depth": 64, + "ports": [ + { + "address port name": "A1", + "address port polarity": "active high", + "clock port name": "CE1", + "clock port polarity": "positive edge", + "write enable port name": "WEB1", + "write enable port polarity": "active low", + "read enable port name": "OEB1", + "read enable port polarity": "active low", + "chip enable port name": "CEB1", + "chip enable port polarity": "active low", + "output port name": "O1", + "output port polarity": "active high", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "address port name": "A2", + "address port polarity": "active high", + "clock port name": "CE2", + "clock port polarity": "positive edge", + "write enable port name": "WEB2", + "write enable port polarity": "active low", + "read enable port name": "OEB2", + "read enable port polarity": "active low", + "chip enable port name": "CEB2", + "chip enable port polarity": "active low", + "output port name": "O2", + "output port polarity": "active high", + "input port name": "I2", + "input port polarity": "active high" + } + ] + } +] diff --git a/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala b/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala new file mode 100644 index 000000000..6b34204d7 --- /dev/null +++ b/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala @@ -0,0 +1,296 @@ +package barstools.tapeout.transforms.macros + +import firrtl._ +import firrtl.ir.{Circuit, NoInfo} 
+import firrtl.passes.RemoveEmpty +import firrtl.Parser.parse +import java.io.{File, StringWriter} + +abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalatest.Matchers { + val macroDir = new File("tapeout/src/test/resources/macros") + val testDir = new File("test_run_dir/macros") ; testDir.mkdirs + + def args(mem: File, lib: Option[File], v: File, synflops: Boolean) = + List("-m", mem.toString, "-v", v.toString) ++ + (lib match { case None => Nil case Some(l) => List("-l", l.toString) }) ++ + (if (synflops) List("--syn-flops") else Nil) + + def compile(mem: File, lib: Option[File], v: File, synflops: Boolean) { + MacroCompiler.run(args(mem, lib, v, synflops)) + } + + def execute(mem: Option[File], lib: Option[File], synflops: Boolean, output: String) { + require(mem.isDefined) + val macros = Utils.readJSON(mem).get map (x => (new Macro(x)).blackbox) + val circuit = Circuit(NoInfo, macros, macros.last.name) + val passes = Seq(new MacroCompilerPass(mem, lib), RemoveEmpty) + val result = (passes foldLeft circuit)((c, pass) => pass run c) + val gold = RemoveEmpty run parse(output) + (result.serialize) should be (gold.serialize) + } +} + +class RocketChipTest extends MacroCompilerSpec { + val mem = new File(macroDir, "rocketchip.json") + val lib = new File(macroDir, "saed32.json") + val v = new File(testDir, "rocketchip.macro.v") + val output = // TODO: check correctness... +""" +circuit T_2172_ext : + module tag_array_ext : + input RW0_clk : Clock + input RW0_addr : UInt<6> + input RW0_wdata : UInt<80> + output RW0_rdata : UInt<80> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + input RW0_wmask : UInt<4> + + inst mem_0_0 of SRAM1RW64x32 + inst mem_0_1 of SRAM1RW64x32 + inst mem_0_2 of SRAM1RW64x32 + inst mem_0_3 of SRAM1RW64x32 + mem_0_0.CE <= RW0_clk + mem_0_0.A <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.O, 19, 0) + mem_0_0.I <= bits(RW0_wdata, 19, 0) + mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) + mem_0_0.CEB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_1.CE <= RW0_clk + mem_0_1.A <= RW0_addr + node RW0_rdata_0_1 = bits(mem_0_1.O, 19, 0) + mem_0_1.I <= bits(RW0_wdata, 39, 20) + mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) + mem_0_1.CEB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_2.CE <= RW0_clk + mem_0_2.A <= RW0_addr + node RW0_rdata_0_2 = bits(mem_0_2.O, 19, 0) + mem_0_2.I <= bits(RW0_wdata, 59, 40) + mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) + mem_0_2.CEB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_3.CE <= RW0_clk + mem_0_3.A <= RW0_addr + node RW0_rdata_0_3 = bits(mem_0_3.O, 19, 0) + mem_0_3.I <= bits(RW0_wdata, 79, 60) + mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) + mem_0_3.CEB <= not(and(RW0_en, UInt<1>("h1"))) + node RW0_rdata_0 = cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + extmodule SRAM1RW64x32 : + input CE : Clock + input A : UInt<6> + input I : UInt<32> + output O : UInt<32> + input CEB : UInt<1> + input OEB : UInt<1> + input WEB : UInt<1> + + defname = SRAM1RW64x32 + + + module T_1090_ext : + input RW0_clk : Clock + input RW0_addr : UInt<9> + input RW0_wdata : UInt<64> + output RW0_rdata : 
UInt<64> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + + inst mem_0_0 of SRAM1RW512x32 + inst mem_0_1 of SRAM1RW512x32 + mem_0_0.CE <= RW0_clk + mem_0_0.A <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.O, 31, 0) + mem_0_0.I <= bits(RW0_wdata, 31, 0) + mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_0.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) + mem_0_0.CEB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_1.CE <= RW0_clk + mem_0_1.A <= RW0_addr + node RW0_rdata_0_1 = bits(mem_0_1.O, 31, 0) + mem_0_1.I <= bits(RW0_wdata, 63, 32) + mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_1.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) + mem_0_1.CEB <= not(and(RW0_en, UInt<1>("h1"))) + node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + module T_406_ext : + input RW0_clk : Clock + input RW0_addr : UInt<9> + input RW0_wdata : UInt<64> + output RW0_rdata : UInt<64> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + input RW0_wmask : UInt<8> + + inst mem_0_0 of SRAM1RW512x32 + inst mem_0_1 of SRAM1RW512x32 + inst mem_0_2 of SRAM1RW512x32 + inst mem_0_3 of SRAM1RW512x32 + inst mem_0_4 of SRAM1RW512x32 + inst mem_0_5 of SRAM1RW512x32 + inst mem_0_6 of SRAM1RW512x32 + inst mem_0_7 of SRAM1RW512x32 + mem_0_0.CE <= RW0_clk + mem_0_0.A <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.O, 7, 0) + mem_0_0.I <= bits(RW0_wdata, 7, 0) + mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) + mem_0_0.CEB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_1.CE <= RW0_clk + mem_0_1.A <= RW0_addr + node RW0_rdata_0_1 = bits(mem_0_1.O, 7, 0) + mem_0_1.I <= bits(RW0_wdata, 15, 8) + mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) + mem_0_1.CEB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_2.CE <= RW0_clk + mem_0_2.A <= RW0_addr + node RW0_rdata_0_2 = bits(mem_0_2.O, 7, 0) + mem_0_2.I <= bits(RW0_wdata, 23, 16) + mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) + mem_0_2.CEB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_3.CE <= RW0_clk + mem_0_3.A <= RW0_addr + node RW0_rdata_0_3 = bits(mem_0_3.O, 7, 0) + mem_0_3.I <= bits(RW0_wdata, 31, 24) + mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) + mem_0_3.CEB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_4.CE <= RW0_clk + mem_0_4.A <= RW0_addr + node RW0_rdata_0_4 = bits(mem_0_4.O, 7, 0) + mem_0_4.I <= bits(RW0_wdata, 39, 32) + mem_0_4.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_4.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 4, 4)), UInt<1>("h1"))) + mem_0_4.CEB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_5.CE <= RW0_clk + mem_0_5.A <= RW0_addr + node RW0_rdata_0_5 = bits(mem_0_5.O, 7, 0) + mem_0_5.I <= bits(RW0_wdata, 47, 40) + mem_0_5.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_5.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 5, 5)), UInt<1>("h1"))) + mem_0_5.CEB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_6.CE <= RW0_clk + mem_0_6.A <= RW0_addr + node RW0_rdata_0_6 = bits(mem_0_6.O, 7, 0) + mem_0_6.I <= bits(RW0_wdata, 55, 48) + mem_0_6.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_6.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 6, 6)), UInt<1>("h1"))) + mem_0_6.CEB <= not(and(RW0_en, 
UInt<1>("h1"))) + mem_0_7.CE <= RW0_clk + mem_0_7.A <= RW0_addr + node RW0_rdata_0_7 = bits(mem_0_7.O, 7, 0) + mem_0_7.I <= bits(RW0_wdata, 63, 56) + mem_0_7.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_7.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1"))) + mem_0_7.CEB <= not(and(RW0_en, UInt<1>("h1"))) + node RW0_rdata_0 = cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + extmodule SRAM1RW512x32 : + input CE : Clock + input A : UInt<9> + input I : UInt<32> + output O : UInt<32> + input CEB : UInt<1> + input OEB : UInt<1> + input WEB : UInt<1> + + defname = SRAM1RW512x32 + + + module T_2172_ext : + input W0_clk : Clock + input W0_addr : UInt<6> + input W0_data : UInt<88> + input W0_en : UInt<1> + input W0_mask : UInt<4> + input R0_clk : Clock + input R0_addr : UInt<6> + output R0_data : UInt<88> + input R0_en : UInt<1> + + inst mem_0_0 of SRAM2RW64x32 + inst mem_0_1 of SRAM2RW64x32 + inst mem_0_2 of SRAM2RW64x32 + inst mem_0_3 of SRAM2RW64x32 + mem_0_0.CE1 <= W0_clk + mem_0_0.A1 <= W0_addr + mem_0_0.I1 <= bits(W0_data, 21, 0) + mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), UInt<1>("h1"))) + mem_0_0.CEB1 <= not(and(W0_en, UInt<1>("h1"))) + mem_0_1.CE1 <= W0_clk + mem_0_1.A1 <= W0_addr + mem_0_1.I1 <= bits(W0_data, 43, 22) + mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 1, 1)), UInt<1>("h1"))) + mem_0_1.CEB1 <= not(and(W0_en, UInt<1>("h1"))) + mem_0_2.CE1 <= W0_clk + mem_0_2.A1 <= W0_addr + mem_0_2.I1 <= bits(W0_data, 65, 44) + mem_0_2.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_2.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 2, 2)), UInt<1>("h1"))) + mem_0_2.CEB1 <= not(and(W0_en, UInt<1>("h1"))) + mem_0_3.CE1 <= W0_clk + mem_0_3.A1 <= W0_addr + mem_0_3.I1 <= bits(W0_data, 87, 66) + mem_0_3.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_3.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 3, 3)), UInt<1>("h1"))) + mem_0_3.CEB1 <= not(and(W0_en, UInt<1>("h1"))) + mem_0_0.CE2 <= R0_clk + mem_0_0.A2 <= R0_addr + node R0_data_0_0 = bits(mem_0_0.O2, 21, 0) + mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_0.CEB2 <= not(and(R0_en, UInt<1>("h1"))) + mem_0_1.CE2 <= R0_clk + mem_0_1.A2 <= R0_addr + node R0_data_0_1 = bits(mem_0_1.O2, 21, 0) + mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_1.CEB2 <= not(and(R0_en, UInt<1>("h1"))) + mem_0_2.CE2 <= R0_clk + mem_0_2.A2 <= R0_addr + node R0_data_0_2 = bits(mem_0_2.O2, 21, 0) + mem_0_2.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_2.CEB2 <= not(and(R0_en, UInt<1>("h1"))) + mem_0_3.CE2 <= R0_clk + mem_0_3.A2 <= R0_addr + node R0_data_0_3 = bits(mem_0_3.O2, 21, 0) + mem_0_3.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_3.CEB2 <= not(and(R0_en, UInt<1>("h1"))) + node R0_data_0 = cat(R0_data_0_3, cat(R0_data_0_2, cat(R0_data_0_1, R0_data_0_0))) + R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) + + 
extmodule SRAM2RW64x32 : + input CE1 : Clock + input A1 : UInt<6> + input I1 : UInt<32> + output O1 : UInt<32> + input CEB1 : UInt<1> + input OEB1 : UInt<1> + input WEB1 : UInt<1> + input CE2 : Clock + input A2 : UInt<6> + input I2 : UInt<32> + output O2 : UInt<32> + input CEB2 : UInt<1> + input OEB2 : UInt<1> + input WEB2 : UInt<1> + + defname = SRAM2RW64x32 +""" + compile(mem, Some(lib), v, false) +} diff --git a/tapeout/src/test/scala/transforms/macros/SplitDepth.scala b/tapeout/src/test/scala/transforms/macros/SplitDepth.scala new file mode 100644 index 000000000..c5ef8e198 --- /dev/null +++ b/tapeout/src/test/scala/transforms/macros/SplitDepth.scala @@ -0,0 +1,219 @@ +package barstools.tapeout.transforms.macros + +import java.io.File + +class SplitDepth2048x8_mrw extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2048x8-mrw.json") + val lib = new File(macroDir, "lib-1024x8-mrw.json") + val v = new File(testDir, "split_depth_2048x8_mrw.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<8> + output RW0O : UInt<8> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<1> + + node RW0A_sel = bits(RW0A, 10, 10) + inst mem_0_0 of vendor_sram + mem_0_0.clock <= clock + mem_0_0.RW0A <= RW0A + node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + mem_0_0.RW0I <= bits(RW0I, 7, 0) + mem_0_0.RW0M <= bits(RW0M, 0, 0) + mem_0_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h0"))) + mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) + node RW0O_0 = RW0O_0_0 + inst mem_1_0 of vendor_sram + mem_1_0.clock <= clock + mem_1_0.RW0A <= RW0A + node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) + mem_1_0.RW0I <= bits(RW0I, 7, 0) + mem_1_0.RW0M <= bits(RW0M, 0, 0) + mem_1_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h1"))) + mem_1_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h1"))) + node RW0O_1 = RW0O_1_0 + RW0O <= mux(eq(RW0A_sel, UInt<1>("h0")), RW0O_0, mux(eq(RW0A_sel, UInt<1>("h1")), RW0O_1, UInt<1>("h0"))) + + extmodule vendor_sram : + input clock : Clock + input RW0A : UInt<10> + input RW0I : UInt<8> + output RW0O : UInt<8> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<1> + + defname = vendor_sram +""" + compile(mem, Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} + +class SplitDepth2000x8_mrw extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2000x8-mrw.json") + val lib = new File(macroDir, "lib-1024x8-mrw.json") + val v = new File(testDir, "split_depth_2000x8_mrw.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<8> + output RW0O : UInt<8> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<1> + + node RW0A_sel = bits(RW0A, 10, 10) + inst mem_0_0 of vendor_sram + mem_0_0.clock <= clock + mem_0_0.RW0A <= RW0A + node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + mem_0_0.RW0I <= bits(RW0I, 7, 0) + mem_0_0.RW0M <= bits(RW0M, 0, 0) + mem_0_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h0"))) + mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) + node RW0O_0 = RW0O_0_0 + inst mem_1_0 of vendor_sram + mem_1_0.clock <= clock + mem_1_0.RW0A <= RW0A + node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) + mem_1_0.RW0I <= bits(RW0I, 7, 0) + mem_1_0.RW0M <= bits(RW0M, 0, 0) + mem_1_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h1"))) + mem_1_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h1"))) + node RW0O_1 = RW0O_1_0 + RW0O <= mux(eq(RW0A_sel, UInt<1>("h0")), RW0O_0, 
mux(eq(RW0A_sel, UInt<1>("h1")), RW0O_1, UInt<1>("h0"))) + + extmodule vendor_sram : + input clock : Clock + input RW0A : UInt<10> + input RW0I : UInt<8> + output RW0O : UInt<8> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<1> + + defname = vendor_sram +""" + compile(mem, Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} + +class SplitDepth2048x8_n28 extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2048x8-mrw.json") + val lib = new File(macroDir, "lib-1024x8-n28.json") + val v = new File(testDir, "split_depth_2048x8_n28.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<8> + output RW0O : UInt<8> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<1> + + node RW0A_sel = bits(RW0A, 10, 10) + inst mem_0_0 of vendor_sram + mem_0_0.clock <= clock + mem_0_0.RW0A <= RW0A + node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + mem_0_0.RW0I <= bits(RW0I, 7, 0) + mem_0_0.RW0M <= cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))) + mem_0_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h0"))) + mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) + node RW0O_0 = RW0O_0_0 + inst mem_1_0 of vendor_sram + mem_1_0.clock <= clock + mem_1_0.RW0A <= RW0A + node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) + mem_1_0.RW0I <= bits(RW0I, 7, 0) + mem_1_0.RW0M <= cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))) + mem_1_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h1"))) + mem_1_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h1"))) + node RW0O_1 = RW0O_1_0 + RW0O <= mux(eq(RW0A_sel, UInt<1>("h0")), RW0O_0, mux(eq(RW0A_sel, UInt<1>("h1")), RW0O_1, UInt<1>("h0"))) + + extmodule vendor_sram : + input clock : Clock + input RW0A : UInt<10> + input RW0I : UInt<8> + output RW0O : UInt<8> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<8> + + defname = vendor_sram +""" + compile(mem, Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} + +class SplitDepth2048x8_r_mw extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2048x8-r-mw.json") + val lib = new File(macroDir, "lib-1024x8-r-mw.json") + val v = new File(testDir, "split_depth_2048x8_r_mw.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input W0A : UInt<11> + input W0I : UInt<8> + input W0E : UInt<1> + input W0M : UInt<1> + input clock : Clock + input R0A : UInt<11> + output R0O : UInt<8> + + node W0A_sel = bits(W0A, 10, 10) + node R0A_sel = bits(R0A, 10, 10) + inst mem_0_0 of vendor_sram + mem_0_0.clock <= clock + mem_0_0.W0A <= W0A + mem_0_0.W0I <= bits(W0I, 7, 0) + mem_0_0.W0M <= bits(W0M, 0, 0) + mem_0_0.W0W <= and(UInt<1>("h1"), eq(W0A_sel, UInt<1>("h0"))) + mem_0_0.W0E <= and(W0E, eq(W0A_sel, UInt<1>("h0"))) + mem_0_0.clock <= clock + mem_0_0.R0A <= R0A + node R0O_0_0 = bits(mem_0_0.R0O, 7, 0) + node R0O_0 = R0O_0_0 + inst mem_1_0 of vendor_sram + mem_1_0.clock <= clock + mem_1_0.W0A <= W0A + mem_1_0.W0I <= bits(W0I, 7, 0) + mem_1_0.W0M <= bits(W0M, 0, 0) + mem_1_0.W0W <= and(UInt<1>("h1"), eq(W0A_sel, UInt<1>("h1"))) + mem_1_0.W0E <= and(W0E, eq(W0A_sel, UInt<1>("h1"))) + mem_1_0.clock <= clock + mem_1_0.R0A <= R0A + node R0O_1_0 = bits(mem_1_0.R0O, 7, 0) + node R0O_1 
= R0O_1_0 + R0O <= mux(eq(R0A_sel, UInt<1>("h0")), R0O_0, mux(eq(R0A_sel, UInt<1>("h1")), R0O_1, UInt<1>("h0"))) + + extmodule vendor_sram : + input clock : Clock + input R0A : UInt<10> + output R0O : UInt<8> + input clock : Clock + input W0A : UInt<10> + input W0I : UInt<8> + input W0E : UInt<1> + input W0W : UInt<1> + input W0M : UInt<1> + + defname = vendor_sram +""" + compile(mem, Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} diff --git a/tapeout/src/test/scala/transforms/macros/SplitWidth.scala b/tapeout/src/test/scala/transforms/macros/SplitWidth.scala new file mode 100644 index 000000000..0e4d638e5 --- /dev/null +++ b/tapeout/src/test/scala/transforms/macros/SplitWidth.scala @@ -0,0 +1,468 @@ +package barstools.tapeout.transforms.macros + +import java.io.File + +class SplitWidth2048x16_mrw extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2048x16-mrw.json") + val lib = new File(macroDir, "lib-2048x8-mrw.json") + val v = new File(testDir, "split_width_2048x16_mrw.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<16> + output RW0O : UInt<16> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<2> + + inst mem_0_0 of vendor_sram + inst mem_0_1 of vendor_sram + mem_0_0.clock <= clock + mem_0_0.RW0A <= RW0A + node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + mem_0_0.RW0I <= bits(RW0I, 7, 0) + mem_0_0.RW0M <= bits(RW0M, 0, 0) + mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) + mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_1.clock <= clock + mem_0_1.RW0A <= RW0A + node RW0O_0_1 = bits(mem_0_1.RW0O, 7, 0) + mem_0_1.RW0I <= bits(RW0I, 15, 8) + mem_0_1.RW0M <= bits(RW0M, 1, 1) + mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) + mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) + node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) + RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + extmodule vendor_sram : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<8> + output RW0O : UInt<8> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<1> + + defname = vendor_sram +""" + compile(mem, Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} + +class SplitWidth2048x16_mrw_Uneven extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2048x16-mrw.json") + val lib = new File(macroDir, "lib-2048x10-rw.json") + val v = new File(testDir, "split_width_2048x16_mrw_uneven.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<16> + output RW0O : UInt<16> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<2> + + inst mem_0_0 of vendor_sram + inst mem_0_1 of vendor_sram + mem_0_0.clock <= clock + mem_0_0.RW0A <= RW0A + node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + mem_0_0.RW0I <= bits(RW0I, 7, 0) + mem_0_0.RW0W <= and(and(RW0W, bits(RW0M, 0, 0)), UInt<1>("h1")) + mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_1.clock <= clock + mem_0_1.RW0A <= RW0A + node RW0O_0_1 = bits(mem_0_1.RW0O, 7, 0) + mem_0_1.RW0I <= bits(RW0I, 15, 8) + mem_0_1.RW0W <= and(and(RW0W, bits(RW0M, 1, 1)), UInt<1>("h1")) + mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) + node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) + RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + extmodule vendor_sram : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<10> + output RW0O : UInt<10> + input RW0E : UInt<1> + input RW0W : UInt<1> + + defname = vendor_sram +""" + compile(mem, 
Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} + +class SplitWidth2048x16_mrw_VeryUneven extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2048x16-mrw-2.json") + val lib = new File(macroDir, "lib-2048x10-rw.json") + val v = new File(testDir, "split_width_2048x16_mrw_very_uneven.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<16> + output RW0O : UInt<16> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<8> + + inst mem_0_0 of vendor_sram + inst mem_0_1 of vendor_sram + inst mem_0_2 of vendor_sram + inst mem_0_3 of vendor_sram + inst mem_0_4 of vendor_sram + inst mem_0_5 of vendor_sram + inst mem_0_6 of vendor_sram + inst mem_0_7 of vendor_sram + mem_0_0.clock <= clock + mem_0_0.RW0A <= RW0A + node RW0O_0_0 = bits(mem_0_0.RW0O, 1, 0) + mem_0_0.RW0I <= bits(RW0I, 1, 0) + mem_0_0.RW0W <= and(and(RW0W, bits(RW0M, 0, 0)), UInt<1>("h1")) + mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_1.clock <= clock + mem_0_1.RW0A <= RW0A + node RW0O_0_1 = bits(mem_0_1.RW0O, 1, 0) + mem_0_1.RW0I <= bits(RW0I, 3, 2) + mem_0_1.RW0W <= and(and(RW0W, bits(RW0M, 1, 1)), UInt<1>("h1")) + mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_2.clock <= clock + mem_0_2.RW0A <= RW0A + node RW0O_0_2 = bits(mem_0_2.RW0O, 1, 0) + mem_0_2.RW0I <= bits(RW0I, 5, 4) + mem_0_2.RW0W <= and(and(RW0W, bits(RW0M, 2, 2)), UInt<1>("h1")) + mem_0_2.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_3.clock <= clock + mem_0_3.RW0A <= RW0A + node RW0O_0_3 = bits(mem_0_3.RW0O, 1, 0) + mem_0_3.RW0I <= bits(RW0I, 7, 6) + mem_0_3.RW0W <= and(and(RW0W, bits(RW0M, 3, 3)), UInt<1>("h1")) + mem_0_3.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_4.clock <= clock + mem_0_4.RW0A <= RW0A + node RW0O_0_4 = bits(mem_0_4.RW0O, 1, 0) + mem_0_4.RW0I <= bits(RW0I, 9, 8) + mem_0_4.RW0W <= and(and(RW0W, bits(RW0M, 4, 4)), UInt<1>("h1")) + mem_0_4.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_5.clock <= clock + mem_0_5.RW0A <= RW0A + node RW0O_0_5 = bits(mem_0_5.RW0O, 1, 0) + mem_0_5.RW0I <= bits(RW0I, 11, 10) + mem_0_5.RW0W <= and(and(RW0W, bits(RW0M, 5, 5)), UInt<1>("h1")) + mem_0_5.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_6.clock <= clock + mem_0_6.RW0A <= RW0A + node RW0O_0_6 = bits(mem_0_6.RW0O, 1, 0) + mem_0_6.RW0I <= bits(RW0I, 13, 12) + mem_0_6.RW0W <= and(and(RW0W, bits(RW0M, 6, 6)), UInt<1>("h1")) + mem_0_6.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_7.clock <= clock + mem_0_7.RW0A <= RW0A + node RW0O_0_7 = bits(mem_0_7.RW0O, 1, 0) + mem_0_7.RW0I <= bits(RW0I, 15, 14) + mem_0_7.RW0W <= and(and(RW0W, bits(RW0M, 7, 7)), UInt<1>("h1")) + mem_0_7.RW0E <= and(RW0E, UInt<1>("h1")) + node RW0O_0 = cat(RW0O_0_7, cat(RW0O_0_6, cat(RW0O_0_5, cat(RW0O_0_4, cat(RW0O_0_3, cat(RW0O_0_2, cat(RW0O_0_1, RW0O_0_0))))))) + RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + extmodule vendor_sram : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<10> + output RW0O : UInt<10> + input RW0E : UInt<1> + input RW0W : UInt<1> + + defname = vendor_sram +""" + compile(mem, Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} + +class SplitWidth2048x16_mrw_ReadEnable extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2048x16-mrw.json") + val lib = new File(macroDir, "lib-2048x8-mrw-re.json") + val v = new File(testDir, "split_width_2048x16_mrw_read_enable.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input RW0A : UInt<11> + input RW0I 
: UInt<16> + output RW0O : UInt<16> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<2> + + inst mem_0_0 of vendor_sram + inst mem_0_1 of vendor_sram + mem_0_0.clock <= clock + mem_0_0.RW0A <= RW0A + node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + mem_0_0.RW0I <= bits(RW0I, 7, 0) + mem_0_0.RW0R <= not(and(not(RW0W), UInt<1>("h1"))) + mem_0_0.RW0M <= bits(RW0M, 0, 0) + mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) + mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_1.clock <= clock + mem_0_1.RW0A <= RW0A + node RW0O_0_1 = bits(mem_0_1.RW0O, 7, 0) + mem_0_1.RW0I <= bits(RW0I, 15, 8) + mem_0_1.RW0R <= not(and(not(RW0W), UInt<1>("h1"))) + mem_0_1.RW0M <= bits(RW0M, 1, 1) + mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) + mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) + node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) + RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + extmodule vendor_sram : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<8> + output RW0O : UInt<8> + input RW0E : UInt<1> + input RW0R : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<1> + + defname = vendor_sram +""" + compile(mem, Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} + +class SplitWidth2048x16_n28 extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2048x16-mrw.json") + val lib = new File(macroDir, "lib-2048x16-n28.json") + val v = new File(testDir, "split_width_2048x16_n28.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<16> + output RW0O : UInt<16> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<2> + + inst mem_0_0 of vendor_sram_16 + mem_0_0.clock <= clock + mem_0_0.RW0A <= RW0A + node RW0O_0_0 = bits(mem_0_0.RW0O, 15, 0) + mem_0_0.RW0I <= bits(RW0I, 15, 0) + mem_0_0.RW0M <= cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))))))))))) + mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) + mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + node RW0O_0 = RW0O_0_0 + RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + extmodule vendor_sram_16 : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<16> + output RW0O : UInt<16> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<16> + + defname = vendor_sram_16 +""" + compile(mem, Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} + +class SplitWidth2048x20_mrw_UnevenMask extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2048x20-mrw.json") + val lib = new File(macroDir, "lib-2048x8-mrw.json") + val v = new File(testDir, "split_width_2048x20_mrw_uneven_mask.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<20> + output RW0O : UInt<20> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<2> + + inst mem_0_0 of vendor_sram + inst mem_0_1 of vendor_sram + inst mem_0_2 of vendor_sram + inst mem_0_3 of vendor_sram + mem_0_0.clock <= clock + mem_0_0.RW0A <= RW0A + node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + mem_0_0.RW0I <= bits(RW0I, 7, 0) + mem_0_0.RW0M <= bits(RW0M, 0, 0) + mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) + mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + 
mem_0_1.clock <= clock + mem_0_1.RW0A <= RW0A + node RW0O_0_1 = bits(mem_0_1.RW0O, 1, 0) + mem_0_1.RW0I <= bits(RW0I, 9, 8) + mem_0_1.RW0M <= bits(RW0M, 0, 0) + mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) + mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_2.clock <= clock + mem_0_2.RW0A <= RW0A + node RW0O_0_2 = bits(mem_0_2.RW0O, 7, 0) + mem_0_2.RW0I <= bits(RW0I, 17, 10) + mem_0_2.RW0M <= bits(RW0M, 1, 1) + mem_0_2.RW0W <= and(RW0W, UInt<1>("h1")) + mem_0_2.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_3.clock <= clock + mem_0_3.RW0A <= RW0A + node RW0O_0_3 = bits(mem_0_3.RW0O, 1, 0) + mem_0_3.RW0I <= bits(RW0I, 19, 18) + mem_0_3.RW0M <= bits(RW0M, 1, 1) + mem_0_3.RW0W <= and(RW0W, UInt<1>("h1")) + mem_0_3.RW0E <= and(RW0E, UInt<1>("h1")) + node RW0O_0 = cat(RW0O_0_3, cat(RW0O_0_2, cat(RW0O_0_1, RW0O_0_0))) + RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + extmodule vendor_sram : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<8> + output RW0O : UInt<8> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<1> + + defname = vendor_sram +""" + compile(mem, Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} + +class SplitWidth24x52 extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-24x52-r-w.json") + val lib = new File(macroDir, "lib-32x32-2rw.json") + val v = new File(testDir, "split_width_24x52.v") + val output = +""" +circuit entries_info_ext : + module entries_info_ext : + input R0_clk : Clock + input R0_addr : UInt<5> + output R0_data : UInt<52> + input R0_en : UInt<1> + input W0_clk : Clock + input W0_addr : UInt<5> + input W0_data : UInt<52> + input W0_en : UInt<1> + + inst mem_0_0 of SRAM2RW32x32 + inst mem_0_1 of SRAM2RW32x32 + mem_0_0.CE1 <= W0_clk + mem_0_0.A1 <= W0_addr + mem_0_0.I1 <= bits(W0_data, 31, 0) + mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_0.CSB1 <= not(and(W0_en, UInt<1>("h1"))) + mem_0_1.CE1 <= W0_clk + mem_0_1.A1 <= W0_addr + mem_0_1.I1 <= bits(W0_data, 51, 32) + mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_1.CSB1 <= not(and(W0_en, UInt<1>("h1"))) + mem_0_0.CE2 <= R0_clk + mem_0_0.A2 <= R0_addr + node R0_data_0_0 = bits(mem_0_0.O2, 31, 0) + mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_0.CSB2 <= not(and(R0_en, UInt<1>("h1"))) + mem_0_1.CE2 <= R0_clk + mem_0_1.A2 <= R0_addr + node R0_data_0_1 = bits(mem_0_1.O2, 19, 0) + mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_1.CSB2 <= not(and(R0_en, UInt<1>("h1"))) + node R0_data_0 = cat(R0_data_0_1, R0_data_0_0) + R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) + + extmodule SRAM2RW32x32 : + input CE1 : Clock + input A1 : UInt<5> + input I1 : UInt<32> + output O1 : UInt<32> + input CSB1 : UInt<1> + input OEB1 : UInt<1> + input WEB1 : UInt<1> + input CE2 : Clock + input A2 : UInt<5> + input I2 : UInt<32> + output O2 : UInt<32> + input CSB2 : UInt<1> + input OEB2 : UInt<1> + input WEB2 : UInt<1> + + defname = SRAM2RW32x32 +""" + compile(mem, Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} + +class SplitWidth32x160 extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-32x160-mrw.json") + val lib = new File(macroDir, 
"lib-32x80-mrw.json") + val v = new File(testDir, "split_width_32x160.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input RW0A : UInt<5> + input RW0I : UInt<160> + output RW0O : UInt<160> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<8> + + inst mem_0_0 of vendor_sram + inst mem_0_1 of vendor_sram + mem_0_0.clock <= clock + mem_0_0.RW0A <= RW0A + node RW0O_0_0 = bits(mem_0_0.RW0O, 79, 0) + mem_0_0.RW0I <= bits(RW0I, 79, 0) + mem_0_0.RW0M <= cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) + mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) + mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + mem_0_1.clock <= clock + mem_0_1.RW0A <= RW0A + node RW0O_0_1 = bits(mem_0_1.RW0O, 79, 0) + mem_0_1.RW0I <= bits(RW0I, 159, 80) + mem_0_1.RW0M <= cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), 
cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), bits(RW0M, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) + mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) + mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) + node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) + RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + extmodule vendor_sram : + input clock : Clock + input RW0A : UInt<5> + input RW0I : UInt<80> + output RW0O : UInt<80> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<80> + + defname = vendor_sram +""" + compile(mem, Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} From 4f5a9ae02e98c6fb9d59cc956b958a8c06cccfe0 Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Tue, 4 Jul 2017 17:15:38 -0700 Subject: [PATCH 029/273] connect extra ports --- .../transforms/macros/MacroCompiler.scala | 7 ++- .../main/scala/transforms/macros/Utils.scala | 21 +++++-- .../resources/macros/lib-1024x8-sleep.json | 35 ++++++++++++ .../scala/transforms/macros/SplitDepth.scala | 56 +++++++++++++++++++ 4 files changed, 113 insertions(+), 6 deletions(-) create mode 100644 tapeout/src/test/resources/macros/lib-1024x8-sleep.json diff --git a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala index b8b821c72..0330ab135 100644 --- a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala +++ b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala @@ -92,7 +92,12 @@ class MacroCompilerPass(memFile: Option[File], } for ((off, i) <- (0 until mem.depth.toInt by lib.depth.toInt).zipWithIndex) { for (j <- pairs.indices) { - stmts += WDefInstance(NoInfo, s"mem_${i}_${j}", lib.name, instType) + val name = s"mem_${i}_${j}" + stmts += WDefInstance(NoInfo, name, lib.name, instType) + // connect extra ports + stmts ++= lib.extraPorts map { case (portName, portValue) => + Connect(NoInfo, WSubField(WRef(name), portName), portValue) + } } for ((memPort, libPort) <- pairedPorts) { val addrMatch = selects get memPort.addressName match { diff --git a/tapeout/src/main/scala/transforms/macros/Utils.scala b/tapeout/src/main/scala/transforms/macros/Utils.scala index 7c3977c6a..5a2e34e66 100644 --- a/tapeout/src/main/scala/transforms/macros/Utils.scala +++ b/tapeout/src/main/scala/transforms/macros/Utils.scala @@ -67,8 +67,8 @@ case class MacroPort( class Macro(lib: Map[String, Any]) { val name = lib("name").asInstanceOf[String] - val width = BigInt(lib("width").asInstanceOf[Double].toInt) - val depth = BigInt(lib("depth").asInstanceOf[Double].toInt) + val width = BigInt(lib("width").asInstanceOf[Double].toLong) + val depth = BigInt(lib("depth").asInstanceOf[Double].toLong) val ports = lib("ports").asInstanceOf[List[_]] map { x => val map = x.asInstanceOf[Map[String, Any]] 
MacroPort( @@ -88,15 +88,26 @@ class Macro(lib: Map[String, Any]) { map get "write enable port polarity", map get "mask port name" map (_.asInstanceOf[String]), map get "mask port polarity", - map get "mask granularity" map (x => BigInt(x.asInstanceOf[Double].toInt)), + map get "mask granularity" map (x => BigInt(x.asInstanceOf[Double].toLong)), width, depth ) } - private val modPorts = ports flatMap (_.ports) + val extraPorts = lib get "extra ports" match { + case None => Nil + case Some(p) => p.asInstanceOf[List[_]] map { x => + val map = x.asInstanceOf[Map[String, Any]] + assert(map("type").asInstanceOf[String] == "constant") // TODO: release it? + val name = map("name").asInstanceOf[String] + val width = BigInt(map("width").asInstanceOf[Double].toLong) + val value = BigInt(map("value").asInstanceOf[Double].toLong) + (name -> UIntLiteral(value, IntWidth(width))) + } + } + private val modPorts = (ports flatMap (_.ports)) ++ + (extraPorts map { case (name, value) => Port(NoInfo, name, Input, value.tpe) }) val blackbox = ExtModule(NoInfo, name, modPorts, name, Nil) def module(body: Statement) = Module(NoInfo, name, modPorts, body) - } object Utils { diff --git a/tapeout/src/test/resources/macros/lib-1024x8-sleep.json b/tapeout/src/test/resources/macros/lib-1024x8-sleep.json new file mode 100644 index 000000000..7736590d8 --- /dev/null +++ b/tapeout/src/test/resources/macros/lib-1024x8-sleep.json @@ -0,0 +1,35 @@ +[ + { + "type": "sram", + "name": "vendor_sram", + "depth": 1024, + "width": 8, + "ports": [ + { + "clock port name": "clock", + "mask granularity": 8, + "output port name": "RW0O", + "input port name": "RW0I", + "address port name": "RW0A", + "mask port name": "RW0M", + "chip enable port name": "RW0E", + "write enable port name": "RW0W", + "clock port polarity": "positive edge", + "output port polarity": "active high", + "input port polarity": "active high", + "address port polarity": "active high", + "mask port polarity": "active high", + "chip enable port polarity": "active high", + "write enable port polarity": "active high" + } + ], + "extra ports": [ + { + "name": "sleep", + "type": "constant", + "width": 1, + "value": 0 + } + ] + } +] diff --git a/tapeout/src/test/scala/transforms/macros/SplitDepth.scala b/tapeout/src/test/scala/transforms/macros/SplitDepth.scala index c5ef8e198..7a9f5b9d5 100644 --- a/tapeout/src/test/scala/transforms/macros/SplitDepth.scala +++ b/tapeout/src/test/scala/transforms/macros/SplitDepth.scala @@ -217,3 +217,59 @@ circuit name_of_sram_module : compile(mem, Some(lib), v, false) execute(Some(mem), Some(lib), false, output) } + + +class SplitDepth2048x8_mrw_Sleep extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2048x8-mrw.json") + val lib = new File(macroDir, "lib-1024x8-sleep.json") + val v = new File(testDir, "split_depth_2048x8_sleep.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<8> + output RW0O : UInt<8> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<1> + + node RW0A_sel = bits(RW0A, 10, 10) + inst mem_0_0 of vendor_sram + mem_0_0.sleep <= UInt<1>("h0") + mem_0_0.clock <= clock + mem_0_0.RW0A <= RW0A + node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + mem_0_0.RW0I <= bits(RW0I, 7, 0) + mem_0_0.RW0M <= bits(RW0M, 0, 0) + mem_0_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h0"))) + mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) + node RW0O_0 = RW0O_0_0 + inst mem_1_0 of vendor_sram + mem_1_0.sleep <= 
UInt<1>("h0") + mem_1_0.clock <= clock + mem_1_0.RW0A <= RW0A + node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) + mem_1_0.RW0I <= bits(RW0I, 7, 0) + mem_1_0.RW0M <= bits(RW0M, 0, 0) + mem_1_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h1"))) + mem_1_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h1"))) + node RW0O_1 = RW0O_1_0 + RW0O <= mux(eq(RW0A_sel, UInt<1>("h0")), RW0O_0, mux(eq(RW0A_sel, UInt<1>("h1")), RW0O_1, UInt<1>("h0"))) + + extmodule vendor_sram : + input clock : Clock + input RW0A : UInt<10> + input RW0I : UInt<8> + output RW0O : UInt<8> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<1> + input sleep : UInt<1> + + defname = vendor_sram +""" + compile(mem, Some(lib), v, false) + execute(Some(mem), Some(lib), false, output) +} From 9e7c8dce3e27bb492015399d1dc416b727c385bc Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Wed, 5 Jul 2017 12:46:58 -0700 Subject: [PATCH 030/273] add SynFlops --- .../transforms/macros/MacroCompiler.scala | 39 +- .../scala/transforms/macros/SynFlops.scala | 110 ++++++ .../main/scala/transforms/macros/Utils.scala | 17 +- .../transforms/macros/MacroCompilerSpec.scala | 14 +- .../scala/transforms/macros/SynFlops.scala | 333 ++++++++++++++++++ 5 files changed, 479 insertions(+), 34 deletions(-) create mode 100644 tapeout/src/main/scala/transforms/macros/SynFlops.scala create mode 100644 tapeout/src/test/scala/transforms/macros/SynFlops.scala diff --git a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala index 0330ab135..87386c12e 100644 --- a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala +++ b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala @@ -25,22 +25,10 @@ object MacroCompilerAnnotation { } } -class MacroCompilerPass(memFile: Option[File], - libFile: Option[File]) extends firrtl.passes.Pass { - require(memFile.isDefined) - private val mems: Option[Seq[Macro]] = readJSON(memFile) map (_ map (x => new Macro(x))) - private val libs: Option[Seq[Macro]] = readJSON(libFile) map (_ map (x => new Macro(x))) - +class MacroCompilerPass(mems: Option[Seq[Macro]], + libs: Option[Seq[Macro]]) extends firrtl.passes.Pass { def compile(mem: Macro, lib: Macro): Option[(Module, ExtModule)] = { - val pairedPorts = ( - (mem.ports filter (p => p.inputName.isDefined && !p.outputName.isDefined)) ++ // write - (mem.ports filter (p => !p.inputName.isDefined && p.outputName.isDefined)) ++ // read - (mem.ports filter (p => p.inputName.isDefined && p.outputName.isDefined)) // read writers - ) zip ( - (lib.ports filter (p => p.inputName.isDefined && !p.outputName.isDefined)) ++ // write - (lib.ports filter (p => !p.inputName.isDefined && p.outputName.isDefined)) ++ // read - (lib.ports filter (p => p.inputName.isDefined && p.outputName.isDefined)) // read writers - ) + val pairedPorts = mem.sortedPorts zip lib.sortedPorts // Parallel mapping val pairs = ArrayBuffer[(BigInt, BigInt)]() @@ -74,7 +62,6 @@ class MacroCompilerPass(memFile: Option[File], pairs += ((last, mem.width.toInt - 1)) // Serial mapping - val instType = BundleType(lib.ports flatMap (_.tpe.fields)) val stmts = ArrayBuffer[Statement]() val selects = HashMap[String, Expression]() val outputs = HashMap[String, ArrayBuffer[(Expression, Expression)]]() @@ -93,7 +80,7 @@ class MacroCompilerPass(memFile: Option[File], for ((off, i) <- (0 until mem.depth.toInt by lib.depth.toInt).zipWithIndex) { for (j <- pairs.indices) { val name = s"mem_${i}_${j}" - stmts += WDefInstance(NoInfo, name, lib.name, instType) + stmts += 
WDefInstance(NoInfo, name, lib.name, lib.tpe) // connect extra ports stmts ++= lib.extraPorts map { case (portName, portValue) => Connect(NoInfo, WSubField(WRef(name), portName), portValue) @@ -109,12 +96,7 @@ class MacroCompilerPass(memFile: Option[File], def andAddrMatch(e: Expression) = and(e, addrMatch) val cats = ArrayBuffer[Expression]() for (((low, high), j) <- pairs.zipWithIndex) { - val inst = WRef(s"mem_${i}_${j}", instType) - def invert(exp: Expression, polarity: Option[PortPolarity]) = - polarity match { - case Some(ActiveLow) | Some(NegativeEdge) => not(exp) - case _ => exp - } + val inst = WRef(s"mem_${i}_${j}", lib.tpe) def connectPorts(mem: Expression, lib: String, @@ -344,10 +326,13 @@ class MacroCompilerTransform extends Transform { def inputForm = HighForm def outputForm = HighForm def execute(state: CircuitState) = getMyAnnotations(state) match { - case Seq(MacroCompilerAnnotation(state.circuit.main, mem, lib, synflops)) => + case Seq(MacroCompilerAnnotation(state.circuit.main, memFile, libFile, synflops)) => + require(memFile.isDefined) + val mems: Option[Seq[Macro]] = readJSON(memFile) map (_ map (x => new Macro(x))) + val libs: Option[Seq[Macro]] = readJSON(libFile) map (_ map (x => new Macro(x))) val transforms = Seq( - new MacroCompilerPass(mem, lib), - // TODO: Syn flops + new MacroCompilerPass(mems, libs), + new SynFlopsPass(synflops, libs getOrElse mems.get), firrtl.passes.SplitExpressions ) ((transforms foldLeft state)((s, xform) => xform runTransform s)) @@ -359,7 +344,7 @@ class MacroCompiler extends Compiler { def transforms = Seq(new MacroCompilerTransform) ++ getLoweringTransforms(firrtl.HighForm, firrtl.LowForm) // ++ - // Seq(new LowFirrtlOptimization) // Todo: This is dangerous... + // Seq(new LowFirrtlOptimization) // Todo: This is dangerous } object MacroCompiler extends App { diff --git a/tapeout/src/main/scala/transforms/macros/SynFlops.scala b/tapeout/src/main/scala/transforms/macros/SynFlops.scala new file mode 100644 index 000000000..499258c87 --- /dev/null +++ b/tapeout/src/main/scala/transforms/macros/SynFlops.scala @@ -0,0 +1,110 @@ +// See LICENSE for license details. 
+ +package barstools.tapeout.transforms.macros + +import firrtl._ +import firrtl.ir._ +import firrtl.Utils._ +import firrtl.passes.MemPortUtils.{memPortField, memType} +import Utils._ + +class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pass { + lazy val libMods = (libs map { lib => lib.name -> { + val dataType = (lib.ports foldLeft (None: Option[BigInt]))((res, port) => + (res, port.maskName) match { + case (_, None) => + res + case (None, Some(_)) => + Some(port.effectiveMaskGran) + case (Some(x), Some(_)) => + assert(x == port.effectiveMaskGran) + res + } + ) match { + case None => UIntType(IntWidth(lib.width)) + case Some(gran) => VectorType(UIntType(IntWidth(gran)), (lib.width / gran).toInt) + } + + val mem = DefMemory( + NoInfo, + "ram", + dataType, + lib.depth.toInt, + 1, // writeLatency + 0, // readLatency + (lib.readers ++ lib.readwriters).indices map (i => s"R_$i"), + (lib.writers ++ lib.readwriters).indices map (i => s"W_$i"), + Nil + ) + + val readConnects = (lib.readers ++ lib.readwriters).zipWithIndex flatMap { case (r, i) => + val clock = invert(WRef(r.clockName), r.clockPolarity) + val address = invert(WRef(r.addressName), r.addressPolarity) + val enable = (r.chipEnableName, r.readEnableName) match { + case (Some(en), Some(re)) => + and(invert(WRef(en), r.chipEnablePolarity), + invert(WRef(re), r.readEnablePolarity)) + case (Some(en), None) => invert(WRef(en), r.chipEnablePolarity) + case (None, Some(re)) => invert(WRef(re), r.readEnablePolarity) + case (None, None) => one + } + val data = memPortField(mem, s"R_$i", "data") + val read = (dataType: @unchecked) match { + case VectorType(tpe, size) => cat(((0 until size) map (k => + WSubIndex(data, k, tpe, UNKNOWNGENDER))).reverse) + case _: UIntType => data + } + val addrReg = WRef(s"R_${i}_addr_reg", r.AddrType, RegKind) + Seq( + DefRegister(NoInfo, addrReg.name, r.AddrType, clock, zero, addrReg), + Connect(NoInfo, memPortField(mem, s"R_$i", "clk"), clock), + Connect(NoInfo, memPortField(mem, s"R_$i", "addr"), addrReg), + Connect(NoInfo, memPortField(mem, s"R_$i", "en"), enable), + Connect(NoInfo, WRef(r.outputName.get), read), + Connect(NoInfo, addrReg, Mux(enable, address, addrReg, UnknownType)) + ) + } + + val writeConnects = (lib.writers ++ lib.readwriters).zipWithIndex flatMap { case (w, i) => + val clock = invert(WRef(w.clockName), w.clockPolarity) + val address = invert(WRef(w.addressName), w.addressPolarity) + val enable = (w.chipEnableName, w.writeEnableName) match { + case (Some(en), Some(we)) => + and(invert(WRef(en), w.chipEnablePolarity), + invert(WRef(we), w.writeEnablePolarity)) + case (Some(en), None) => invert(WRef(en), w.chipEnablePolarity) + case (None, Some(we)) => invert(WRef(we), w.writeEnablePolarity) + case (None, None) => zero // is it possible? 
+ } + val mask = memPortField(mem, s"W_$i", "mask") + val data = memPortField(mem, s"W_$i", "data") + val write = invert(WRef(w.inputName.get), w.inputPolarity) + Seq( + Connect(NoInfo, memPortField(mem, s"W_$i", "clk"), clock), + Connect(NoInfo, memPortField(mem, s"W_$i", "addr"), address), + Connect(NoInfo, memPortField(mem, s"W_$i", "en"), enable) + ) ++ (dataType match { + case VectorType(tpe, size) => + val width = bitWidth(tpe).toInt + ((0 until size) map (k => + Connect(NoInfo, WSubIndex(data, k, tpe, UNKNOWNGENDER), + bits(write, (k + 1) * width - 1, k * width)))) ++ + ((0 until size) map (k => + Connect(NoInfo, WSubIndex(mask, k, BoolType, UNKNOWNGENDER), + bits(WRef(w.maskName.get), k)))) + case _: UIntType => + Seq(Connect(NoInfo, data, write), Connect(NoInfo, mask, one)) + }) + } + lib.module(Block(mem +: (readConnects ++ writeConnects))) + }}).toMap + + def run(c: Circuit): Circuit = { + if (!synflops) c + else { + val circuit = c.copy(modules = (c.modules map (m => libMods getOrElse (m.name, m)))) + // print(circuit.serialize) + circuit + } + } +} diff --git a/tapeout/src/main/scala/transforms/macros/Utils.scala b/tapeout/src/main/scala/transforms/macros/Utils.scala index 5a2e34e66..4520a114f 100644 --- a/tapeout/src/main/scala/transforms/macros/Utils.scala +++ b/tapeout/src/main/scala/transforms/macros/Utils.scala @@ -48,9 +48,9 @@ case class MacroPort( width: BigInt, depth: BigInt) { val effectiveMaskGran = maskGran.getOrElse(width) - private val AddrType = UIntType(IntWidth(ceilLog2(depth) max 1)) - private val DataType = UIntType(IntWidth(width)) - private val MaskType = UIntType(IntWidth(width / effectiveMaskGran)) + val AddrType = UIntType(IntWidth(ceilLog2(depth) max 1)) + val DataType = UIntType(IntWidth(width)) + val MaskType = UIntType(IntWidth(width / effectiveMaskGran)) val tpe = BundleType(Seq( Field(clockName, Flip, ClockType), Field(addressName, Flip, AddrType)) ++ @@ -93,6 +93,10 @@ class Macro(lib: Map[String, Any]) { depth ) } + val writers = ports filter (p => p.inputName.isDefined && !p.outputName.isDefined) + val readers = ports filter (p => !p.inputName.isDefined && p.outputName.isDefined) + val readwriters = ports filter (p => p.inputName.isDefined && p.outputName.isDefined) + val sortedPorts = writers ++ readers ++ readwriters val extraPorts = lib get "extra ports" match { case None => Nil case Some(p) => p.asInstanceOf[List[_]] map { x => @@ -104,6 +108,7 @@ class Macro(lib: Map[String, Any]) { (name -> UIntLiteral(value, IntWidth(width))) } } + val tpe = BundleType(ports flatMap (_.tpe.fields)) private val modPorts = (ports flatMap (_.ports)) ++ (extraPorts map { case (name, value) => Port(NoInfo, name, Input, value.tpe) }) val blackbox = ExtModule(NoInfo, name, modPorts, name, Nil) @@ -140,4 +145,10 @@ object Utils { else DoPrim(PrimOps.Cat, Seq(es.head, cat(es.tail)), Nil, UnknownType) def not(e: Expression) = DoPrim(PrimOps.Not, Seq(e), Nil, e.tpe) + + def invert(exp: Expression, polarity: Option[PortPolarity]) = + polarity match { + case Some(ActiveLow) | Some(NegativeEdge) => not(exp) + case _ => exp + } } diff --git a/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala b/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala index 6b34204d7..765490f88 100644 --- a/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala +++ b/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala @@ -5,6 +5,7 @@ import firrtl.ir.{Circuit, NoInfo} import firrtl.passes.RemoveEmpty import firrtl.Parser.parse import 
java.io.{File, StringWriter} +import Utils.readJSON abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalatest.Matchers { val macroDir = new File("tapeout/src/test/resources/macros") @@ -19,11 +20,16 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate MacroCompiler.run(args(mem, lib, v, synflops)) } - def execute(mem: Option[File], lib: Option[File], synflops: Boolean, output: String) { - require(mem.isDefined) - val macros = Utils.readJSON(mem).get map (x => (new Macro(x)).blackbox) + def execute(memFile: Option[File], libFile: Option[File], synflops: Boolean, output: String) { + require(memFile.isDefined) + val mems = readJSON(memFile) map (_ map (x => new Macro(x))) + val libs = readJSON(libFile) map (_ map (x => new Macro(x))) + val macros = mems.get map (_.blackbox) val circuit = Circuit(NoInfo, macros, macros.last.name) - val passes = Seq(new MacroCompilerPass(mem, lib), RemoveEmpty) + val passes = Seq( + new MacroCompilerPass(mems, libs), + new SynFlopsPass(synflops, libs getOrElse mems.get), + RemoveEmpty) val result = (passes foldLeft circuit)((c, pass) => pass run c) val gold = RemoveEmpty run parse(output) (result.serialize) should be (gold.serialize) diff --git a/tapeout/src/test/scala/transforms/macros/SynFlops.scala b/tapeout/src/test/scala/transforms/macros/SynFlops.scala new file mode 100644 index 000000000..981e5e8d9 --- /dev/null +++ b/tapeout/src/test/scala/transforms/macros/SynFlops.scala @@ -0,0 +1,333 @@ +package barstools.tapeout.transforms.macros + +import java.io.File + +class Synflops2048x16_mrw extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2048x16-mrw.json") + val v = new File(testDir, "syn_flops_2048x16_mrw.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<16> + output RW0O : UInt<16> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<2> + + mem ram : + data-type => UInt<8>[2] + depth => 2048 + read-latency => 0 + write-latency => 1 + reader => R_0 + writer => W_0 + read-under-write => undefined + reg R_0_addr_reg : UInt<11>, clock with : + reset => (UInt<1>("h0"), R_0_addr_reg) + ram.R_0.clk <= clock + ram.R_0.addr <= R_0_addr_reg + ram.R_0.en <= RW0E + RW0O <= cat(ram.R_0.data[1], ram.R_0.data[0]) + R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) + ram.W_0.clk <= clock + ram.W_0.addr <= RW0A + ram.W_0.en <= and(RW0E, RW0W) + ram.W_0.data[0] <= bits(RW0I, 7, 0) + ram.W_0.data[1] <= bits(RW0I, 15, 8) + ram.W_0.mask[0] <= bits(RW0M, 0, 0) + ram.W_0.mask[1] <= bits(RW0M, 1, 1) +""" + compile(mem, None, v, true) + execute(Some(mem), None, true, output) +} + +class Synflops2048x8_r_mw extends MacroCompilerSpec { + val mem = new File(macroDir, "mem-2048x8-r-mw.json") + val v = new File(testDir, "syn_flops_2048x8_r_mw.v") + val output = +""" +circuit name_of_sram_module : + module name_of_sram_module : + input clock : Clock + input W0A : UInt<11> + input W0I : UInt<8> + input W0E : UInt<1> + input W0M : UInt<1> + input clock : Clock + input R0A : UInt<11> + output R0O : UInt<8> + + mem ram : + data-type => UInt<8>[1] + depth => 2048 + read-latency => 0 + write-latency => 1 + reader => R_0 + writer => W_0 + read-under-write => undefined + reg R_0_addr_reg : UInt<11>, clock with : + reset => (UInt<1>("h0"), R_0_addr_reg) + ram.R_0.clk <= clock + ram.R_0.addr <= R_0_addr_reg + ram.R_0.en <= UInt<1>("h1") + R0O <= ram.R_0.data[0] + R_0_addr_reg <= mux(UInt<1>("h1"), R0A, R_0_addr_reg) + 
ram.W_0.clk <= clock + ram.W_0.addr <= W0A + ram.W_0.en <= W0E + ram.W_0.data[0] <= bits(W0I, 7, 0) + ram.W_0.mask[0] <= bits(W0M, 0, 0) +""" + compile(mem, None, v, true) + execute(Some(mem), None, true, output) +} + +class Synflops2048x10_rw extends MacroCompilerSpec { + val mem = new File(macroDir, "lib-2048x10-rw.json") + val v = new File(testDir, "syn_flops_2048x10_rw.v") + val output = +""" +circuit vendor_sram : + module vendor_sram : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<10> + output RW0O : UInt<10> + input RW0E : UInt<1> + input RW0W : UInt<1> + + mem ram : + data-type => UInt<10> + depth => 2048 + read-latency => 0 + write-latency => 1 + reader => R_0 + writer => W_0 + read-under-write => undefined + reg R_0_addr_reg : UInt<11>, clock with : + reset => (UInt<1>("h0"), R_0_addr_reg) + ram.R_0.clk <= clock + ram.R_0.addr <= R_0_addr_reg + ram.R_0.en <= RW0E + RW0O <= ram.R_0.data + R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) + ram.W_0.clk <= clock + ram.W_0.addr <= RW0A + ram.W_0.en <= and(RW0E, RW0W) + ram.W_0.data <= RW0I + ram.W_0.mask <= UInt<1>("h1") +""" + compile(mem, None, v, true) + execute(Some(mem), None, true, output) +} + +class Synflops2048x8_mrw_re extends MacroCompilerSpec { + val mem = new File(macroDir, "lib-2048x8-mrw-re.json") + val v = new File(testDir, "syn_flops_2048x8_mrw_re.v") + val output = +""" +circuit vendor_sram : + module vendor_sram : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<8> + output RW0O : UInt<8> + input RW0E : UInt<1> + input RW0R : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<1> + + mem ram : + data-type => UInt<8>[1] + depth => 2048 + read-latency => 0 + write-latency => 1 + reader => R_0 + writer => W_0 + read-under-write => undefined + reg R_0_addr_reg : UInt<11>, clock with : + reset => (UInt<1>("h0"), R_0_addr_reg) + ram.R_0.clk <= clock + ram.R_0.addr <= R_0_addr_reg + ram.R_0.en <= and(RW0E, not(RW0R)) + RW0O <= ram.R_0.data[0] + R_0_addr_reg <= mux(and(RW0E, not(RW0R)), RW0A, R_0_addr_reg) + ram.W_0.clk <= clock + ram.W_0.addr <= RW0A + ram.W_0.en <= and(RW0E, RW0W) + ram.W_0.data[0] <= bits(RW0I, 7, 0) + ram.W_0.mask[0] <= bits(RW0M, 0, 0) +""" + compile(mem, None, v, true) + execute(Some(mem), None, true, output) +} + +class Synflops2048x16_n28 extends MacroCompilerSpec { + val mem = new File(macroDir, "lib-2048x16-n28.json") + val v = new File(testDir, "syn_flops_2048x16_n28.v") + val output = +""" +circuit vendor_sram_4 : + module vendor_sram_16 : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<16> + output RW0O : UInt<16> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<16> + + mem ram : + data-type => UInt<1>[16] + depth => 2048 + read-latency => 0 + write-latency => 1 + reader => R_0 + writer => W_0 + read-under-write => undefined + reg R_0_addr_reg : UInt<11>, clock with : + reset => (UInt<1>("h0"), R_0_addr_reg) + ram.R_0.clk <= clock + ram.R_0.addr <= R_0_addr_reg + ram.R_0.en <= RW0E + RW0O <= cat(ram.R_0.data[15], cat(ram.R_0.data[14], cat(ram.R_0.data[13], cat(ram.R_0.data[12], cat(ram.R_0.data[11], cat(ram.R_0.data[10], cat(ram.R_0.data[9], cat(ram.R_0.data[8], cat(ram.R_0.data[7], cat(ram.R_0.data[6], cat(ram.R_0.data[5], cat(ram.R_0.data[4], cat(ram.R_0.data[3], cat(ram.R_0.data[2], cat(ram.R_0.data[1], ram.R_0.data[0]))))))))))))))) + R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) + ram.W_0.clk <= clock + ram.W_0.addr <= RW0A + ram.W_0.en <= and(RW0E, RW0W) + ram.W_0.data[0] <= bits(RW0I, 0, 0) + ram.W_0.data[1] <= 
bits(RW0I, 1, 1) + ram.W_0.data[2] <= bits(RW0I, 2, 2) + ram.W_0.data[3] <= bits(RW0I, 3, 3) + ram.W_0.data[4] <= bits(RW0I, 4, 4) + ram.W_0.data[5] <= bits(RW0I, 5, 5) + ram.W_0.data[6] <= bits(RW0I, 6, 6) + ram.W_0.data[7] <= bits(RW0I, 7, 7) + ram.W_0.data[8] <= bits(RW0I, 8, 8) + ram.W_0.data[9] <= bits(RW0I, 9, 9) + ram.W_0.data[10] <= bits(RW0I, 10, 10) + ram.W_0.data[11] <= bits(RW0I, 11, 11) + ram.W_0.data[12] <= bits(RW0I, 12, 12) + ram.W_0.data[13] <= bits(RW0I, 13, 13) + ram.W_0.data[14] <= bits(RW0I, 14, 14) + ram.W_0.data[15] <= bits(RW0I, 15, 15) + ram.W_0.mask[0] <= bits(RW0M, 0, 0) + ram.W_0.mask[1] <= bits(RW0M, 1, 1) + ram.W_0.mask[2] <= bits(RW0M, 2, 2) + ram.W_0.mask[3] <= bits(RW0M, 3, 3) + ram.W_0.mask[4] <= bits(RW0M, 4, 4) + ram.W_0.mask[5] <= bits(RW0M, 5, 5) + ram.W_0.mask[6] <= bits(RW0M, 6, 6) + ram.W_0.mask[7] <= bits(RW0M, 7, 7) + ram.W_0.mask[8] <= bits(RW0M, 8, 8) + ram.W_0.mask[9] <= bits(RW0M, 9, 9) + ram.W_0.mask[10] <= bits(RW0M, 10, 10) + ram.W_0.mask[11] <= bits(RW0M, 11, 11) + ram.W_0.mask[12] <= bits(RW0M, 12, 12) + ram.W_0.mask[13] <= bits(RW0M, 13, 13) + ram.W_0.mask[14] <= bits(RW0M, 14, 14) + ram.W_0.mask[15] <= bits(RW0M, 15, 15) + + module vendor_sram_4 : + input clock : Clock + input RW0A : UInt<11> + input RW0I : UInt<4> + output RW0O : UInt<4> + input RW0E : UInt<1> + input RW0W : UInt<1> + input RW0M : UInt<4> + + mem ram : + data-type => UInt<1>[4] + depth => 2048 + read-latency => 0 + write-latency => 1 + reader => R_0 + writer => W_0 + read-under-write => undefined + reg R_0_addr_reg : UInt<11>, clock with : + reset => (UInt<1>("h0"), R_0_addr_reg) + ram.R_0.clk <= clock + ram.R_0.addr <= R_0_addr_reg + ram.R_0.en <= RW0E + RW0O <= cat(ram.R_0.data[3], cat(ram.R_0.data[2], cat(ram.R_0.data[1], ram.R_0.data[0]))) + R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) + ram.W_0.clk <= clock + ram.W_0.addr <= RW0A + ram.W_0.en <= and(RW0E, RW0W) + ram.W_0.data[0] <= bits(RW0I, 0, 0) + ram.W_0.data[1] <= bits(RW0I, 1, 1) + ram.W_0.data[2] <= bits(RW0I, 2, 2) + ram.W_0.data[3] <= bits(RW0I, 3, 3) + ram.W_0.mask[0] <= bits(RW0M, 0, 0) + ram.W_0.mask[1] <= bits(RW0M, 1, 1) + ram.W_0.mask[2] <= bits(RW0M, 2, 2) + ram.W_0.mask[3] <= bits(RW0M, 3, 3) +""" + compile(mem, None, v, true) + execute(Some(mem), None, true, output) +} + +class Synflops32x32_2rw extends MacroCompilerSpec { + val mem = new File(macroDir, "lib-32x32-2rw.json") + val v = new File(testDir, "syn_flops_32x32_2rw.v") + val output = +""" +circuit SRAM2RW32x32 : + module SRAM2RW32x32 : + input CE1 : Clock + input A1 : UInt<5> + input I1 : UInt<32> + output O1 : UInt<32> + input CSB1 : UInt<1> + input OEB1 : UInt<1> + input WEB1 : UInt<1> + input CE2 : Clock + input A2 : UInt<5> + input I2 : UInt<32> + output O2 : UInt<32> + input CSB2 : UInt<1> + input OEB2 : UInt<1> + input WEB2 : UInt<1> + + mem ram : + data-type => UInt<32> + depth => 32 + read-latency => 0 + write-latency => 1 + reader => R_0 + reader => R_1 + writer => W_0 + writer => W_1 + read-under-write => undefined + reg R_0_addr_reg : UInt<5>, CE1 with : + reset => (UInt<1>("h0"), R_0_addr_reg) + ram.R_0.clk <= CE1 + ram.R_0.addr <= R_0_addr_reg + ram.R_0.en <= and(not(CSB1), not(OEB1)) + O1 <= ram.R_0.data + R_0_addr_reg <= mux(and(not(CSB1), not(OEB1)), A1, R_0_addr_reg) + reg R_1_addr_reg : UInt<5>, CE2 with : + reset => (UInt<1>("h0"), R_1_addr_reg) + ram.R_1.clk <= CE2 + ram.R_1.addr <= R_1_addr_reg + ram.R_1.en <= and(not(CSB2), not(OEB2)) + O2 <= ram.R_1.data + R_1_addr_reg <= mux(and(not(CSB2), not(OEB2)), A2, 
R_1_addr_reg) + ram.W_0.clk <= CE1 + ram.W_0.addr <= A1 + ram.W_0.en <= and(not(CSB1), not(WEB1)) + ram.W_0.data <= I1 + ram.W_0.mask <= UInt<1>("h1") + ram.W_1.clk <= CE2 + ram.W_1.addr <= A2 + ram.W_1.en <= and(not(CSB2), not(WEB2)) + ram.W_1.data <= I2 + ram.W_1.mask <= UInt<1>("h1") +""" + compile(mem, None, v, true) + execute(Some(mem), None, true, output) +} From 02fef8e2c381f687787ca3c6f0f607c36dfbfce6 Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Wed, 5 Jul 2017 15:53:58 -0700 Subject: [PATCH 031/273] graceful handling of empty files --- .../scala/transforms/macros/MacroCompiler.scala | 15 ++++++++------- .../src/main/scala/transforms/macros/Utils.scala | 4 ++-- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala index 87386c12e..4edeef5fc 100644 --- a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala +++ b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala @@ -376,18 +376,19 @@ object MacroCompiler extends App { throw new Exception(usage) } - def run(args: List[String]) = { + def run(args: List[String]) { val (params, synflops) = parseArgs(Map[MacroParam, File](), false, args) try { val macros = readJSON(params get Macros).get map (x => (new Macro(x)).blackbox) - val circuit = Circuit(NoInfo, macros, macros.last.name) - val annotations = AnnotationMap(Seq(MacroCompilerAnnotation( - circuit.main, params(Macros), params get Library, synflops))) - val state = CircuitState(circuit, HighForm, Some(annotations)) val verilog = new FileWriter(params(Verilog)) - val result = new MacroCompiler compile (state, verilog) + if (macros.nonEmpty) { + val circuit = Circuit(NoInfo, macros, macros.last.name) + val annotations = AnnotationMap(Seq(MacroCompilerAnnotation( + circuit.main, params(Macros), params get Library, synflops))) + val state = CircuitState(circuit, HighForm, Some(annotations)) + val result = new MacroCompiler compile (state, verilog) + } verilog.close - result } catch { case e: java.util.NoSuchElementException => throw new Exception(usage) diff --git a/tapeout/src/main/scala/transforms/macros/Utils.scala b/tapeout/src/main/scala/transforms/macros/Utils.scala index 4520a114f..e0254253a 100644 --- a/tapeout/src/main/scala/transforms/macros/Utils.scala +++ b/tapeout/src/main/scala/transforms/macros/Utils.scala @@ -128,10 +128,10 @@ object Utils { case (res, _) => res } ) - case _ => None + case _ => Some(Nil) } } catch { - case _: Throwable => None + case _: Throwable => Some(Nil) } } From 2fd928fbe063d408dd6cffa3103813c397810f50 Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Fri, 21 Jul 2017 00:17:08 -0700 Subject: [PATCH 032/273] fix cost --- tapeout/src/main/scala/transforms/macros/MacroCompiler.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala index 4edeef5fc..6eceda5a4 100644 --- a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala +++ b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala @@ -298,8 +298,8 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // val cost = 100 * (mem.depth * mem.width) / (lib.depth * lib.width) + // (mem.depth * mem.width) // Donggyu: I re-define cost - val cost = max(1, mem.depth / lib.depth) * - max(1, mem.width / lib.width) * + val cost = (((mem.depth - 1) / lib.depth) + 1) * + (((mem.width - 1) / lib.width) + 1) * 
(lib.depth * lib.width + 1) // weights on # cells System.err println s"Cost of ${lib.name} for ${mem.name}: ${cost}" if (cost > area) (best, area) From aeb303a61bfdeb108e04034732fcae1c980a9f40 Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Fri, 21 Jul 2017 00:27:48 -0700 Subject: [PATCH 033/273] Colin's fixes --- .../scala/transforms/macros/MacroCompiler.scala | 10 ++++------ .../main/scala/transforms/macros/SynFlops.scala | 4 ++-- .../main/scala/transforms/macros/Utils.scala | 17 +++++++++-------- 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala index 6eceda5a4..e43418cd8 100644 --- a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala +++ b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala @@ -36,7 +36,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], for (i <- 0 until mem.width.toInt) { if (i <= last + 1) { /* Palmer: Every memory is going to have to fit at least a single bit. */ - // coninue + // continue } else if ((i - last) % lib.width.toInt == 0) { /* Palmer: It's possible that we rolled over a memory's width here, if so generate one. */ @@ -53,7 +53,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], case (_, None) => // continue case (_, Some(p)) if p == lib.width => // continue case _ => - System.err println "Bit-mask (or unmasked) target memories are suppored only" + System.err println "Bit-mask (or unmasked) target memories are supported only" return None } } @@ -198,7 +198,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], * there isn't a write enable port. */ WRef(mem) case None => - /* Palemr: If there is no input port on the source memory port + /* Palmer: If there is no input port on the source memory port * then we don't ever want to turn on this write * enable. Otherwise, we just _always_ turn on the * write enable port on the inner memory. 
*/ @@ -316,9 +316,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } case _ => c.modules } - val circuit = c.copy(modules = modules) - // print(circuit.serialize) - circuit + c.copy(modules = modules) } } diff --git a/tapeout/src/main/scala/transforms/macros/SynFlops.scala b/tapeout/src/main/scala/transforms/macros/SynFlops.scala index 499258c87..40718318f 100644 --- a/tapeout/src/main/scala/transforms/macros/SynFlops.scala +++ b/tapeout/src/main/scala/transforms/macros/SynFlops.scala @@ -54,9 +54,9 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa WSubIndex(data, k, tpe, UNKNOWNGENDER))).reverse) case _: UIntType => data } - val addrReg = WRef(s"R_${i}_addr_reg", r.AddrType, RegKind) + val addrReg = WRef(s"R_${i}_addr_reg", r.addrType, RegKind) Seq( - DefRegister(NoInfo, addrReg.name, r.AddrType, clock, zero, addrReg), + DefRegister(NoInfo, addrReg.name, r.addrType, clock, zero, addrReg), Connect(NoInfo, memPortField(mem, s"R_$i", "clk"), clock), Connect(NoInfo, memPortField(mem, s"R_$i", "addr"), addrReg), Connect(NoInfo, memPortField(mem, s"R_$i", "en"), enable), diff --git a/tapeout/src/main/scala/transforms/macros/Utils.scala b/tapeout/src/main/scala/transforms/macros/Utils.scala index e0254253a..9f9d215e6 100644 --- a/tapeout/src/main/scala/transforms/macros/Utils.scala +++ b/tapeout/src/main/scala/transforms/macros/Utils.scala @@ -17,11 +17,12 @@ case object NegativeEdge extends PortPolarity case object PositiveEdge extends PortPolarity object PortPolarity { implicit def toPortPolarity(s: Any): PortPolarity = - (s: @unchecked) match { + s match { case "active low" => ActiveLow case "active high" => ActiveHigh case "negative edge" => NegativeEdge case "positive edge" => PositiveEdge + case _ => throw new firrtl.passes.PassException(s"Wrong port polarity: ${s.toString}") } implicit def toPortPolarity(s: Option[Any]): Option[PortPolarity] = s map toPortPolarity @@ -48,18 +49,18 @@ case class MacroPort( width: BigInt, depth: BigInt) { val effectiveMaskGran = maskGran.getOrElse(width) - val AddrType = UIntType(IntWidth(ceilLog2(depth) max 1)) - val DataType = UIntType(IntWidth(width)) - val MaskType = UIntType(IntWidth(width / effectiveMaskGran)) + val addrType = UIntType(IntWidth(ceilLog2(depth) max 1)) + val dataType = UIntType(IntWidth(width)) + val maskType = UIntType(IntWidth(width / effectiveMaskGran)) val tpe = BundleType(Seq( Field(clockName, Flip, ClockType), - Field(addressName, Flip, AddrType)) ++ - (inputName map (Field(_, Flip, DataType))) ++ - (outputName map (Field(_, Default, DataType))) ++ + Field(addressName, Flip, addrType)) ++ + (inputName map (Field(_, Flip, dataType))) ++ + (outputName map (Field(_, Default, dataType))) ++ (chipEnableName map (Field(_, Flip, BoolType))) ++ (readEnableName map (Field(_, Flip, BoolType))) ++ (writeEnableName map (Field(_, Flip, BoolType))) ++ - (maskName map (Field(_, Flip, MaskType))) + (maskName map (Field(_, Flip, maskType))) ) val ports = tpe.fields map (f => Port( NoInfo, f.name, f.flip match { case Default => Output case Flip => Input }, f.tpe)) From 57b0fec78e14bf4f46c625fcfdb17783324039a8 Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Fri, 21 Jul 2017 00:31:07 -0700 Subject: [PATCH 034/273] anonymize technology --- tapeout/src/test/resources/macros/{saed32.json => mylib.json} | 0 .../src/test/scala/transforms/macros/MacroCompilerSpec.scala | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename tapeout/src/test/resources/macros/{saed32.json => mylib.json} (100%) diff --git 
a/tapeout/src/test/resources/macros/saed32.json b/tapeout/src/test/resources/macros/mylib.json similarity index 100% rename from tapeout/src/test/resources/macros/saed32.json rename to tapeout/src/test/resources/macros/mylib.json diff --git a/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala b/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala index 765490f88..f102f2502 100644 --- a/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala +++ b/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala @@ -38,7 +38,7 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate class RocketChipTest extends MacroCompilerSpec { val mem = new File(macroDir, "rocketchip.json") - val lib = new File(macroDir, "saed32.json") + val lib = new File(macroDir, "mylib.json") val v = new File(testDir, "rocketchip.macro.v") val output = // TODO: check correctness... """ From c79ea4790909196eb02998f4202c0c950abb7e54 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 19 Jul 2017 19:18:42 -0700 Subject: [PATCH 035/273] Port to MDF library and start re-developing tests --- .gitmodules | 3 + build.sbt | 11 +- mdf | 1 + .../transforms/macros/MacroCompiler.scala | 212 ++-- .../scala/transforms/macros/SynFlops.scala | 48 +- .../main/scala/transforms/macros/Utils.scala | 165 +-- .../test/resources/macros/lib-1024x8-mrw.json | 1 + .../test/resources/macros/mem-2048x8-mrw.json | 1 + .../transforms/macros/MacroCompilerSpec.scala | 635 ++++++------ .../scala/transforms/macros/SplitDepth.scala | 520 ++++++---- .../scala/transforms/macros/SplitWidth.scala | 936 +++++++++--------- .../scala/transforms/macros/SynFlops.scala | 636 ++++++------ 12 files changed, 1670 insertions(+), 1499 deletions(-) create mode 100644 .gitmodules create mode 160000 mdf diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..13bb74a0c --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "mdf"] + path = mdf + url = https://github.com/edwardcwang/plsi-mdf.git diff --git a/build.sbt b/build.sbt index 6f61668a8..cf97adbab 100644 --- a/build.sbt +++ b/build.sbt @@ -15,11 +15,18 @@ val defaultVersions = Map( "chisel-iotesters" -> "1.2-SNAPSHOT" ) +lazy val mdf = RootProject(file("mdf/scalalib")) + lazy val tapeout = (project in file("tapeout")) + .dependsOn(mdf) .settings(commonSettings) .settings( libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) - } + }, + resolvers ++= Seq( + Resolver.sonatypeRepo("snapshots"), + Resolver.sonatypeRepo("releases") + ) ) - .settings(scalacOptions in Test ++= Seq("-language:reflectiveCalls")) \ No newline at end of file + .settings(scalacOptions in Test ++= Seq("-language:reflectiveCalls")) diff --git a/mdf b/mdf new file mode 160000 index 000000000..89c15682a --- /dev/null +++ b/mdf @@ -0,0 +1 @@ +Subproject commit 89c15682aa85d0c9175c23706939533d7611e25d diff --git a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala index e43418cd8..9112351ec 100644 --- a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala +++ b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala @@ -8,19 +8,20 @@ import firrtl.PrimOps import firrtl.Utils._ import firrtl.annotations._ import firrtl.CompilerUtils.getLoweringTransforms +import mdf.macrolib.{PolarizedPort, PortPolarity} import scala.collection.mutable.{ArrayBuffer, HashMap} import 
java.io.{File, FileWriter} import Utils._ object MacroCompilerAnnotation { - def apply(c: String, mem: File, lib: Option[File], synflops: Boolean) = { + def apply(c: String, mem: String, lib: Option[String], synflops: Boolean) = { Annotation(CircuitName(c), classOf[MacroCompilerTransform], s"${mem} %s ${synflops}".format(lib map (_.toString) getOrElse "")) } private val matcher = "([^ ]+) ([^ ]*) (true|false)".r def unapply(a: Annotation) = a match { case Annotation(CircuitName(c), t, matcher(mem, lib, synflops)) if t == classOf[MacroCompilerTransform] => - Some((c, Some(new File(mem)), if (lib.isEmpty) None else Some(new File(lib)), synflops.toBoolean)) + Some((c, Some(mem), if (lib.isEmpty) None else Some(lib), synflops.toBoolean)) case _ => None } } @@ -33,11 +34,11 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Parallel mapping val pairs = ArrayBuffer[(BigInt, BigInt)]() var last = 0 - for (i <- 0 until mem.width.toInt) { + for (i <- 0 until mem.src.width) { if (i <= last + 1) { /* Palmer: Every memory is going to have to fit at least a single bit. */ // continue - } else if ((i - last) % lib.width.toInt == 0) { + } else if ((i - last) % lib.src.width.toInt == 0) { /* Palmer: It's possible that we rolled over a memory's width here, if so generate one. */ pairs += ((last, i-1)) @@ -45,13 +46,13 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } else { /* Palmer: FIXME: This is a mess, I must just be super confused. */ for ((memPort, libPort) <- pairedPorts) { - (memPort.maskGran, libPort.maskGran) match { + (memPort.src.maskGran, libPort.src.maskGran) match { case (_, Some(p)) if p == 1 => // continue case (Some(p), _) if i % p == 0 => pairs += ((last, i-1)) last = i case (_, None) => // continue - case (_, Some(p)) if p == lib.width => // continue + case (_, Some(p)) if p == lib.src.width => // continue case _ => System.err println "Bit-mask (or unmasked) target memories are supported only" return None @@ -59,7 +60,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } } } - pairs += ((last, mem.width.toInt - 1)) + pairs += ((last, mem.src.width.toInt - 1)) // Serial mapping val stmts = ArrayBuffer[Statement]() @@ -67,27 +68,27 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], val outputs = HashMap[String, ArrayBuffer[(Expression, Expression)]]() /* Palmer: If we've got a parallel memory then we've got to take the * address bits into account. 
*/ - if (mem.depth > lib.depth) { - mem.ports foreach { port => - val high = ceilLog2(mem.depth) - val low = ceilLog2(lib.depth) - val ref = WRef(port.addressName) + if (mem.src.depth > lib.src.depth) { + mem.src.ports foreach { port => + val high = ceilLog2(mem.src.depth) + val low = ceilLog2(lib.src.depth) + val ref = WRef(port.address.name) val name = s"${ref.name}_sel" selects(ref.name) = WRef(name, UIntType(IntWidth(high-low))) stmts += DefNode(NoInfo, name, bits(ref, high-1, low)) } } - for ((off, i) <- (0 until mem.depth.toInt by lib.depth.toInt).zipWithIndex) { + for ((off, i) <- (0 until mem.src.depth by lib.src.depth).zipWithIndex) { for (j <- pairs.indices) { val name = s"mem_${i}_${j}" - stmts += WDefInstance(NoInfo, name, lib.name, lib.tpe) + stmts += WDefInstance(NoInfo, name, lib.src.name, lib.tpe) // connect extra ports stmts ++= lib.extraPorts map { case (portName, portValue) => Connect(NoInfo, WSubField(WRef(name), portName), portValue) } } for ((memPort, libPort) <- pairedPorts) { - val addrMatch = selects get memPort.addressName match { + val addrMatch = selects get memPort.src.address.name match { case None => one case Some(addr) => val index = UIntLiteral(i, IntWidth(bitWidth(addr.tpe))) @@ -98,33 +99,37 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], for (((low, high), j) <- pairs.zipWithIndex) { val inst = WRef(s"mem_${i}_${j}", lib.tpe) - def connectPorts(mem: Expression, + def connectPorts2(mem: Expression, lib: String, polarity: Option[PortPolarity]): Statement = - Connect(NoInfo, WSubField(inst, lib), invert(mem, polarity)) + Connect(NoInfo, WSubField(inst, lib), portToExpression(mem, polarity)) + def connectPorts(mem: Expression, + lib: String, + polarity: PortPolarity): Statement = + connectPorts2(mem, lib, Some(polarity)) // Clock port mapping /* Palmer: FIXME: I don't handle memories with read/write clocks yet. */ - stmts += connectPorts(WRef(memPort.clockName), - libPort.clockName, - libPort.clockPolarity) + stmts += connectPorts(WRef(memPort.src.clock.name), + libPort.src.clock.name, + libPort.src.clock.polarity) // Adress port mapping /* Palmer: The address port to a memory is just the low-order bits of * the top address. */ - stmts += connectPorts(WRef(memPort.addressName), - libPort.addressName, - libPort.addressPolarity) + stmts += connectPorts(WRef(memPort.src.address.name), + libPort.src.address.name, + libPort.src.address.polarity) // Output port mapping - (memPort.outputName, libPort.outputName) match { - case (Some(mem), Some(lib)) => + (memPort.src.output, libPort.src.output) match { + case (Some(PolarizedPort(mem, _)), Some(PolarizedPort(lib, lib_polarity))) => /* Palmer: In order to produce the output of a memory we need to cat * together a bunch of narrower memories, which can only be * done after generating all the memories. This saves up the * output statements for later. */ val name = s"${mem}_${i}_${j}" - val exp = invert(bits(WSubField(inst, lib), high-low, 0), libPort.outputPolarity) + val exp = portToExpression(bits(WSubField(inst, lib), high-low, 0), Some(lib_polarity)) stmts += DefNode(NoInfo, name, exp) cats += WRef(name) case (None, Some(lib)) => @@ -135,18 +140,18 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], /* Palmer: If there's no output ports at all (ie, read-only * port on the memory) then just don't worry about it, * there's nothing to do. 
*/ - case (Some(mem), None) => + case (Some(PolarizedPort(mem, _)), None) => System.err println "WARNING: Unable to match output ports on memory" System.err println s" outer output port: ${mem}" return None } // Input port mapping - (memPort.inputName, libPort.inputName) match { - case (Some(mem), Some(lib)) => + (memPort.src.input, libPort.src.input) match { + case (Some(PolarizedPort(mem, _)), Some(PolarizedPort(lib, lib_polarity))) => /* Palmer: The input port to a memory just needs to happen in parallel, * this does a part select to narrow the memory down. */ - stmts += connectPorts(bits(WRef(mem), high, low), lib, libPort.inputPolarity) + stmts += connectPorts(bits(WRef(mem), high, low), lib, lib_polarity) case (None, Some(lib)) => /* Palmer: If the inner memory has an input port but the other * one doesn't then it's safe to just leave the inner @@ -157,43 +162,43 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], /* Palmer: If there's no input ports at all (ie, read-only * port on the memory) then just don't worry about it, * there's nothing to do. */ - case (Some(mem), None) => + case (Some(PolarizedPort(mem, _)), None) => System.err println "WARNING: Unable to match input ports on memory" System.err println s" outer input port: ${mem}" return None } // Mask port mapping - val memMask = memPort.maskName match { - case Some(mem) => + val memMask = memPort.src.maskPort match { + case Some(PolarizedPort(mem, _)) => /* Palmer: The bits from the outer memory's write mask that will be * used as the write mask for this inner memory. */ - if (libPort.effectiveMaskGran == libPort.width) { - bits(WRef(mem), low / memPort.effectiveMaskGran) + if (libPort.src.effectiveMaskGran == libPort.src.width) { + bits(WRef(mem), low / memPort.src.effectiveMaskGran) } else { - if (libPort.effectiveMaskGran != 1) { + if (libPort.src.effectiveMaskGran != 1) { // TODO System.err println "only single-bit mask supported" return None } - cat(((low to high) map (i => bits(WRef(mem), i / memPort.effectiveMaskGran))).reverse) + cat(((low to high) map (i => bits(WRef(mem), i / memPort.src.effectiveMaskGran))).reverse) } case None => /* Palmer: If there is no input port on the source memory port * then we don't ever want to turn on this write * enable. Otherwise, we just _always_ turn on the * write enable port on the inner memory. */ - if (!libPort.maskName.isDefined) one + if (libPort.src.maskPort.isEmpty) one else { - val width = libPort.width / libPort.effectiveMaskGran + val width = libPort.src.width / libPort.src.effectiveMaskGran val value = (BigInt(1) << width.toInt) - 1 UIntLiteral(value, IntWidth(width)) } } // Write enable port mapping - val memWriteEnable = memPort.writeEnableName match { - case Some(mem) => + val memWriteEnable = memPort.src.writeEnable match { + case Some(PolarizedPort(mem, _)) => /* Palmer: The outer memory's write enable port, or a constant 1 if * there isn't a write enable port. */ WRef(mem) @@ -202,60 +207,65 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], * then we don't ever want to turn on this write * enable. Otherwise, we just _always_ turn on the * write enable port on the inner memory. 
*/ - if (!memPort.inputName.isDefined) zero else one + if (memPort.src.input.isEmpty) zero else one } // Chip enable port mapping - val memChipEnable = memPort.chipEnableName match { - case Some(mem) => WRef(mem) + val memChipEnable = memPort.src.chipEnable match { + case Some(PolarizedPort(mem, _)) => WRef(mem) case None => one } - // Read enable port mapping + // Read enable port mapping /* Palmer: It's safe to ignore read enables, but we pass them through * to the vendor memory if there's a port on there that * implements the read enables. */ - (memPort.readEnableName, libPort.readEnableName) match { + (memPort.src.readEnable, libPort.src.readEnable) match { case (_, None) => - case (Some(mem), Some(lib)) => - stmts += connectPorts(andAddrMatch(WRef(mem)), lib, libPort.readEnablePolarity) - case (None, Some(lib)) => - stmts += connectPorts(andAddrMatch(not(memWriteEnable)), lib, libPort.readEnablePolarity) + case (Some(PolarizedPort(mem, _)), Some(PolarizedPort(lib, lib_polarity))) => + stmts += connectPorts(andAddrMatch(WRef(mem)), lib, lib_polarity) + case (None, Some(PolarizedPort(lib, lib_polarity))) => + stmts += connectPorts(andAddrMatch(not(memWriteEnable)), lib, lib_polarity) } /* Palmer: This is actually the memory compiler: it figures out how to * implement the outer memory's collection of ports using what - * the inner memory has availiable. */ - ((libPort.maskName, libPort.writeEnableName, libPort.chipEnableName): @unchecked) match { - case (Some(mask), Some(we), Some(en)) => + * the inner memory has availiable. */ + ((libPort.src.maskPort, libPort.src.writeEnable, libPort.src.chipEnable): @unchecked) match { + case (Some(PolarizedPort(mask, mask_polarity)), Some(PolarizedPort(we, we_polarity)), Some(PolarizedPort(en, en_polarity))) => /* Palmer: This is the simple option: every port exists. */ - stmts += connectPorts(memMask, mask, libPort.maskPolarity) - stmts += connectPorts(andAddrMatch(memWriteEnable), we, libPort.writeEnablePolarity) - stmts += connectPorts(andAddrMatch(memChipEnable), en, libPort.chipEnablePolarity) - case (Some(mask), Some(we), None) => - /* Palmer: If we don't have a chip enable but do have */ - stmts += connectPorts(memMask, mask, libPort.maskPolarity) + stmts += connectPorts(memMask, mask, mask_polarity) + stmts += connectPorts(andAddrMatch(memWriteEnable), we, we_polarity) + stmts += connectPorts(andAddrMatch(memChipEnable), en, en_polarity) + case (Some(PolarizedPort(mask, mask_polarity)), Some(PolarizedPort(we, we_polarity)), None) => + /* Palmer: If we don't have a chip enable but do have mask ports. */ + stmts += connectPorts(memMask, mask, mask_polarity) stmts += connectPorts(andAddrMatch(and(memWriteEnable, memChipEnable)), - we, libPort.writeEnablePolarity) - case (None, Some(we), Some(en)) if bitWidth(memMask.tpe) == 1 => + we, mask_polarity) + case (None, Some(PolarizedPort(we, we_polarity)), chipEnable) if bitWidth(memMask.tpe) == 1 => /* Palmer: If we're expected to provide mask ports without a * memory that actually has them then we can use the * write enable port instead of the mask port. 
*/ stmts += connectPorts(andAddrMatch(and(memWriteEnable, memMask)), - we, libPort.writeEnablePolarity) - stmts += connectPorts(andAddrMatch(memChipEnable), en, libPort.chipEnablePolarity) - case (None, Some(we), Some(en)) => + we, we_polarity) + chipEnable match { + case Some(PolarizedPort(en, en_polarity)) => { + stmts += connectPorts(andAddrMatch(memChipEnable), en, en_polarity) + } + case _ => // TODO: do we care about the case where mem has chipEnable but lib doesn't? + } + case (None, Some(PolarizedPort(we, we_polarity)), Some(PolarizedPort(en, en_polarity))) => // TODO - System.err println "cannot emulate multi-bit mask ports with write enable" + System.err.println("cannot emulate multi-bit mask ports with write enable") return None case (None, None, None) => /* Palmer: There's nothing to do here since there aren't any - * ports to match up. */ + * ports to match up. */ } } // Cat macro outputs for selection - memPort.outputName match { - case Some(mem) if cats.nonEmpty => + memPort.src.output match { + case Some(PolarizedPort(mem, _)) if cats.nonEmpty => val name = s"${mem}_${i}" stmts += DefNode(NoInfo, name, cat(cats.toSeq.reverse)) (outputs getOrElseUpdate (mem, ArrayBuffer[(Expression, Expression)]())) += @@ -265,9 +275,9 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } } // Connect mem outputs - mem.ports foreach { port => - port.outputName match { - case Some(mem) => outputs get mem match { + mem.src.ports foreach { port => + port.output match { + case Some(PolarizedPort(mem, _)) => outputs get mem match { case Some(select) => val output = (select foldRight (zero: Expression)) { case ((cond, tval), fval) => Mux(cond, tval, fval, fval.tpe) } @@ -285,10 +295,10 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], val modules = (mems, libs) match { case (Some(mems), Some(libs)) => (mems foldLeft c.modules){ (modules, mem) => val (best, cost) = (libs foldLeft (None: Option[(Module, ExtModule)], BigInt(Long.MaxValue))){ - case ((best, area), lib) if mem.ports.size != lib.ports.size => + case ((best, area), lib) if mem.src.ports.size != lib.src.ports.size => /* Palmer: FIXME: This just assumes the Chisel and vendor ports are in the same * order, but I'm starting with what actually gets generated. 
*/ - System.err println s"INFO: unable to compile ${mem.name} using ${lib.name} port count must match" + System.err println s"INFO: unable to compile ${mem.src.name} using ${lib.src.name} port count must match" (best, area) case ((best, area), lib) => /* Palmer: A quick cost function (that must be kept in sync with @@ -298,10 +308,10 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // val cost = 100 * (mem.depth * mem.width) / (lib.depth * lib.width) + // (mem.depth * mem.width) // Donggyu: I re-define cost - val cost = (((mem.depth - 1) / lib.depth) + 1) * - (((mem.width - 1) / lib.width) + 1) * - (lib.depth * lib.width + 1) // weights on # cells - System.err println s"Cost of ${lib.name} for ${mem.name}: ${cost}" + val cost = (((mem.src.depth - 1) / lib.src.depth) + 1) * + (((mem.src.width - 1) / lib.src.width) + 1) * + (lib.src.depth * lib.src.width + 1) // weights on # cells + System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${cost}") if (cost > area) (best, area) else compile(mem, lib) match { case None => (best, area) @@ -326,8 +336,17 @@ class MacroCompilerTransform extends Transform { def execute(state: CircuitState) = getMyAnnotations(state) match { case Seq(MacroCompilerAnnotation(state.circuit.main, memFile, libFile, synflops)) => require(memFile.isDefined) - val mems: Option[Seq[Macro]] = readJSON(memFile) map (_ map (x => new Macro(x))) - val libs: Option[Seq[Macro]] = readJSON(libFile) map (_ map (x => new Macro(x))) + // Read, eliminate None, get only SRAM, make firrtl macro + val mems: Option[Seq[Macro]] = mdf.macrolib.Utils.readMDFFromPath(memFile) match { + case Some(x:Seq[mdf.macrolib.Macro]) => + Some(Utils.filterForSRAM(Some(x)) getOrElse(List()) map {new Macro(_)}) + case _ => None + } + val libs: Option[Seq[Macro]] = mdf.macrolib.Utils.readMDFFromPath(libFile) match { + case Some(x:Seq[mdf.macrolib.Macro]) => + Some(Utils.filterForSRAM(Some(x)) getOrElse(List()) map {new Macro(_)}) + case _ => None + } val transforms = Seq( new MacroCompilerPass(mems, libs), new SynFlopsPass(synflops, libs getOrElse mems.get), @@ -349,24 +368,24 @@ object MacroCompiler extends App { sealed trait MacroParam case object Macros extends MacroParam case object Library extends MacroParam - case object Verilog extends MacroParam - type MacroParamMap = Map[MacroParam, File] + case object Verilog extends MacroParam + type MacroParamMap = Map[MacroParam, String] val usage = Seq( "Options:", " -m, --macro-list: The set of macros to compile", " -l, --library: The set of macros that have blackbox instances", " -v, --verilog: Verilog output", - " --syn-flop: Produces synthesizable flop-based memories") mkString "\n" + " --syn-flop: Produces synthesizable flop-based memories (for all memories and library memory macros); likely useful for simulation purposes") mkString "\n" def parseArgs(map: MacroParamMap, synflops: Boolean, args: List[String]): (MacroParamMap, Boolean) = args match { case Nil => (map, synflops) case ("-m" | "--macro-list") :: value :: tail => - parseArgs(map + (Macros -> new File(value)), synflops, tail) + parseArgs(map + (Macros -> value), synflops, tail) case ("-l" | "--library") :: value :: tail => - parseArgs(map + (Library -> new File(value)), synflops, tail) + parseArgs(map + (Library -> value), synflops, tail) case ("-v" | "--verilog") :: value :: tail => - parseArgs(map + (Verilog -> new File(value)), synflops, tail) + parseArgs(map + (Verilog -> value), synflops, tail) case "--syn-flops" :: tail => parseArgs(map, true, tail) case arg :: tail => @@ 
-375,18 +394,29 @@ object MacroCompiler extends App { } def run(args: List[String]) { - val (params, synflops) = parseArgs(Map[MacroParam, File](), false, args) + val (params, synflops) = parseArgs(Map[MacroParam, String](), false, args) try { - val macros = readJSON(params get Macros).get map (x => (new Macro(x)).blackbox) - val verilog = new FileWriter(params(Verilog)) + val macros = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) + + // Open the writer for the output Verilog file. + val verilogWriter = new FileWriter(new File(params.get(Verilog).get)) + if (macros.nonEmpty) { val circuit = Circuit(NoInfo, macros, macros.last.name) val annotations = AnnotationMap(Seq(MacroCompilerAnnotation( - circuit.main, params(Macros), params get Library, synflops))) + circuit.main, params.get(Macros).get, params.get(Library), synflops))) val state = CircuitState(circuit, HighForm, Some(annotations)) - val result = new MacroCompiler compile (state, verilog) + + // Run the compiler. + val result = new MacroCompiler().compileAndEmit(state) + + // Extract Verilog circuit and write it. + verilogWriter.write(result.getEmittedCircuit.value) } - verilog.close + + // Close the writer. + verilogWriter.close() + } catch { case e: java.util.NoSuchElementException => throw new Exception(usage) diff --git a/tapeout/src/main/scala/transforms/macros/SynFlops.scala b/tapeout/src/main/scala/transforms/macros/SynFlops.scala index 40718318f..c4a848726 100644 --- a/tapeout/src/main/scala/transforms/macros/SynFlops.scala +++ b/tapeout/src/main/scala/transforms/macros/SynFlops.scala @@ -9,9 +9,9 @@ import firrtl.passes.MemPortUtils.{memPortField, memType} import Utils._ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pass { - lazy val libMods = (libs map { lib => lib.name -> { - val dataType = (lib.ports foldLeft (None: Option[BigInt]))((res, port) => - (res, port.maskName) match { + lazy val libMods = (libs map { lib => lib.src.name -> { + val dataType = (lib.src.ports foldLeft (None: Option[BigInt]))((res, port) => + (res, port.maskPort) match { case (_, None) => res case (None, Some(_)) => @@ -21,15 +21,15 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa res } ) match { - case None => UIntType(IntWidth(lib.width)) - case Some(gran) => VectorType(UIntType(IntWidth(gran)), (lib.width / gran).toInt) + case None => UIntType(IntWidth(lib.src.width)) + case Some(gran) => VectorType(UIntType(IntWidth(gran)), (lib.src.width / gran).toInt) } val mem = DefMemory( NoInfo, "ram", dataType, - lib.depth.toInt, + lib.src.depth, 1, // writeLatency 0, // readLatency (lib.readers ++ lib.readwriters).indices map (i => s"R_$i"), @@ -38,14 +38,14 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa ) val readConnects = (lib.readers ++ lib.readwriters).zipWithIndex flatMap { case (r, i) => - val clock = invert(WRef(r.clockName), r.clockPolarity) - val address = invert(WRef(r.addressName), r.addressPolarity) - val enable = (r.chipEnableName, r.readEnableName) match { - case (Some(en), Some(re)) => - and(invert(WRef(en), r.chipEnablePolarity), - invert(WRef(re), r.readEnablePolarity)) - case (Some(en), None) => invert(WRef(en), r.chipEnablePolarity) - case (None, Some(re)) => invert(WRef(re), r.readEnablePolarity) + val clock = portToExpression(r.src.clock) + val address = portToExpression(r.src.address) + val enable = (r.src chipEnable, r.src readEnable) match { + case 
(Some(en_port), Some(re_port)) => + and(portToExpression(en_port), + portToExpression(re_port)) + case (Some(en_port), None) => portToExpression(en_port) + case (None, Some(re_port)) => portToExpression(re_port) case (None, None) => one } val data = memPortField(mem, s"R_$i", "data") @@ -60,25 +60,25 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa Connect(NoInfo, memPortField(mem, s"R_$i", "clk"), clock), Connect(NoInfo, memPortField(mem, s"R_$i", "addr"), addrReg), Connect(NoInfo, memPortField(mem, s"R_$i", "en"), enable), - Connect(NoInfo, WRef(r.outputName.get), read), + Connect(NoInfo, WRef(r.src.output.get.name), read), Connect(NoInfo, addrReg, Mux(enable, address, addrReg, UnknownType)) ) } val writeConnects = (lib.writers ++ lib.readwriters).zipWithIndex flatMap { case (w, i) => - val clock = invert(WRef(w.clockName), w.clockPolarity) - val address = invert(WRef(w.addressName), w.addressPolarity) - val enable = (w.chipEnableName, w.writeEnableName) match { + val clock = portToExpression(w.src.clock) + val address = portToExpression(w.src.address) + val enable = (w.src.chipEnable, w.src.writeEnable) match { case (Some(en), Some(we)) => - and(invert(WRef(en), w.chipEnablePolarity), - invert(WRef(we), w.writeEnablePolarity)) - case (Some(en), None) => invert(WRef(en), w.chipEnablePolarity) - case (None, Some(we)) => invert(WRef(we), w.writeEnablePolarity) + and(portToExpression(en), + portToExpression(we)) + case (Some(en), None) => portToExpression(en) + case (None, Some(we)) => portToExpression(we) case (None, None) => zero // is it possible? } val mask = memPortField(mem, s"W_$i", "mask") val data = memPortField(mem, s"W_$i", "data") - val write = invert(WRef(w.inputName.get), w.inputPolarity) + val write = portToExpression(w.src.input.get) Seq( Connect(NoInfo, memPortField(mem, s"W_$i", "clk"), clock), Connect(NoInfo, memPortField(mem, s"W_$i", "addr"), address), @@ -91,7 +91,7 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa bits(write, (k + 1) * width - 1, k * width)))) ++ ((0 until size) map (k => Connect(NoInfo, WSubIndex(mask, k, BoolType, UNKNOWNGENDER), - bits(WRef(w.maskName.get), k)))) + bits(WRef(w.src.maskPort.get.name), k)))) case _: UIntType => Seq(Connect(NoInfo, data, write), Connect(NoInfo, mask, one)) }) diff --git a/tapeout/src/main/scala/transforms/macros/Utils.scala b/tapeout/src/main/scala/transforms/macros/Utils.scala index 9f9d215e6..2355c9f3a 100644 --- a/tapeout/src/main/scala/transforms/macros/Utils.scala +++ b/tapeout/src/main/scala/transforms/macros/Utils.scala @@ -6,133 +6,70 @@ import firrtl._ import firrtl.ir._ import firrtl.PrimOps import firrtl.Utils.{ceilLog2, BoolType} -import scala.util.parsing.json.JSON // Todo: this will be gone +import mdf.macrolib.{Constant, MacroPort, SRAMMacro} +import mdf.macrolib.{PolarizedPort, PortPolarity, ActiveLow, ActiveHigh, NegativeEdge, PositiveEdge} import java.io.File import scala.language.implicitConversions -trait PortPolarity -case object ActiveLow extends PortPolarity -case object ActiveHigh extends PortPolarity -case object NegativeEdge extends PortPolarity -case object PositiveEdge extends PortPolarity -object PortPolarity { - implicit def toPortPolarity(s: Any): PortPolarity = - s match { - case "active low" => ActiveLow - case "active high" => ActiveHigh - case "negative edge" => NegativeEdge - case "positive edge" => PositiveEdge - case _ => throw new firrtl.passes.PassException(s"Wrong port polarity: ${s.toString}") - } - implicit 
def toPortPolarity(s: Option[Any]): Option[PortPolarity] = - s map toPortPolarity -} +class FirrtlMacroPort(port: MacroPort) { + val src = port + + val isReader = !port.readEnable.isEmpty && port.writeEnable.isEmpty + val isWriter = !port.writeEnable.isEmpty && port.readEnable.isEmpty + val isReadWriter = !port.writeEnable.isEmpty && !port.readEnable.isEmpty -case class MacroPort( - clockName: String, - clockPolarity: Option[PortPolarity], - addressName: String, - addressPolarity: Option[PortPolarity], - inputName: Option[String], - inputPolarity: Option[PortPolarity], - outputName: Option[String], - outputPolarity: Option[PortPolarity], - chipEnableName: Option[String], - chipEnablePolarity: Option[PortPolarity], - readEnableName: Option[String], - readEnablePolarity: Option[PortPolarity], - writeEnableName: Option[String], - writeEnablePolarity: Option[PortPolarity], - maskName: Option[String], - maskPolarity: Option[PortPolarity], - maskGran: Option[BigInt], - width: BigInt, - depth: BigInt) { - val effectiveMaskGran = maskGran.getOrElse(width) - val addrType = UIntType(IntWidth(ceilLog2(depth) max 1)) - val dataType = UIntType(IntWidth(width)) - val maskType = UIntType(IntWidth(width / effectiveMaskGran)) + val AddrType = UIntType(IntWidth(ceilLog2(port.depth) max 1)) + val DataType = UIntType(IntWidth(port.width)) + val MaskType = UIntType(IntWidth(port.width / port.effectiveMaskGran)) + + // Bundle representing this macro port. val tpe = BundleType(Seq( - Field(clockName, Flip, ClockType), - Field(addressName, Flip, addrType)) ++ - (inputName map (Field(_, Flip, dataType))) ++ - (outputName map (Field(_, Default, dataType))) ++ - (chipEnableName map (Field(_, Flip, BoolType))) ++ - (readEnableName map (Field(_, Flip, BoolType))) ++ - (writeEnableName map (Field(_, Flip, BoolType))) ++ - (maskName map (Field(_, Flip, maskType))) + Field(port.clock.name, Flip, ClockType), + Field(port.address.name, Flip, AddrType)) ++ + (port.input map (p => Field(p.name, Flip, DataType))) ++ + (port.output map (p => Field(p.name, Default, DataType))) ++ + (port.chipEnable map (p => Field(p.name, Flip, BoolType))) ++ + (port.readEnable map (p => Field(p.name, Flip, BoolType))) ++ + (port.writeEnable map (p => Field(p.name, Flip, BoolType))) ++ + (port.maskPort map (p => Field(p.name, Flip, MaskType))) ) val ports = tpe.fields map (f => Port( NoInfo, f.name, f.flip match { case Default => Output case Flip => Input }, f.tpe)) } -class Macro(lib: Map[String, Any]) { - val name = lib("name").asInstanceOf[String] - val width = BigInt(lib("width").asInstanceOf[Double].toLong) - val depth = BigInt(lib("depth").asInstanceOf[Double].toLong) - val ports = lib("ports").asInstanceOf[List[_]] map { x => - val map = x.asInstanceOf[Map[String, Any]] - MacroPort( - map("clock port name").asInstanceOf[String], - map get "clock port polarity", - map("address port name").asInstanceOf[String], - map get "address port polarity", - map get "input port name" map (_.asInstanceOf[String]), - map get "input port polarity", - map get "output port name" map (_.asInstanceOf[String]), - map get "output port polarity", - map get "chip enable port name" map (_.asInstanceOf[String]), - map get "chip enable port polarity", - map get "read enable port name" map (_.asInstanceOf[String]), - map get "read enable port polarity", - map get "write enable port name" map (_.asInstanceOf[String]), - map get "write enable port polarity", - map get "mask port name" map (_.asInstanceOf[String]), - map get "mask port polarity", - map get "mask 
granularity" map (x => BigInt(x.asInstanceOf[Double].toLong)), - width, - depth - ) - } - val writers = ports filter (p => p.inputName.isDefined && !p.outputName.isDefined) - val readers = ports filter (p => !p.inputName.isDefined && p.outputName.isDefined) - val readwriters = ports filter (p => p.inputName.isDefined && p.outputName.isDefined) +// Reads an SRAMMacro and generates firrtl blackboxes. +class Macro(srcMacro: SRAMMacro) { + val src = srcMacro + + val firrtlPorts = srcMacro.ports map { new FirrtlMacroPort(_) } + + val writers = firrtlPorts filter (p => p.isReader) + val readers = firrtlPorts filter (p => p.isWriter) + val readwriters = firrtlPorts filter (p => p.isReadWriter) + val sortedPorts = writers ++ readers ++ readwriters - val extraPorts = lib get "extra ports" match { - case None => Nil - case Some(p) => p.asInstanceOf[List[_]] map { x => - val map = x.asInstanceOf[Map[String, Any]] - assert(map("type").asInstanceOf[String] == "constant") // TODO: release it? - val name = map("name").asInstanceOf[String] - val width = BigInt(map("width").asInstanceOf[Double].toLong) - val value = BigInt(map("value").asInstanceOf[Double].toLong) - (name -> UIntLiteral(value, IntWidth(width))) - } + val extraPorts = srcMacro.extraPorts map { p => + assert(p.portType == Constant) // TODO: release it? + val name = p.name + val width = BigInt(p.width.asInstanceOf[Double].toLong) + val value = BigInt(p.value.asInstanceOf[Double].toLong) + (name -> UIntLiteral(value, IntWidth(width))) } - val tpe = BundleType(ports flatMap (_.tpe.fields)) - private val modPorts = (ports flatMap (_.ports)) ++ + + // Bundle representing this memory blackbox + val tpe = BundleType(firrtlPorts flatMap (_.tpe.fields)) + + private val modPorts = (firrtlPorts flatMap (_.ports)) ++ (extraPorts map { case (name, value) => Port(NoInfo, name, Input, value.tpe) }) - val blackbox = ExtModule(NoInfo, name, modPorts, name, Nil) - def module(body: Statement) = Module(NoInfo, name, modPorts, body) + val blackbox = ExtModule(NoInfo, srcMacro.name, modPorts, srcMacro.name, Nil) + def module(body: Statement) = Module(NoInfo, srcMacro.name, modPorts, body) } object Utils { - def readJSON(file: Option[File]): Option[Seq[Map[String, Any]]] = file match { - case None => None - case Some(f) => try { - (JSON parseFull io.Source.fromFile(f).mkString) match { - case Some(p: List[Any]) => Some( - (p foldLeft Seq[Map[String, Any]]()){ - case (res, x: Map[_, _]) => - val map = x.asInstanceOf[Map[String, Any]] - if (map("type").asInstanceOf[String] == "sram") res :+ map else res - case (res, _) => res - } - ) - case _ => Some(Nil) - } - } catch { - case _: Throwable => Some(Nil) + def filterForSRAM(s: Option[Seq[mdf.macrolib.Macro]]): Option[Seq[mdf.macrolib.SRAMMacro]] = { + s match { + case Some(l:Seq[mdf.macrolib.Macro]) => Some(l filter { _.macroType == mdf.macrolib.SRAM } map { m => m.asInstanceOf[mdf.macrolib.SRAMMacro] }) + case _ => None } } @@ -147,7 +84,11 @@ object Utils { def not(e: Expression) = DoPrim(PrimOps.Not, Seq(e), Nil, e.tpe) - def invert(exp: Expression, polarity: Option[PortPolarity]) = + // Convert a port to a FIRRTL expression, handling polarity along the way. 
+ def portToExpression(pp: PolarizedPort): Expression = + portToExpression(WRef(pp.name), Some(pp.polarity)) + + def portToExpression(exp: Expression, polarity: Option[PortPolarity]): Expression = polarity match { case Some(ActiveLow) | Some(NegativeEdge) => not(exp) case _ => exp diff --git a/tapeout/src/test/resources/macros/lib-1024x8-mrw.json b/tapeout/src/test/resources/macros/lib-1024x8-mrw.json index b85f45dcb..e5cfa0c29 100644 --- a/tapeout/src/test/resources/macros/lib-1024x8-mrw.json +++ b/tapeout/src/test/resources/macros/lib-1024x8-mrw.json @@ -4,6 +4,7 @@ "name": "vendor_sram", "depth": 1024, "width": 8, + "family": "1rw", "ports": [ { "clock port name": "clock", diff --git a/tapeout/src/test/resources/macros/mem-2048x8-mrw.json b/tapeout/src/test/resources/macros/mem-2048x8-mrw.json index 0873fbdb6..64f6bfd70 100644 --- a/tapeout/src/test/resources/macros/mem-2048x8-mrw.json +++ b/tapeout/src/test/resources/macros/mem-2048x8-mrw.json @@ -4,6 +4,7 @@ "name": "name_of_sram_module", "depth": 2048, "width": 8, + "family": "1rw", "ports": [ { "clock port name": "clock", diff --git a/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala b/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala index f102f2502..a99397151 100644 --- a/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala +++ b/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala @@ -1,302 +1,381 @@ -package barstools.tapeout.transforms.macros +package barstools.tapeout.transforms.macros.test -import firrtl._ +import barstools.tapeout.transforms.macros._ import firrtl.ir.{Circuit, NoInfo} import firrtl.passes.RemoveEmpty import firrtl.Parser.parse import java.io.{File, StringWriter} -import Utils.readJSON + +// TODO: we should think of a less brittle way to run these tests. abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalatest.Matchers { - val macroDir = new File("tapeout/src/test/resources/macros") - val testDir = new File("test_run_dir/macros") ; testDir.mkdirs + val macroDir: String = "tapeout/src/test/resources/macros" + val testDir: String = "test_run_dir/macros" + new File(testDir).mkdirs // Make sure the testDir exists + + // Override these to change the prefixing of macroDir and testDir + val memPrefix: String = macroDir + val libPrefix: String = macroDir + val vPrefix: String = testDir - def args(mem: File, lib: Option[File], v: File, synflops: Boolean) = - List("-m", mem.toString, "-v", v.toString) ++ + private def args(mem: String, lib: Option[String], v: String, synflops: Boolean) = + List("-m", mem.toString, "-v", v) ++ (lib match { case None => Nil case Some(l) => List("-l", l.toString) }) ++ (if (synflops) List("--syn-flops") else Nil) - def compile(mem: File, lib: Option[File], v: File, synflops: Boolean) { - MacroCompiler.run(args(mem, lib, v, synflops)) + // Run the full compiler as if from the command line interface. + // Generates the Verilog; useful in testing since an error will throw an + // exception. + def compile(mem: String, lib: String, v: String, synflops: Boolean) { + compile(mem, Some(lib), v, synflops) } + def compile(mem: String, lib: Option[String], v: String, synflops: Boolean) { + var mem_full = concat(memPrefix, mem) + var lib_full = concat(libPrefix, lib) + var v_full = concat(vPrefix, v) + + MacroCompiler.run(args(mem_full, lib_full, v_full, synflops)) + } + + // Helper functions to write macro libraries to the given files. 
+ def writeToLib(lib: String, libs: Seq[mdf.macrolib.Macro]) = { + mdf.macrolib.Utils.writeMDFToPath(Some(concat(libPrefix, lib)), libs) + } + + def writeToMem(mem: String, mems: Seq[mdf.macrolib.Macro]) = { + mdf.macrolib.Utils.writeMDFToPath(Some(concat(memPrefix, mem)), mems) + } + + // Execute the macro compiler and compare FIRRTL outputs. + // TODO: think of a less brittle way to test this? + def execute(memFile: String, libFile: String, synflops: Boolean, output: String): Unit = { + execute(Some(memFile), Some(libFile), synflops, output) + } + def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean, output: String): Unit = { + var mem_full = concat(memPrefix, memFile) + var lib_full = concat(libPrefix, libFile) - def execute(memFile: Option[File], libFile: Option[File], synflops: Boolean, output: String) { require(memFile.isDefined) - val mems = readJSON(memFile) map (_ map (x => new Macro(x))) - val libs = readJSON(libFile) map (_ map (x => new Macro(x))) - val macros = mems.get map (_.blackbox) + val mems: Seq[Macro] = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(mem_full)).get map (new Macro(_)) + val libs: Option[Seq[Macro]] = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(lib_full)) match { + case Some(x) => Some(x map (new Macro(_))) + case None => None + } + val macros = mems map (_.blackbox) val circuit = Circuit(NoInfo, macros, macros.last.name) val passes = Seq( - new MacroCompilerPass(mems, libs), - new SynFlopsPass(synflops, libs getOrElse mems.get), + new MacroCompilerPass(Some(mems), libs), + new SynFlopsPass(synflops, libs getOrElse mems), RemoveEmpty) val result = (passes foldLeft circuit)((c, pass) => pass run c) val gold = RemoveEmpty run parse(output) (result.serialize) should be (gold.serialize) } + + // Helper method to deal with String + Option[String] + private def concat(a: String, b: String): String = {a + "/" + b} + private def concat(a: String, b: Option[String]): Option[String] = { + b match { + case Some(b2:String) => Some(a + "/" + b2) + case _ => None + } + } } -class RocketChipTest extends MacroCompilerSpec { - val mem = new File(macroDir, "rocketchip.json") - val lib = new File(macroDir, "mylib.json") - val v = new File(testDir, "rocketchip.macro.v") - val output = // TODO: check correctness... 
-""" -circuit T_2172_ext : - module tag_array_ext : - input RW0_clk : Clock - input RW0_addr : UInt<6> - input RW0_wdata : UInt<80> - output RW0_rdata : UInt<80> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - input RW0_wmask : UInt<4> - - inst mem_0_0 of SRAM1RW64x32 - inst mem_0_1 of SRAM1RW64x32 - inst mem_0_2 of SRAM1RW64x32 - inst mem_0_3 of SRAM1RW64x32 - mem_0_0.CE <= RW0_clk - mem_0_0.A <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.O, 19, 0) - mem_0_0.I <= bits(RW0_wdata, 19, 0) - mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) - mem_0_0.CEB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_1.CE <= RW0_clk - mem_0_1.A <= RW0_addr - node RW0_rdata_0_1 = bits(mem_0_1.O, 19, 0) - mem_0_1.I <= bits(RW0_wdata, 39, 20) - mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) - mem_0_1.CEB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_2.CE <= RW0_clk - mem_0_2.A <= RW0_addr - node RW0_rdata_0_2 = bits(mem_0_2.O, 19, 0) - mem_0_2.I <= bits(RW0_wdata, 59, 40) - mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) - mem_0_2.CEB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_3.CE <= RW0_clk - mem_0_3.A <= RW0_addr - node RW0_rdata_0_3 = bits(mem_0_3.O, 19, 0) - mem_0_3.I <= bits(RW0_wdata, 79, 60) - mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) - mem_0_3.CEB <= not(and(RW0_en, UInt<1>("h1"))) - node RW0_rdata_0 = cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) - - extmodule SRAM1RW64x32 : - input CE : Clock - input A : UInt<6> - input I : UInt<32> - output O : UInt<32> - input CEB : UInt<1> - input OEB : UInt<1> - input WEB : UInt<1> - - defname = SRAM1RW64x32 - - - module T_1090_ext : - input RW0_clk : Clock - input RW0_addr : UInt<9> - input RW0_wdata : UInt<64> - output RW0_rdata : UInt<64> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - - inst mem_0_0 of SRAM1RW512x32 - inst mem_0_1 of SRAM1RW512x32 - mem_0_0.CE <= RW0_clk - mem_0_0.A <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.O, 31, 0) - mem_0_0.I <= bits(RW0_wdata, 31, 0) - mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_0.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) - mem_0_0.CEB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_1.CE <= RW0_clk - mem_0_1.A <= RW0_addr - node RW0_rdata_0_1 = bits(mem_0_1.O, 31, 0) - mem_0_1.I <= bits(RW0_wdata, 63, 32) - mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_1.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) - mem_0_1.CEB <= not(and(RW0_en, UInt<1>("h1"))) - node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) - - module T_406_ext : - input RW0_clk : Clock - input RW0_addr : UInt<9> - input RW0_wdata : UInt<64> - output RW0_rdata : UInt<64> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - input RW0_wmask : UInt<8> - - inst mem_0_0 of SRAM1RW512x32 - inst mem_0_1 of SRAM1RW512x32 - inst mem_0_2 of SRAM1RW512x32 - inst mem_0_3 of SRAM1RW512x32 - inst mem_0_4 of SRAM1RW512x32 - inst mem_0_5 of SRAM1RW512x32 - inst mem_0_6 of SRAM1RW512x32 - inst mem_0_7 of SRAM1RW512x32 - mem_0_0.CE <= RW0_clk - mem_0_0.A <= RW0_addr - node RW0_rdata_0_0 = 
bits(mem_0_0.O, 7, 0) - mem_0_0.I <= bits(RW0_wdata, 7, 0) - mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) - mem_0_0.CEB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_1.CE <= RW0_clk - mem_0_1.A <= RW0_addr - node RW0_rdata_0_1 = bits(mem_0_1.O, 7, 0) - mem_0_1.I <= bits(RW0_wdata, 15, 8) - mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) - mem_0_1.CEB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_2.CE <= RW0_clk - mem_0_2.A <= RW0_addr - node RW0_rdata_0_2 = bits(mem_0_2.O, 7, 0) - mem_0_2.I <= bits(RW0_wdata, 23, 16) - mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) - mem_0_2.CEB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_3.CE <= RW0_clk - mem_0_3.A <= RW0_addr - node RW0_rdata_0_3 = bits(mem_0_3.O, 7, 0) - mem_0_3.I <= bits(RW0_wdata, 31, 24) - mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) - mem_0_3.CEB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_4.CE <= RW0_clk - mem_0_4.A <= RW0_addr - node RW0_rdata_0_4 = bits(mem_0_4.O, 7, 0) - mem_0_4.I <= bits(RW0_wdata, 39, 32) - mem_0_4.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_4.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 4, 4)), UInt<1>("h1"))) - mem_0_4.CEB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_5.CE <= RW0_clk - mem_0_5.A <= RW0_addr - node RW0_rdata_0_5 = bits(mem_0_5.O, 7, 0) - mem_0_5.I <= bits(RW0_wdata, 47, 40) - mem_0_5.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_5.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 5, 5)), UInt<1>("h1"))) - mem_0_5.CEB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_6.CE <= RW0_clk - mem_0_6.A <= RW0_addr - node RW0_rdata_0_6 = bits(mem_0_6.O, 7, 0) - mem_0_6.I <= bits(RW0_wdata, 55, 48) - mem_0_6.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_6.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 6, 6)), UInt<1>("h1"))) - mem_0_6.CEB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_7.CE <= RW0_clk - mem_0_7.A <= RW0_addr - node RW0_rdata_0_7 = bits(mem_0_7.O, 7, 0) - mem_0_7.I <= bits(RW0_wdata, 63, 56) - mem_0_7.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - mem_0_7.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1"))) - mem_0_7.CEB <= not(and(RW0_en, UInt<1>("h1"))) - node RW0_rdata_0 = cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) - - extmodule SRAM1RW512x32 : - input CE : Clock - input A : UInt<9> - input I : UInt<32> - output O : UInt<32> - input CEB : UInt<1> - input OEB : UInt<1> - input WEB : UInt<1> - - defname = SRAM1RW512x32 - - - module T_2172_ext : - input W0_clk : Clock - input W0_addr : UInt<6> - input W0_data : UInt<88> - input W0_en : UInt<1> - input W0_mask : UInt<4> - input R0_clk : Clock - input R0_addr : UInt<6> - output R0_data : UInt<88> - input R0_en : UInt<1> - - inst mem_0_0 of SRAM2RW64x32 - inst mem_0_1 of SRAM2RW64x32 - inst mem_0_2 of SRAM2RW64x32 - inst mem_0_3 of SRAM2RW64x32 - mem_0_0.CE1 <= W0_clk - mem_0_0.A1 <= W0_addr - mem_0_0.I1 <= bits(W0_data, 21, 0) - mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), UInt<1>("h1"))) - mem_0_0.CEB1 <= not(and(W0_en, 
UInt<1>("h1"))) - mem_0_1.CE1 <= W0_clk - mem_0_1.A1 <= W0_addr - mem_0_1.I1 <= bits(W0_data, 43, 22) - mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 1, 1)), UInt<1>("h1"))) - mem_0_1.CEB1 <= not(and(W0_en, UInt<1>("h1"))) - mem_0_2.CE1 <= W0_clk - mem_0_2.A1 <= W0_addr - mem_0_2.I1 <= bits(W0_data, 65, 44) - mem_0_2.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - mem_0_2.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 2, 2)), UInt<1>("h1"))) - mem_0_2.CEB1 <= not(and(W0_en, UInt<1>("h1"))) - mem_0_3.CE1 <= W0_clk - mem_0_3.A1 <= W0_addr - mem_0_3.I1 <= bits(W0_data, 87, 66) - mem_0_3.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - mem_0_3.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 3, 3)), UInt<1>("h1"))) - mem_0_3.CEB1 <= not(and(W0_en, UInt<1>("h1"))) - mem_0_0.CE2 <= R0_clk - mem_0_0.A2 <= R0_addr - node R0_data_0_0 = bits(mem_0_0.O2, 21, 0) - mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_0.CEB2 <= not(and(R0_en, UInt<1>("h1"))) - mem_0_1.CE2 <= R0_clk - mem_0_1.A2 <= R0_addr - node R0_data_0_1 = bits(mem_0_1.O2, 21, 0) - mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_1.CEB2 <= not(and(R0_en, UInt<1>("h1"))) - mem_0_2.CE2 <= R0_clk - mem_0_2.A2 <= R0_addr - node R0_data_0_2 = bits(mem_0_2.O2, 21, 0) - mem_0_2.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - mem_0_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_2.CEB2 <= not(and(R0_en, UInt<1>("h1"))) - mem_0_3.CE2 <= R0_clk - mem_0_3.A2 <= R0_addr - node R0_data_0_3 = bits(mem_0_3.O2, 21, 0) - mem_0_3.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - mem_0_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_3.CEB2 <= not(and(R0_en, UInt<1>("h1"))) - node R0_data_0 = cat(R0_data_0_3, cat(R0_data_0_2, cat(R0_data_0_1, R0_data_0_0))) - R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) - - extmodule SRAM2RW64x32 : - input CE1 : Clock - input A1 : UInt<6> - input I1 : UInt<32> - output O1 : UInt<32> - input CEB1 : UInt<1> - input OEB1 : UInt<1> - input WEB1 : UInt<1> - input CE2 : Clock - input A2 : UInt<6> - input I2 : UInt<32> - output O2 : UInt<32> - input CEB2 : UInt<1> - input OEB2 : UInt<1> - input WEB2 : UInt<1> - - defname = SRAM2RW64x32 -""" - compile(mem, Some(lib), v, false) +trait HasSRAMGenerator { + import mdf.macrolib._ + + // Generate a "simple" SRAM (active high/positive edge, 1 read-write port). + def generateSRAM(name: String, prefix: String, width: Int, depth: Int, maskGran: Option[Int] = None): SRAMMacro = { + val realPrefix = prefix + "_" + SRAMMacro( + macroType=SRAM, + name=name, + width=width, + depth=depth, + family="1rw", + ports=Seq(MacroPort( + address=PolarizedPort(name=realPrefix + "addr", polarity=ActiveHigh), + clock=PolarizedPort(name=realPrefix + "clk", polarity=PositiveEdge), + + writeEnable=Some(PolarizedPort(name=realPrefix + "write_en", polarity=ActiveHigh)), + + output=Some(PolarizedPort(name=realPrefix + "dout", polarity=ActiveHigh)), + input=Some(PolarizedPort(name=realPrefix + "din", polarity=ActiveHigh)), + + maskPort=maskGran match { + case Some(x:Int) => Some(PolarizedPort(name=realPrefix + "mask", polarity=ActiveHigh)) + case _ => None + }, + maskGran=maskGran, + + width=width, depth=depth // These numbers don't matter here. 
+ )) + ) + } } + +//~ class RocketChipTest extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "rocketchip.json") + //~ val lib = new File(macroDir, "mylib.json") + //~ val v = new File(testDir, "rocketchip.macro.v") + //~ val output = // TODO: check correctness... +//~ """ +//~ circuit T_2172_ext : + //~ module tag_array_ext : + //~ input RW0_clk : Clock + //~ input RW0_addr : UInt<6> + //~ input RW0_wdata : UInt<80> + //~ output RW0_rdata : UInt<80> + //~ input RW0_en : UInt<1> + //~ input RW0_wmode : UInt<1> + //~ input RW0_wmask : UInt<4> + + //~ inst mem_0_0 of SRAM1RW64x32 + //~ inst mem_0_1 of SRAM1RW64x32 + //~ inst mem_0_2 of SRAM1RW64x32 + //~ inst mem_0_3 of SRAM1RW64x32 + //~ mem_0_0.CE <= RW0_clk + //~ mem_0_0.A <= RW0_addr + //~ node RW0_rdata_0_0 = bits(mem_0_0.O, 19, 0) + //~ mem_0_0.I <= bits(RW0_wdata, 19, 0) + //~ mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) + //~ mem_0_0.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ mem_0_1.CE <= RW0_clk + //~ mem_0_1.A <= RW0_addr + //~ node RW0_rdata_0_1 = bits(mem_0_1.O, 19, 0) + //~ mem_0_1.I <= bits(RW0_wdata, 39, 20) + //~ mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) + //~ mem_0_1.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ mem_0_2.CE <= RW0_clk + //~ mem_0_2.A <= RW0_addr + //~ node RW0_rdata_0_2 = bits(mem_0_2.O, 19, 0) + //~ mem_0_2.I <= bits(RW0_wdata, 59, 40) + //~ mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) + //~ mem_0_2.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ mem_0_3.CE <= RW0_clk + //~ mem_0_3.A <= RW0_addr + //~ node RW0_rdata_0_3 = bits(mem_0_3.O, 19, 0) + //~ mem_0_3.I <= bits(RW0_wdata, 79, 60) + //~ mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) + //~ mem_0_3.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ node RW0_rdata_0 = cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))) + //~ RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + //~ extmodule SRAM1RW64x32 : + //~ input CE : Clock + //~ input A : UInt<6> + //~ input I : UInt<32> + //~ output O : UInt<32> + //~ input CEB : UInt<1> + //~ input OEB : UInt<1> + //~ input WEB : UInt<1> + + //~ defname = SRAM1RW64x32 + + + //~ module T_1090_ext : + //~ input RW0_clk : Clock + //~ input RW0_addr : UInt<9> + //~ input RW0_wdata : UInt<64> + //~ output RW0_rdata : UInt<64> + //~ input RW0_en : UInt<1> + //~ input RW0_wmode : UInt<1> + + //~ inst mem_0_0 of SRAM1RW512x32 + //~ inst mem_0_1 of SRAM1RW512x32 + //~ mem_0_0.CE <= RW0_clk + //~ mem_0_0.A <= RW0_addr + //~ node RW0_rdata_0_0 = bits(mem_0_0.O, 31, 0) + //~ mem_0_0.I <= bits(RW0_wdata, 31, 0) + //~ mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_0.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_0.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ mem_0_1.CE <= RW0_clk + //~ mem_0_1.A <= RW0_addr + //~ node RW0_rdata_0_1 = bits(mem_0_1.O, 31, 0) + //~ mem_0_1.I <= bits(RW0_wdata, 63, 32) + //~ mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_1.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_1.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) + //~ RW0_rdata <= 
mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + //~ module T_406_ext : + //~ input RW0_clk : Clock + //~ input RW0_addr : UInt<9> + //~ input RW0_wdata : UInt<64> + //~ output RW0_rdata : UInt<64> + //~ input RW0_en : UInt<1> + //~ input RW0_wmode : UInt<1> + //~ input RW0_wmask : UInt<8> + + //~ inst mem_0_0 of SRAM1RW512x32 + //~ inst mem_0_1 of SRAM1RW512x32 + //~ inst mem_0_2 of SRAM1RW512x32 + //~ inst mem_0_3 of SRAM1RW512x32 + //~ inst mem_0_4 of SRAM1RW512x32 + //~ inst mem_0_5 of SRAM1RW512x32 + //~ inst mem_0_6 of SRAM1RW512x32 + //~ inst mem_0_7 of SRAM1RW512x32 + //~ mem_0_0.CE <= RW0_clk + //~ mem_0_0.A <= RW0_addr + //~ node RW0_rdata_0_0 = bits(mem_0_0.O, 7, 0) + //~ mem_0_0.I <= bits(RW0_wdata, 7, 0) + //~ mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) + //~ mem_0_0.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ mem_0_1.CE <= RW0_clk + //~ mem_0_1.A <= RW0_addr + //~ node RW0_rdata_0_1 = bits(mem_0_1.O, 7, 0) + //~ mem_0_1.I <= bits(RW0_wdata, 15, 8) + //~ mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) + //~ mem_0_1.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ mem_0_2.CE <= RW0_clk + //~ mem_0_2.A <= RW0_addr + //~ node RW0_rdata_0_2 = bits(mem_0_2.O, 7, 0) + //~ mem_0_2.I <= bits(RW0_wdata, 23, 16) + //~ mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) + //~ mem_0_2.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ mem_0_3.CE <= RW0_clk + //~ mem_0_3.A <= RW0_addr + //~ node RW0_rdata_0_3 = bits(mem_0_3.O, 7, 0) + //~ mem_0_3.I <= bits(RW0_wdata, 31, 24) + //~ mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) + //~ mem_0_3.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ mem_0_4.CE <= RW0_clk + //~ mem_0_4.A <= RW0_addr + //~ node RW0_rdata_0_4 = bits(mem_0_4.O, 7, 0) + //~ mem_0_4.I <= bits(RW0_wdata, 39, 32) + //~ mem_0_4.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_4.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 4, 4)), UInt<1>("h1"))) + //~ mem_0_4.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ mem_0_5.CE <= RW0_clk + //~ mem_0_5.A <= RW0_addr + //~ node RW0_rdata_0_5 = bits(mem_0_5.O, 7, 0) + //~ mem_0_5.I <= bits(RW0_wdata, 47, 40) + //~ mem_0_5.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_5.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 5, 5)), UInt<1>("h1"))) + //~ mem_0_5.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ mem_0_6.CE <= RW0_clk + //~ mem_0_6.A <= RW0_addr + //~ node RW0_rdata_0_6 = bits(mem_0_6.O, 7, 0) + //~ mem_0_6.I <= bits(RW0_wdata, 55, 48) + //~ mem_0_6.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_6.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 6, 6)), UInt<1>("h1"))) + //~ mem_0_6.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ mem_0_7.CE <= RW0_clk + //~ mem_0_7.A <= RW0_addr + //~ node RW0_rdata_0_7 = bits(mem_0_7.O, 7, 0) + //~ mem_0_7.I <= bits(RW0_wdata, 63, 56) + //~ mem_0_7.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + //~ mem_0_7.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1"))) + //~ mem_0_7.CEB <= not(and(RW0_en, UInt<1>("h1"))) + //~ node RW0_rdata_0 = cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))) + //~ RW0_rdata <= 
mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + //~ extmodule SRAM1RW512x32 : + //~ input CE : Clock + //~ input A : UInt<9> + //~ input I : UInt<32> + //~ output O : UInt<32> + //~ input CEB : UInt<1> + //~ input OEB : UInt<1> + //~ input WEB : UInt<1> + + //~ defname = SRAM1RW512x32 + + + //~ module T_2172_ext : + //~ input W0_clk : Clock + //~ input W0_addr : UInt<6> + //~ input W0_data : UInt<88> + //~ input W0_en : UInt<1> + //~ input W0_mask : UInt<4> + //~ input R0_clk : Clock + //~ input R0_addr : UInt<6> + //~ output R0_data : UInt<88> + //~ input R0_en : UInt<1> + + //~ inst mem_0_0 of SRAM2RW64x32 + //~ inst mem_0_1 of SRAM2RW64x32 + //~ inst mem_0_2 of SRAM2RW64x32 + //~ inst mem_0_3 of SRAM2RW64x32 + //~ mem_0_0.CE1 <= W0_clk + //~ mem_0_0.A1 <= W0_addr + //~ mem_0_0.I1 <= bits(W0_data, 21, 0) + //~ mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), UInt<1>("h1"))) + //~ mem_0_0.CEB1 <= not(and(W0_en, UInt<1>("h1"))) + //~ mem_0_1.CE1 <= W0_clk + //~ mem_0_1.A1 <= W0_addr + //~ mem_0_1.I1 <= bits(W0_data, 43, 22) + //~ mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 1, 1)), UInt<1>("h1"))) + //~ mem_0_1.CEB1 <= not(and(W0_en, UInt<1>("h1"))) + //~ mem_0_2.CE1 <= W0_clk + //~ mem_0_2.A1 <= W0_addr + //~ mem_0_2.I1 <= bits(W0_data, 65, 44) + //~ mem_0_2.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_2.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 2, 2)), UInt<1>("h1"))) + //~ mem_0_2.CEB1 <= not(and(W0_en, UInt<1>("h1"))) + //~ mem_0_3.CE1 <= W0_clk + //~ mem_0_3.A1 <= W0_addr + //~ mem_0_3.I1 <= bits(W0_data, 87, 66) + //~ mem_0_3.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_3.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 3, 3)), UInt<1>("h1"))) + //~ mem_0_3.CEB1 <= not(and(W0_en, UInt<1>("h1"))) + //~ mem_0_0.CE2 <= R0_clk + //~ mem_0_0.A2 <= R0_addr + //~ node R0_data_0_0 = bits(mem_0_0.O2, 21, 0) + //~ mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + //~ mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_0.CEB2 <= not(and(R0_en, UInt<1>("h1"))) + //~ mem_0_1.CE2 <= R0_clk + //~ mem_0_1.A2 <= R0_addr + //~ node R0_data_0_1 = bits(mem_0_1.O2, 21, 0) + //~ mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + //~ mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_1.CEB2 <= not(and(R0_en, UInt<1>("h1"))) + //~ mem_0_2.CE2 <= R0_clk + //~ mem_0_2.A2 <= R0_addr + //~ node R0_data_0_2 = bits(mem_0_2.O2, 21, 0) + //~ mem_0_2.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + //~ mem_0_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_2.CEB2 <= not(and(R0_en, UInt<1>("h1"))) + //~ mem_0_3.CE2 <= R0_clk + //~ mem_0_3.A2 <= R0_addr + //~ node R0_data_0_3 = bits(mem_0_3.O2, 21, 0) + //~ mem_0_3.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + //~ mem_0_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_3.CEB2 <= not(and(R0_en, UInt<1>("h1"))) + //~ node R0_data_0 = cat(R0_data_0_3, cat(R0_data_0_2, cat(R0_data_0_1, R0_data_0_0))) + //~ R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) + + //~ extmodule SRAM2RW64x32 : + //~ input CE1 : Clock + //~ input A1 : UInt<6> + //~ input I1 : UInt<32> + //~ output O1 : UInt<32> + //~ input CEB1 : UInt<1> + //~ input OEB1 : UInt<1> + //~ input WEB1 : UInt<1> + //~ input CE2 : Clock + 
//~ input A2 : UInt<6> + //~ input I2 : UInt<32> + //~ output O2 : UInt<32> + //~ input CEB2 : UInt<1> + //~ input OEB2 : UInt<1> + //~ input WEB2 : UInt<1> + + //~ defname = SRAM2RW64x32 +//~ """ + //~ compile(mem, Some(lib), v, false) +//~ } diff --git a/tapeout/src/test/scala/transforms/macros/SplitDepth.scala b/tapeout/src/test/scala/transforms/macros/SplitDepth.scala index 7a9f5b9d5..d7ffebb0e 100644 --- a/tapeout/src/test/scala/transforms/macros/SplitDepth.scala +++ b/tapeout/src/test/scala/transforms/macros/SplitDepth.scala @@ -1,228 +1,171 @@ -package barstools.tapeout.transforms.macros +package barstools.tapeout.transforms.macros.test -import java.io.File +import firrtl.Utils.ceilLog2 +import mdf.macrolib._ -class SplitDepth2048x8_mrw extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2048x8-mrw.json") - val lib = new File(macroDir, "lib-1024x8-mrw.json") - val v = new File(testDir, "split_depth_2048x8_mrw.v") - val output = -""" -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<8> - output RW0O : UInt<8> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<1> +// Test the depth splitting aspect of the memory compiler. +// For example, implementing a 4096x32 memory using four 1024x32 memories. - node RW0A_sel = bits(RW0A, 10, 10) - inst mem_0_0 of vendor_sram - mem_0_0.clock <= clock - mem_0_0.RW0A <= RW0A - node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - mem_0_0.RW0I <= bits(RW0I, 7, 0) - mem_0_0.RW0M <= bits(RW0M, 0, 0) - mem_0_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h0"))) - mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) - node RW0O_0 = RW0O_0_0 - inst mem_1_0 of vendor_sram - mem_1_0.clock <= clock - mem_1_0.RW0A <= RW0A - node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) - mem_1_0.RW0I <= bits(RW0I, 7, 0) - mem_1_0.RW0M <= bits(RW0M, 0, 0) - mem_1_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h1"))) - mem_1_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h1"))) - node RW0O_1 = RW0O_1_0 - RW0O <= mux(eq(RW0A_sel, UInt<1>("h0")), RW0O_0, mux(eq(RW0A_sel, UInt<1>("h1")), RW0O_1, UInt<1>("h0"))) +trait HasSimpleDepthTestGenerator { + this: MacroCompilerSpec with HasSRAMGenerator => + // Override these with "override lazy val". + // Why lazy? These are used in the constructor here so overriding non-lazily + // would be too late. 
+ def width: Int + def mem_depth: Int + def lib_depth: Int - extmodule vendor_sram : - input clock : Clock - input RW0A : UInt<10> - input RW0I : UInt<8> - output RW0O : UInt<8> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<1> + require (mem_depth >= lib_depth) - defname = vendor_sram + override val memPrefix = testDir + override val libPrefix = testDir + + val mem = s"mem-${mem_depth}x${width}-rw.json" + val lib = s"lib-${lib_depth}x${width}-rw.json" + val v = s"split_depth_${mem_depth}x${width}_rw.v" + + val mem_name = "target_memory" + val mem_addr_width = ceilLog2(mem_depth) + + val lib_name = "awesome_lib_mem" + val lib_addr_width = ceilLog2(lib_depth) + + writeToLib(lib, Seq(generateSRAM(lib_name, "lib", width, lib_depth))) + writeToMem(mem, Seq(generateSRAM(mem_name, "outer", width, mem_depth))) + + val expectedInstances = mem_depth / lib_depth + val selectBits = mem_addr_width - lib_addr_width + var output = +s""" +circuit $mem_name : + module $mem_name : + input outer_clk : Clock + input outer_addr : UInt<$mem_addr_width> + input outer_din : UInt<$width> + output outer_dout : UInt<$width> + input outer_write_en : UInt<1> """ - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) -} -class SplitDepth2000x8_mrw extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2000x8-mrw.json") - val lib = new File(macroDir, "lib-1024x8-mrw.json") - val v = new File(testDir, "split_depth_2000x8_mrw.v") - val output = + if (selectBits > 0) { + output += +s""" + node outer_addr_sel = bits(outer_addr, ${mem_addr_width - 1}, $lib_addr_width) """ -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<8> - output RW0O : UInt<8> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<1> + } - node RW0A_sel = bits(RW0A, 10, 10) - inst mem_0_0 of vendor_sram - mem_0_0.clock <= clock - mem_0_0.RW0A <= RW0A - node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - mem_0_0.RW0I <= bits(RW0I, 7, 0) - mem_0_0.RW0M <= bits(RW0M, 0, 0) - mem_0_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h0"))) - mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) - node RW0O_0 = RW0O_0_0 - inst mem_1_0 of vendor_sram - mem_1_0.clock <= clock - mem_1_0.RW0A <= RW0A - node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) - mem_1_0.RW0I <= bits(RW0I, 7, 0) - mem_1_0.RW0M <= bits(RW0M, 0, 0) - mem_1_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h1"))) - mem_1_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h1"))) - node RW0O_1 = RW0O_1_0 - RW0O <= mux(eq(RW0A_sel, UInt<1>("h0")), RW0O_0, mux(eq(RW0A_sel, UInt<1>("h1")), RW0O_1, UInt<1>("h0"))) + for (i <- 0 to expectedInstances - 1) { + val enableIdentifier = if (selectBits > 0) s"""eq(outer_addr_sel, UInt<${selectBits}>("h${i.toHexString}"))""" else "UInt<1>(\"h1\")" + output += +s""" + inst mem_${i}_0 of awesome_lib_mem + mem_${i}_0.lib_clk <= outer_clk + mem_${i}_0.lib_addr <= outer_addr + node outer_dout_${i}_0 = bits(mem_${i}_0.lib_dout, ${width - 1}, 0) + mem_${i}_0.lib_din <= bits(outer_din, ${width - 1}, 0) + mem_${i}_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), ${enableIdentifier}) + node outer_dout_${i} = outer_dout_${i}_0 +""" + } + def generate_outer_dout_tree(i:Int, expectedInstances: Int): String = { + if (i > expectedInstances - 1) { + "UInt<1>(\"h0\")" + } else { + "mux(eq(outer_addr_sel, UInt<%d>(\"h%s\")), outer_dout_%d, %s)".format( + selectBits, i.toHexString, i, generate_outer_dout_tree(i + 1, expectedInstances) + ) + } + } + output += " 
outer_dout <= " + if (selectBits > 0) { + output += generate_outer_dout_tree(0, expectedInstances) + } else { + output += """mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0"))""" + } - extmodule vendor_sram : - input clock : Clock - input RW0A : UInt<10> - input RW0I : UInt<8> - output RW0O : UInt<8> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<1> + output += +s""" + extmodule $lib_name : + input lib_clk : Clock + input lib_addr : UInt<$lib_addr_width> + input lib_din : UInt<$width> + output lib_dout : UInt<$width> + input lib_write_en : UInt<1> - defname = vendor_sram + defname = $lib_name """ - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) } -class SplitDepth2048x8_n28 extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2048x8-mrw.json") - val lib = new File(macroDir, "lib-1024x8-n28.json") - val v = new File(testDir, "split_depth_2048x8_n28.v") - val output = -""" -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<8> - output RW0O : UInt<8> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<1> +// Try different widths +class SplitDepth4096x32_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 32 + override lazy val mem_depth = 4096 + override lazy val lib_depth = 1024 - node RW0A_sel = bits(RW0A, 10, 10) - inst mem_0_0 of vendor_sram - mem_0_0.clock <= clock - mem_0_0.RW0A <= RW0A - node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - mem_0_0.RW0I <= bits(RW0I, 7, 0) - mem_0_0.RW0M <= cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))) - mem_0_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h0"))) - mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) - node RW0O_0 = RW0O_0_0 - inst mem_1_0 of vendor_sram - mem_1_0.clock <= clock - mem_1_0.RW0A <= RW0A - node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) - mem_1_0.RW0I <= bits(RW0I, 7, 0) - mem_1_0.RW0M <= cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))) - mem_1_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h1"))) - mem_1_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h1"))) - node RW0O_1 = RW0O_1_0 - RW0O <= mux(eq(RW0A_sel, UInt<1>("h0")), RW0O_0, mux(eq(RW0A_sel, UInt<1>("h1")), RW0O_1, UInt<1>("h0"))) + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} - extmodule vendor_sram : - input clock : Clock - input RW0A : UInt<10> - input RW0I : UInt<8> - output RW0O : UInt<8> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<8> +class SplitDepth4096x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 16 + override lazy val mem_depth = 4096 + override lazy val lib_depth = 1024 - defname = vendor_sram -""" - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) + compile(mem, lib, v, false) + execute(mem, lib, false, output) } -class SplitDepth2048x8_r_mw extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2048x8-r-mw.json") - val lib = new File(macroDir, "lib-1024x8-r-mw.json") - val v = new File(testDir, "split_depth_2048x8_r_mw.v") - val output = -""" -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input W0A : UInt<11> - input W0I : 
UInt<8> - input W0E : UInt<1> - input W0M : UInt<1> - input clock : Clock - input R0A : UInt<11> - output R0O : UInt<8> +class SplitDepth32768x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 8 + override lazy val mem_depth = 32768 + override lazy val lib_depth = 1024 - node W0A_sel = bits(W0A, 10, 10) - node R0A_sel = bits(R0A, 10, 10) - inst mem_0_0 of vendor_sram - mem_0_0.clock <= clock - mem_0_0.W0A <= W0A - mem_0_0.W0I <= bits(W0I, 7, 0) - mem_0_0.W0M <= bits(W0M, 0, 0) - mem_0_0.W0W <= and(UInt<1>("h1"), eq(W0A_sel, UInt<1>("h0"))) - mem_0_0.W0E <= and(W0E, eq(W0A_sel, UInt<1>("h0"))) - mem_0_0.clock <= clock - mem_0_0.R0A <= R0A - node R0O_0_0 = bits(mem_0_0.R0O, 7, 0) - node R0O_0 = R0O_0_0 - inst mem_1_0 of vendor_sram - mem_1_0.clock <= clock - mem_1_0.W0A <= W0A - mem_1_0.W0I <= bits(W0I, 7, 0) - mem_1_0.W0M <= bits(W0M, 0, 0) - mem_1_0.W0W <= and(UInt<1>("h1"), eq(W0A_sel, UInt<1>("h1"))) - mem_1_0.W0E <= and(W0E, eq(W0A_sel, UInt<1>("h1"))) - mem_1_0.clock <= clock - mem_1_0.R0A <= R0A - node R0O_1_0 = bits(mem_1_0.R0O, 7, 0) - node R0O_1 = R0O_1_0 - R0O <= mux(eq(R0A_sel, UInt<1>("h0")), R0O_0, mux(eq(R0A_sel, UInt<1>("h1")), R0O_1, UInt<1>("h0"))) + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} - extmodule vendor_sram : - input clock : Clock - input R0A : UInt<10> - output R0O : UInt<8> - input clock : Clock - input W0A : UInt<10> - input W0I : UInt<8> - input W0E : UInt<1> - input W0W : UInt<1> - input W0M : UInt<1> +class SplitDepth4096x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 8 + override lazy val mem_depth = 4096 + override lazy val lib_depth = 1024 - defname = vendor_sram -""" - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitDepth2048x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 8 + override lazy val mem_depth = 2048 + override lazy val lib_depth = 1024 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) } +class SplitDepth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 8 + override lazy val mem_depth = 1024 + override lazy val lib_depth = 1024 -class SplitDepth2048x8_mrw_Sleep extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2048x8-mrw.json") - val lib = new File(macroDir, "lib-1024x8-sleep.json") - val v = new File(testDir, "split_depth_2048x8_sleep.v") + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +// Non power of two +class SplitDepth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 8 + override lazy val mem_depth = 1024 + override lazy val lib_depth = 1024 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +// Masked RAMs + +class SplitDepth2048x8_mrw extends MacroCompilerSpec { + val mem = "mem-2048x8-mrw.json" + val lib = "lib-1024x8-mrw.json" + val v = "split_depth_2048x8_mrw.v" val output = """ circuit name_of_sram_module : @@ -237,7 +180,6 @@ circuit name_of_sram_module : node RW0A_sel = bits(RW0A, 10, 10) inst mem_0_0 of vendor_sram - mem_0_0.sleep <= UInt<1>("h0") mem_0_0.clock <= clock mem_0_0.RW0A <= RW0A node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) @@ -247,7 +189,6 @@ circuit name_of_sram_module : 
mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) node RW0O_0 = RW0O_0_0 inst mem_1_0 of vendor_sram - mem_1_0.sleep <= UInt<1>("h0") mem_1_0.clock <= clock mem_1_0.RW0A <= RW0A node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) @@ -266,10 +207,177 @@ circuit name_of_sram_module : input RW0E : UInt<1> input RW0W : UInt<1> input RW0M : UInt<1> - input sleep : UInt<1> - defname = vendor_sram + defname = vendor_sram """ - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) + compile(mem, lib, v, false) + execute(mem, lib, false, output) } + +//~ class SplitDepth2048x8_n28 extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "mem-2048x8-mrw.json") + //~ val lib = new File(macroDir, "lib-1024x8-n28.json") + //~ val v = new File(testDir, "split_depth_2048x8_n28.v") + //~ val output = +//~ """ +//~ circuit name_of_sram_module : + //~ module name_of_sram_module : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<8> + //~ output RW0O : UInt<8> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<1> + + //~ node RW0A_sel = bits(RW0A, 10, 10) + //~ inst mem_0_0 of vendor_sram + //~ mem_0_0.clock <= clock + //~ mem_0_0.RW0A <= RW0A + //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + //~ mem_0_0.RW0I <= bits(RW0I, 7, 0) + //~ mem_0_0.RW0M <= cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))) + //~ mem_0_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h0"))) + //~ mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) + //~ node RW0O_0 = RW0O_0_0 + //~ inst mem_1_0 of vendor_sram + //~ mem_1_0.clock <= clock + //~ mem_1_0.RW0A <= RW0A + //~ node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) + //~ mem_1_0.RW0I <= bits(RW0I, 7, 0) + //~ mem_1_0.RW0M <= cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))) + //~ mem_1_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h1"))) + //~ mem_1_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h1"))) + //~ node RW0O_1 = RW0O_1_0 + //~ RW0O <= mux(eq(RW0A_sel, UInt<1>("h0")), RW0O_0, mux(eq(RW0A_sel, UInt<1>("h1")), RW0O_1, UInt<1>("h0"))) + + //~ extmodule vendor_sram : + //~ input clock : Clock + //~ input RW0A : UInt<10> + //~ input RW0I : UInt<8> + //~ output RW0O : UInt<8> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<8> + + //~ defname = vendor_sram +//~ """ + //~ compile(mem, lib, v, false) + //~ execute(mem, lib, false, output) +//~ } + +//~ class SplitDepth2048x8_r_mw extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "mem-2048x8-r-mw.json") + //~ val lib = new File(macroDir, "lib-1024x8-r-mw.json") + //~ val v = new File(testDir, "split_depth_2048x8_r_mw.v") + //~ val output = +//~ """ +//~ circuit name_of_sram_module : + //~ module name_of_sram_module : + //~ input clock : Clock + //~ input W0A : UInt<11> + //~ input W0I : UInt<8> + //~ input W0E : UInt<1> + //~ input W0M : UInt<1> + //~ input clock : Clock + //~ input R0A : UInt<11> + //~ output R0O : UInt<8> + + //~ node W0A_sel = bits(W0A, 10, 10) + //~ node R0A_sel = bits(R0A, 10, 10) + //~ inst mem_0_0 of vendor_sram + //~ mem_0_0.clock <= clock + //~ mem_0_0.W0A <= W0A + //~ mem_0_0.W0I <= bits(W0I, 7, 0) + //~ mem_0_0.W0M <= bits(W0M, 0, 0) + //~ mem_0_0.W0W <= and(UInt<1>("h1"), eq(W0A_sel, UInt<1>("h0"))) + //~ mem_0_0.W0E <= and(W0E, eq(W0A_sel, 
UInt<1>("h0"))) + //~ mem_0_0.clock <= clock + //~ mem_0_0.R0A <= R0A + //~ node R0O_0_0 = bits(mem_0_0.R0O, 7, 0) + //~ node R0O_0 = R0O_0_0 + //~ inst mem_1_0 of vendor_sram + //~ mem_1_0.clock <= clock + //~ mem_1_0.W0A <= W0A + //~ mem_1_0.W0I <= bits(W0I, 7, 0) + //~ mem_1_0.W0M <= bits(W0M, 0, 0) + //~ mem_1_0.W0W <= and(UInt<1>("h1"), eq(W0A_sel, UInt<1>("h1"))) + //~ mem_1_0.W0E <= and(W0E, eq(W0A_sel, UInt<1>("h1"))) + //~ mem_1_0.clock <= clock + //~ mem_1_0.R0A <= R0A + //~ node R0O_1_0 = bits(mem_1_0.R0O, 7, 0) + //~ node R0O_1 = R0O_1_0 + //~ R0O <= mux(eq(R0A_sel, UInt<1>("h0")), R0O_0, mux(eq(R0A_sel, UInt<1>("h1")), R0O_1, UInt<1>("h0"))) + + //~ extmodule vendor_sram : + //~ input clock : Clock + //~ input R0A : UInt<10> + //~ output R0O : UInt<8> + //~ input clock : Clock + //~ input W0A : UInt<10> + //~ input W0I : UInt<8> + //~ input W0E : UInt<1> + //~ input W0W : UInt<1> + //~ input W0M : UInt<1> + + //~ defname = vendor_sram +//~ """ + //~ compile(mem, lib, v, false) + //~ execute(mem, lib, false, output) +//~ } + + +//~ class SplitDepth2048x8_mrw_Sleep extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "mem-2048x8-mrw.json") + //~ val lib = new File(macroDir, "lib-1024x8-sleep.json") + //~ val v = new File(testDir, "split_depth_2048x8_sleep.v") + //~ val output = +//~ """ +//~ circuit name_of_sram_module : + //~ module name_of_sram_module : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<8> + //~ output RW0O : UInt<8> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<1> + + //~ node RW0A_sel = bits(RW0A, 10, 10) + //~ inst mem_0_0 of vendor_sram + //~ mem_0_0.sleep <= UInt<1>("h0") + //~ mem_0_0.clock <= clock + //~ mem_0_0.RW0A <= RW0A + //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + //~ mem_0_0.RW0I <= bits(RW0I, 7, 0) + //~ mem_0_0.RW0M <= bits(RW0M, 0, 0) + //~ mem_0_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h0"))) + //~ mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) + //~ node RW0O_0 = RW0O_0_0 + //~ inst mem_1_0 of vendor_sram + //~ mem_1_0.sleep <= UInt<1>("h0") + //~ mem_1_0.clock <= clock + //~ mem_1_0.RW0A <= RW0A + //~ node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) + //~ mem_1_0.RW0I <= bits(RW0I, 7, 0) + //~ mem_1_0.RW0M <= bits(RW0M, 0, 0) + //~ mem_1_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h1"))) + //~ mem_1_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h1"))) + //~ node RW0O_1 = RW0O_1_0 + //~ RW0O <= mux(eq(RW0A_sel, UInt<1>("h0")), RW0O_0, mux(eq(RW0A_sel, UInt<1>("h1")), RW0O_1, UInt<1>("h0"))) + + //~ extmodule vendor_sram : + //~ input clock : Clock + //~ input RW0A : UInt<10> + //~ input RW0I : UInt<8> + //~ output RW0O : UInt<8> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<1> + //~ input sleep : UInt<1> + + //~ defname = vendor_sram +//~ """ + //~ compile(mem, lib, v, false) + //~ execute(mem, lib, false, output) +//~ } diff --git a/tapeout/src/test/scala/transforms/macros/SplitWidth.scala b/tapeout/src/test/scala/transforms/macros/SplitWidth.scala index 0e4d638e5..1e172d643 100644 --- a/tapeout/src/test/scala/transforms/macros/SplitWidth.scala +++ b/tapeout/src/test/scala/transforms/macros/SplitWidth.scala @@ -1,468 +1,468 @@ -package barstools.tapeout.transforms.macros - -import java.io.File - -class SplitWidth2048x16_mrw extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2048x16-mrw.json") - val lib = new File(macroDir, "lib-2048x8-mrw.json") - val v = new File(testDir, "split_width_2048x16_mrw.v") - val output = -""" -circuit 
name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<16> - output RW0O : UInt<16> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<2> - - inst mem_0_0 of vendor_sram - inst mem_0_1 of vendor_sram - mem_0_0.clock <= clock - mem_0_0.RW0A <= RW0A - node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - mem_0_0.RW0I <= bits(RW0I, 7, 0) - mem_0_0.RW0M <= bits(RW0M, 0, 0) - mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) - mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_1.clock <= clock - mem_0_1.RW0A <= RW0A - node RW0O_0_1 = bits(mem_0_1.RW0O, 7, 0) - mem_0_1.RW0I <= bits(RW0I, 15, 8) - mem_0_1.RW0M <= bits(RW0M, 1, 1) - mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) - mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) - node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) - RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - extmodule vendor_sram : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<8> - output RW0O : UInt<8> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<1> - - defname = vendor_sram -""" - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) -} - -class SplitWidth2048x16_mrw_Uneven extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2048x16-mrw.json") - val lib = new File(macroDir, "lib-2048x10-rw.json") - val v = new File(testDir, "split_width_2048x16_mrw_uneven.v") - val output = -""" -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<16> - output RW0O : UInt<16> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<2> - - inst mem_0_0 of vendor_sram - inst mem_0_1 of vendor_sram - mem_0_0.clock <= clock - mem_0_0.RW0A <= RW0A - node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - mem_0_0.RW0I <= bits(RW0I, 7, 0) - mem_0_0.RW0W <= and(and(RW0W, bits(RW0M, 0, 0)), UInt<1>("h1")) - mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_1.clock <= clock - mem_0_1.RW0A <= RW0A - node RW0O_0_1 = bits(mem_0_1.RW0O, 7, 0) - mem_0_1.RW0I <= bits(RW0I, 15, 8) - mem_0_1.RW0W <= and(and(RW0W, bits(RW0M, 1, 1)), UInt<1>("h1")) - mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) - node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) - RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - extmodule vendor_sram : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<10> - output RW0O : UInt<10> - input RW0E : UInt<1> - input RW0W : UInt<1> - - defname = vendor_sram -""" - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) -} - -class SplitWidth2048x16_mrw_VeryUneven extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2048x16-mrw-2.json") - val lib = new File(macroDir, "lib-2048x10-rw.json") - val v = new File(testDir, "split_width_2048x16_mrw_very_uneven.v") - val output = -""" -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<16> - output RW0O : UInt<16> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<8> - - inst mem_0_0 of vendor_sram - inst mem_0_1 of vendor_sram - inst mem_0_2 of vendor_sram - inst mem_0_3 of vendor_sram - inst mem_0_4 of vendor_sram - inst mem_0_5 of vendor_sram - inst mem_0_6 of vendor_sram - inst mem_0_7 of vendor_sram - mem_0_0.clock <= clock - mem_0_0.RW0A <= RW0A - node RW0O_0_0 = bits(mem_0_0.RW0O, 1, 0) - mem_0_0.RW0I <= bits(RW0I, 1, 0) - mem_0_0.RW0W <= and(and(RW0W, bits(RW0M, 0, 0)), UInt<1>("h1")) - mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - 
mem_0_1.clock <= clock - mem_0_1.RW0A <= RW0A - node RW0O_0_1 = bits(mem_0_1.RW0O, 1, 0) - mem_0_1.RW0I <= bits(RW0I, 3, 2) - mem_0_1.RW0W <= and(and(RW0W, bits(RW0M, 1, 1)), UInt<1>("h1")) - mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_2.clock <= clock - mem_0_2.RW0A <= RW0A - node RW0O_0_2 = bits(mem_0_2.RW0O, 1, 0) - mem_0_2.RW0I <= bits(RW0I, 5, 4) - mem_0_2.RW0W <= and(and(RW0W, bits(RW0M, 2, 2)), UInt<1>("h1")) - mem_0_2.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_3.clock <= clock - mem_0_3.RW0A <= RW0A - node RW0O_0_3 = bits(mem_0_3.RW0O, 1, 0) - mem_0_3.RW0I <= bits(RW0I, 7, 6) - mem_0_3.RW0W <= and(and(RW0W, bits(RW0M, 3, 3)), UInt<1>("h1")) - mem_0_3.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_4.clock <= clock - mem_0_4.RW0A <= RW0A - node RW0O_0_4 = bits(mem_0_4.RW0O, 1, 0) - mem_0_4.RW0I <= bits(RW0I, 9, 8) - mem_0_4.RW0W <= and(and(RW0W, bits(RW0M, 4, 4)), UInt<1>("h1")) - mem_0_4.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_5.clock <= clock - mem_0_5.RW0A <= RW0A - node RW0O_0_5 = bits(mem_0_5.RW0O, 1, 0) - mem_0_5.RW0I <= bits(RW0I, 11, 10) - mem_0_5.RW0W <= and(and(RW0W, bits(RW0M, 5, 5)), UInt<1>("h1")) - mem_0_5.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_6.clock <= clock - mem_0_6.RW0A <= RW0A - node RW0O_0_6 = bits(mem_0_6.RW0O, 1, 0) - mem_0_6.RW0I <= bits(RW0I, 13, 12) - mem_0_6.RW0W <= and(and(RW0W, bits(RW0M, 6, 6)), UInt<1>("h1")) - mem_0_6.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_7.clock <= clock - mem_0_7.RW0A <= RW0A - node RW0O_0_7 = bits(mem_0_7.RW0O, 1, 0) - mem_0_7.RW0I <= bits(RW0I, 15, 14) - mem_0_7.RW0W <= and(and(RW0W, bits(RW0M, 7, 7)), UInt<1>("h1")) - mem_0_7.RW0E <= and(RW0E, UInt<1>("h1")) - node RW0O_0 = cat(RW0O_0_7, cat(RW0O_0_6, cat(RW0O_0_5, cat(RW0O_0_4, cat(RW0O_0_3, cat(RW0O_0_2, cat(RW0O_0_1, RW0O_0_0))))))) - RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - extmodule vendor_sram : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<10> - output RW0O : UInt<10> - input RW0E : UInt<1> - input RW0W : UInt<1> - - defname = vendor_sram -""" - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) -} - -class SplitWidth2048x16_mrw_ReadEnable extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2048x16-mrw.json") - val lib = new File(macroDir, "lib-2048x8-mrw-re.json") - val v = new File(testDir, "split_width_2048x16_mrw_read_enable.v") - val output = -""" -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<16> - output RW0O : UInt<16> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<2> - - inst mem_0_0 of vendor_sram - inst mem_0_1 of vendor_sram - mem_0_0.clock <= clock - mem_0_0.RW0A <= RW0A - node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - mem_0_0.RW0I <= bits(RW0I, 7, 0) - mem_0_0.RW0R <= not(and(not(RW0W), UInt<1>("h1"))) - mem_0_0.RW0M <= bits(RW0M, 0, 0) - mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) - mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_1.clock <= clock - mem_0_1.RW0A <= RW0A - node RW0O_0_1 = bits(mem_0_1.RW0O, 7, 0) - mem_0_1.RW0I <= bits(RW0I, 15, 8) - mem_0_1.RW0R <= not(and(not(RW0W), UInt<1>("h1"))) - mem_0_1.RW0M <= bits(RW0M, 1, 1) - mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) - mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) - node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) - RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - extmodule vendor_sram : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<8> - output RW0O : UInt<8> - input RW0E : UInt<1> - input RW0R : UInt<1> - input RW0W : 
UInt<1> - input RW0M : UInt<1> - - defname = vendor_sram -""" - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) -} - -class SplitWidth2048x16_n28 extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2048x16-mrw.json") - val lib = new File(macroDir, "lib-2048x16-n28.json") - val v = new File(testDir, "split_width_2048x16_n28.v") - val output = -""" -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<16> - output RW0O : UInt<16> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<2> - - inst mem_0_0 of vendor_sram_16 - mem_0_0.clock <= clock - mem_0_0.RW0A <= RW0A - node RW0O_0_0 = bits(mem_0_0.RW0O, 15, 0) - mem_0_0.RW0I <= bits(RW0I, 15, 0) - mem_0_0.RW0M <= cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))))))))))) - mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) - mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - node RW0O_0 = RW0O_0_0 - RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - extmodule vendor_sram_16 : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<16> - output RW0O : UInt<16> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<16> - - defname = vendor_sram_16 -""" - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) -} - -class SplitWidth2048x20_mrw_UnevenMask extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2048x20-mrw.json") - val lib = new File(macroDir, "lib-2048x8-mrw.json") - val v = new File(testDir, "split_width_2048x20_mrw_uneven_mask.v") - val output = -""" -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<20> - output RW0O : UInt<20> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<2> - - inst mem_0_0 of vendor_sram - inst mem_0_1 of vendor_sram - inst mem_0_2 of vendor_sram - inst mem_0_3 of vendor_sram - mem_0_0.clock <= clock - mem_0_0.RW0A <= RW0A - node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - mem_0_0.RW0I <= bits(RW0I, 7, 0) - mem_0_0.RW0M <= bits(RW0M, 0, 0) - mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) - mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_1.clock <= clock - mem_0_1.RW0A <= RW0A - node RW0O_0_1 = bits(mem_0_1.RW0O, 1, 0) - mem_0_1.RW0I <= bits(RW0I, 9, 8) - mem_0_1.RW0M <= bits(RW0M, 0, 0) - mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) - mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_2.clock <= clock - mem_0_2.RW0A <= RW0A - node RW0O_0_2 = bits(mem_0_2.RW0O, 7, 0) - mem_0_2.RW0I <= bits(RW0I, 17, 10) - mem_0_2.RW0M <= bits(RW0M, 1, 1) - mem_0_2.RW0W <= and(RW0W, UInt<1>("h1")) - mem_0_2.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_3.clock <= clock - mem_0_3.RW0A <= RW0A - node RW0O_0_3 = bits(mem_0_3.RW0O, 1, 0) - mem_0_3.RW0I <= bits(RW0I, 19, 18) - mem_0_3.RW0M <= bits(RW0M, 1, 1) - mem_0_3.RW0W <= and(RW0W, UInt<1>("h1")) - mem_0_3.RW0E <= and(RW0E, UInt<1>("h1")) - node RW0O_0 = cat(RW0O_0_3, cat(RW0O_0_2, cat(RW0O_0_1, RW0O_0_0))) - RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - extmodule vendor_sram : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<8> - output RW0O : UInt<8> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : 
UInt<1> - - defname = vendor_sram -""" - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) -} - -class SplitWidth24x52 extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-24x52-r-w.json") - val lib = new File(macroDir, "lib-32x32-2rw.json") - val v = new File(testDir, "split_width_24x52.v") - val output = -""" -circuit entries_info_ext : - module entries_info_ext : - input R0_clk : Clock - input R0_addr : UInt<5> - output R0_data : UInt<52> - input R0_en : UInt<1> - input W0_clk : Clock - input W0_addr : UInt<5> - input W0_data : UInt<52> - input W0_en : UInt<1> - - inst mem_0_0 of SRAM2RW32x32 - inst mem_0_1 of SRAM2RW32x32 - mem_0_0.CE1 <= W0_clk - mem_0_0.A1 <= W0_addr - mem_0_0.I1 <= bits(W0_data, 31, 0) - mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_0.CSB1 <= not(and(W0_en, UInt<1>("h1"))) - mem_0_1.CE1 <= W0_clk - mem_0_1.A1 <= W0_addr - mem_0_1.I1 <= bits(W0_data, 51, 32) - mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_1.CSB1 <= not(and(W0_en, UInt<1>("h1"))) - mem_0_0.CE2 <= R0_clk - mem_0_0.A2 <= R0_addr - node R0_data_0_0 = bits(mem_0_0.O2, 31, 0) - mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_0.CSB2 <= not(and(R0_en, UInt<1>("h1"))) - mem_0_1.CE2 <= R0_clk - mem_0_1.A2 <= R0_addr - node R0_data_0_1 = bits(mem_0_1.O2, 19, 0) - mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_1.CSB2 <= not(and(R0_en, UInt<1>("h1"))) - node R0_data_0 = cat(R0_data_0_1, R0_data_0_0) - R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) - - extmodule SRAM2RW32x32 : - input CE1 : Clock - input A1 : UInt<5> - input I1 : UInt<32> - output O1 : UInt<32> - input CSB1 : UInt<1> - input OEB1 : UInt<1> - input WEB1 : UInt<1> - input CE2 : Clock - input A2 : UInt<5> - input I2 : UInt<32> - output O2 : UInt<32> - input CSB2 : UInt<1> - input OEB2 : UInt<1> - input WEB2 : UInt<1> - - defname = SRAM2RW32x32 -""" - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) -} - -class SplitWidth32x160 extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-32x160-mrw.json") - val lib = new File(macroDir, "lib-32x80-mrw.json") - val v = new File(testDir, "split_width_32x160.v") - val output = -""" -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input RW0A : UInt<5> - input RW0I : UInt<160> - output RW0O : UInt<160> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<8> - - inst mem_0_0 of vendor_sram - inst mem_0_1 of vendor_sram - mem_0_0.clock <= clock - mem_0_0.RW0A <= RW0A - node RW0O_0_0 = bits(mem_0_0.RW0O, 79, 0) - mem_0_0.RW0I <= bits(RW0I, 79, 0) - mem_0_0.RW0M <= cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), 
cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) - mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) - mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - mem_0_1.clock <= clock - mem_0_1.RW0A <= RW0A - node RW0O_0_1 = bits(mem_0_1.RW0O, 79, 0) - mem_0_1.RW0I <= bits(RW0I, 159, 80) - mem_0_1.RW0M <= cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), bits(RW0M, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) - mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) - mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) - node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) - RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - 
- extmodule vendor_sram : - input clock : Clock - input RW0A : UInt<5> - input RW0I : UInt<80> - output RW0O : UInt<80> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<80> - - defname = vendor_sram -""" - compile(mem, Some(lib), v, false) - execute(Some(mem), Some(lib), false, output) -} +//~ package barstools.tapeout.transforms.macros.test + +//~ import java.io.File + +//~ class SplitWidth2048x16_mrw extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "mem-2048x16-mrw.json") + //~ val lib = new File(macroDir, "lib-2048x8-mrw.json") + //~ val v = new File(testDir, "split_width_2048x16_mrw.v") + //~ val output = +//~ """ +//~ circuit name_of_sram_module : + //~ module name_of_sram_module : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<16> + //~ output RW0O : UInt<16> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<2> + + //~ inst mem_0_0 of vendor_sram + //~ inst mem_0_1 of vendor_sram + //~ mem_0_0.clock <= clock + //~ mem_0_0.RW0A <= RW0A + //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + //~ mem_0_0.RW0I <= bits(RW0I, 7, 0) + //~ mem_0_0.RW0M <= bits(RW0M, 0, 0) + //~ mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) + //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_1.clock <= clock + //~ mem_0_1.RW0A <= RW0A + //~ node RW0O_0_1 = bits(mem_0_1.RW0O, 7, 0) + //~ mem_0_1.RW0I <= bits(RW0I, 15, 8) + //~ mem_0_1.RW0M <= bits(RW0M, 1, 1) + //~ mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) + //~ mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) + //~ node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) + //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + //~ extmodule vendor_sram : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<8> + //~ output RW0O : UInt<8> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<1> + + //~ defname = vendor_sram +//~ """ + //~ compile(mem, Some(lib), v, false) + //~ execute(Some(mem), Some(lib), false, output) +//~ } + +//~ class SplitWidth2048x16_mrw_Uneven extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "mem-2048x16-mrw.json") + //~ val lib = new File(macroDir, "lib-2048x10-rw.json") + //~ val v = new File(testDir, "split_width_2048x16_mrw_uneven.v") + //~ val output = +//~ """ +//~ circuit name_of_sram_module : + //~ module name_of_sram_module : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<16> + //~ output RW0O : UInt<16> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<2> + + //~ inst mem_0_0 of vendor_sram + //~ inst mem_0_1 of vendor_sram + //~ mem_0_0.clock <= clock + //~ mem_0_0.RW0A <= RW0A + //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + //~ mem_0_0.RW0I <= bits(RW0I, 7, 0) + //~ mem_0_0.RW0W <= and(and(RW0W, bits(RW0M, 0, 0)), UInt<1>("h1")) + //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_1.clock <= clock + //~ mem_0_1.RW0A <= RW0A + //~ node RW0O_0_1 = bits(mem_0_1.RW0O, 7, 0) + //~ mem_0_1.RW0I <= bits(RW0I, 15, 8) + //~ mem_0_1.RW0W <= and(and(RW0W, bits(RW0M, 1, 1)), UInt<1>("h1")) + //~ mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) + //~ node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) + //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + //~ extmodule vendor_sram : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<10> + //~ output RW0O : UInt<10> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + + //~ defname = vendor_sram +//~ """ + //~ compile(mem, Some(lib), v, false) + //~ execute(Some(mem), Some(lib), false, 
output) +//~ } + +//~ class SplitWidth2048x16_mrw_VeryUneven extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "mem-2048x16-mrw-2.json") + //~ val lib = new File(macroDir, "lib-2048x10-rw.json") + //~ val v = new File(testDir, "split_width_2048x16_mrw_very_uneven.v") + //~ val output = +//~ """ +//~ circuit name_of_sram_module : + //~ module name_of_sram_module : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<16> + //~ output RW0O : UInt<16> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<8> + + //~ inst mem_0_0 of vendor_sram + //~ inst mem_0_1 of vendor_sram + //~ inst mem_0_2 of vendor_sram + //~ inst mem_0_3 of vendor_sram + //~ inst mem_0_4 of vendor_sram + //~ inst mem_0_5 of vendor_sram + //~ inst mem_0_6 of vendor_sram + //~ inst mem_0_7 of vendor_sram + //~ mem_0_0.clock <= clock + //~ mem_0_0.RW0A <= RW0A + //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 1, 0) + //~ mem_0_0.RW0I <= bits(RW0I, 1, 0) + //~ mem_0_0.RW0W <= and(and(RW0W, bits(RW0M, 0, 0)), UInt<1>("h1")) + //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_1.clock <= clock + //~ mem_0_1.RW0A <= RW0A + //~ node RW0O_0_1 = bits(mem_0_1.RW0O, 1, 0) + //~ mem_0_1.RW0I <= bits(RW0I, 3, 2) + //~ mem_0_1.RW0W <= and(and(RW0W, bits(RW0M, 1, 1)), UInt<1>("h1")) + //~ mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_2.clock <= clock + //~ mem_0_2.RW0A <= RW0A + //~ node RW0O_0_2 = bits(mem_0_2.RW0O, 1, 0) + //~ mem_0_2.RW0I <= bits(RW0I, 5, 4) + //~ mem_0_2.RW0W <= and(and(RW0W, bits(RW0M, 2, 2)), UInt<1>("h1")) + //~ mem_0_2.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_3.clock <= clock + //~ mem_0_3.RW0A <= RW0A + //~ node RW0O_0_3 = bits(mem_0_3.RW0O, 1, 0) + //~ mem_0_3.RW0I <= bits(RW0I, 7, 6) + //~ mem_0_3.RW0W <= and(and(RW0W, bits(RW0M, 3, 3)), UInt<1>("h1")) + //~ mem_0_3.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_4.clock <= clock + //~ mem_0_4.RW0A <= RW0A + //~ node RW0O_0_4 = bits(mem_0_4.RW0O, 1, 0) + //~ mem_0_4.RW0I <= bits(RW0I, 9, 8) + //~ mem_0_4.RW0W <= and(and(RW0W, bits(RW0M, 4, 4)), UInt<1>("h1")) + //~ mem_0_4.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_5.clock <= clock + //~ mem_0_5.RW0A <= RW0A + //~ node RW0O_0_5 = bits(mem_0_5.RW0O, 1, 0) + //~ mem_0_5.RW0I <= bits(RW0I, 11, 10) + //~ mem_0_5.RW0W <= and(and(RW0W, bits(RW0M, 5, 5)), UInt<1>("h1")) + //~ mem_0_5.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_6.clock <= clock + //~ mem_0_6.RW0A <= RW0A + //~ node RW0O_0_6 = bits(mem_0_6.RW0O, 1, 0) + //~ mem_0_6.RW0I <= bits(RW0I, 13, 12) + //~ mem_0_6.RW0W <= and(and(RW0W, bits(RW0M, 6, 6)), UInt<1>("h1")) + //~ mem_0_6.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_7.clock <= clock + //~ mem_0_7.RW0A <= RW0A + //~ node RW0O_0_7 = bits(mem_0_7.RW0O, 1, 0) + //~ mem_0_7.RW0I <= bits(RW0I, 15, 14) + //~ mem_0_7.RW0W <= and(and(RW0W, bits(RW0M, 7, 7)), UInt<1>("h1")) + //~ mem_0_7.RW0E <= and(RW0E, UInt<1>("h1")) + //~ node RW0O_0 = cat(RW0O_0_7, cat(RW0O_0_6, cat(RW0O_0_5, cat(RW0O_0_4, cat(RW0O_0_3, cat(RW0O_0_2, cat(RW0O_0_1, RW0O_0_0))))))) + //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + //~ extmodule vendor_sram : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<10> + //~ output RW0O : UInt<10> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + + //~ defname = vendor_sram +//~ """ + //~ compile(mem, Some(lib), v, false) + //~ execute(Some(mem), Some(lib), false, output) +//~ } + +//~ class SplitWidth2048x16_mrw_ReadEnable extends MacroCompilerSpec { + //~ val mem = new 
File(macroDir, "mem-2048x16-mrw.json") + //~ val lib = new File(macroDir, "lib-2048x8-mrw-re.json") + //~ val v = new File(testDir, "split_width_2048x16_mrw_read_enable.v") + //~ val output = +//~ """ +//~ circuit name_of_sram_module : + //~ module name_of_sram_module : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<16> + //~ output RW0O : UInt<16> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<2> + + //~ inst mem_0_0 of vendor_sram + //~ inst mem_0_1 of vendor_sram + //~ mem_0_0.clock <= clock + //~ mem_0_0.RW0A <= RW0A + //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + //~ mem_0_0.RW0I <= bits(RW0I, 7, 0) + //~ mem_0_0.RW0R <= not(and(not(RW0W), UInt<1>("h1"))) + //~ mem_0_0.RW0M <= bits(RW0M, 0, 0) + //~ mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) + //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_1.clock <= clock + //~ mem_0_1.RW0A <= RW0A + //~ node RW0O_0_1 = bits(mem_0_1.RW0O, 7, 0) + //~ mem_0_1.RW0I <= bits(RW0I, 15, 8) + //~ mem_0_1.RW0R <= not(and(not(RW0W), UInt<1>("h1"))) + //~ mem_0_1.RW0M <= bits(RW0M, 1, 1) + //~ mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) + //~ mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) + //~ node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) + //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + //~ extmodule vendor_sram : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<8> + //~ output RW0O : UInt<8> + //~ input RW0E : UInt<1> + //~ input RW0R : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<1> + + //~ defname = vendor_sram +//~ """ + //~ compile(mem, Some(lib), v, false) + //~ execute(Some(mem), Some(lib), false, output) +//~ } + +//~ class SplitWidth2048x16_n28 extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "mem-2048x16-mrw.json") + //~ val lib = new File(macroDir, "lib-2048x16-n28.json") + //~ val v = new File(testDir, "split_width_2048x16_n28.v") + //~ val output = +//~ """ +//~ circuit name_of_sram_module : + //~ module name_of_sram_module : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<16> + //~ output RW0O : UInt<16> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<2> + + //~ inst mem_0_0 of vendor_sram_16 + //~ mem_0_0.clock <= clock + //~ mem_0_0.RW0A <= RW0A + //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 15, 0) + //~ mem_0_0.RW0I <= bits(RW0I, 15, 0) + //~ mem_0_0.RW0M <= cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))))))))))) + //~ mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) + //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + //~ node RW0O_0 = RW0O_0_0 + //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + //~ extmodule vendor_sram_16 : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<16> + //~ output RW0O : UInt<16> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<16> + + //~ defname = vendor_sram_16 +//~ """ + //~ compile(mem, Some(lib), v, false) + //~ execute(Some(mem), Some(lib), false, output) +//~ } + +//~ class SplitWidth2048x20_mrw_UnevenMask extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "mem-2048x20-mrw.json") + //~ val lib = new File(macroDir, "lib-2048x8-mrw.json") + //~ val v = new File(testDir, 
"split_width_2048x20_mrw_uneven_mask.v") + //~ val output = +//~ """ +//~ circuit name_of_sram_module : + //~ module name_of_sram_module : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<20> + //~ output RW0O : UInt<20> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<2> + + //~ inst mem_0_0 of vendor_sram + //~ inst mem_0_1 of vendor_sram + //~ inst mem_0_2 of vendor_sram + //~ inst mem_0_3 of vendor_sram + //~ mem_0_0.clock <= clock + //~ mem_0_0.RW0A <= RW0A + //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) + //~ mem_0_0.RW0I <= bits(RW0I, 7, 0) + //~ mem_0_0.RW0M <= bits(RW0M, 0, 0) + //~ mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) + //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_1.clock <= clock + //~ mem_0_1.RW0A <= RW0A + //~ node RW0O_0_1 = bits(mem_0_1.RW0O, 1, 0) + //~ mem_0_1.RW0I <= bits(RW0I, 9, 8) + //~ mem_0_1.RW0M <= bits(RW0M, 0, 0) + //~ mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) + //~ mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_2.clock <= clock + //~ mem_0_2.RW0A <= RW0A + //~ node RW0O_0_2 = bits(mem_0_2.RW0O, 7, 0) + //~ mem_0_2.RW0I <= bits(RW0I, 17, 10) + //~ mem_0_2.RW0M <= bits(RW0M, 1, 1) + //~ mem_0_2.RW0W <= and(RW0W, UInt<1>("h1")) + //~ mem_0_2.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_3.clock <= clock + //~ mem_0_3.RW0A <= RW0A + //~ node RW0O_0_3 = bits(mem_0_3.RW0O, 1, 0) + //~ mem_0_3.RW0I <= bits(RW0I, 19, 18) + //~ mem_0_3.RW0M <= bits(RW0M, 1, 1) + //~ mem_0_3.RW0W <= and(RW0W, UInt<1>("h1")) + //~ mem_0_3.RW0E <= and(RW0E, UInt<1>("h1")) + //~ node RW0O_0 = cat(RW0O_0_3, cat(RW0O_0_2, cat(RW0O_0_1, RW0O_0_0))) + //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + //~ extmodule vendor_sram : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<8> + //~ output RW0O : UInt<8> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<1> + + //~ defname = vendor_sram +//~ """ + //~ compile(mem, Some(lib), v, false) + //~ execute(Some(mem), Some(lib), false, output) +//~ } + +//~ class SplitWidth24x52 extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "mem-24x52-r-w.json") + //~ val lib = new File(macroDir, "lib-32x32-2rw.json") + //~ val v = new File(testDir, "split_width_24x52.v") + //~ val output = +//~ """ +//~ circuit entries_info_ext : + //~ module entries_info_ext : + //~ input R0_clk : Clock + //~ input R0_addr : UInt<5> + //~ output R0_data : UInt<52> + //~ input R0_en : UInt<1> + //~ input W0_clk : Clock + //~ input W0_addr : UInt<5> + //~ input W0_data : UInt<52> + //~ input W0_en : UInt<1> + + //~ inst mem_0_0 of SRAM2RW32x32 + //~ inst mem_0_1 of SRAM2RW32x32 + //~ mem_0_0.CE1 <= W0_clk + //~ mem_0_0.A1 <= W0_addr + //~ mem_0_0.I1 <= bits(W0_data, 31, 0) + //~ mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_0.CSB1 <= not(and(W0_en, UInt<1>("h1"))) + //~ mem_0_1.CE1 <= W0_clk + //~ mem_0_1.A1 <= W0_addr + //~ mem_0_1.I1 <= bits(W0_data, 51, 32) + //~ mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_1.CSB1 <= not(and(W0_en, UInt<1>("h1"))) + //~ mem_0_0.CE2 <= R0_clk + //~ mem_0_0.A2 <= R0_addr + //~ node R0_data_0_0 = bits(mem_0_0.O2, 31, 0) + //~ mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + //~ mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_0.CSB2 <= 
not(and(R0_en, UInt<1>("h1"))) + //~ mem_0_1.CE2 <= R0_clk + //~ mem_0_1.A2 <= R0_addr + //~ node R0_data_0_1 = bits(mem_0_1.O2, 19, 0) + //~ mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + //~ mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + //~ mem_0_1.CSB2 <= not(and(R0_en, UInt<1>("h1"))) + //~ node R0_data_0 = cat(R0_data_0_1, R0_data_0_0) + //~ R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) + + //~ extmodule SRAM2RW32x32 : + //~ input CE1 : Clock + //~ input A1 : UInt<5> + //~ input I1 : UInt<32> + //~ output O1 : UInt<32> + //~ input CSB1 : UInt<1> + //~ input OEB1 : UInt<1> + //~ input WEB1 : UInt<1> + //~ input CE2 : Clock + //~ input A2 : UInt<5> + //~ input I2 : UInt<32> + //~ output O2 : UInt<32> + //~ input CSB2 : UInt<1> + //~ input OEB2 : UInt<1> + //~ input WEB2 : UInt<1> + + //~ defname = SRAM2RW32x32 +//~ """ + //~ compile(mem, Some(lib), v, false) + //~ execute(Some(mem), Some(lib), false, output) +//~ } + +//~ class SplitWidth32x160 extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "mem-32x160-mrw.json") + //~ val lib = new File(macroDir, "lib-32x80-mrw.json") + //~ val v = new File(testDir, "split_width_32x160.v") + //~ val output = +//~ """ +//~ circuit name_of_sram_module : + //~ module name_of_sram_module : + //~ input clock : Clock + //~ input RW0A : UInt<5> + //~ input RW0I : UInt<160> + //~ output RW0O : UInt<160> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<8> + + //~ inst mem_0_0 of vendor_sram + //~ inst mem_0_1 of vendor_sram + //~ mem_0_0.clock <= clock + //~ mem_0_0.RW0A <= RW0A + //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 79, 0) + //~ mem_0_0.RW0I <= bits(RW0I, 79, 0) + //~ mem_0_0.RW0M <= cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 
0)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) + //~ mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) + //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) + //~ mem_0_1.clock <= clock + //~ mem_0_1.RW0A <= RW0A + //~ node RW0O_0_1 = bits(mem_0_1.RW0O, 79, 0) + //~ mem_0_1.RW0I <= bits(RW0I, 159, 80) + //~ mem_0_1.RW0M <= cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), bits(RW0M, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) + //~ mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) + //~ mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) + //~ node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) + //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) + + //~ extmodule vendor_sram : + //~ input clock : Clock + //~ input RW0A : UInt<5> + //~ input RW0I : UInt<80> + //~ output RW0O : UInt<80> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<80> + + //~ defname = vendor_sram +//~ """ + //~ compile(mem, Some(lib), v, false) + //~ execute(Some(mem), Some(lib), false, output) +//~ } diff --git a/tapeout/src/test/scala/transforms/macros/SynFlops.scala b/tapeout/src/test/scala/transforms/macros/SynFlops.scala index 981e5e8d9..2b3ff531b 100644 --- a/tapeout/src/test/scala/transforms/macros/SynFlops.scala +++ b/tapeout/src/test/scala/transforms/macros/SynFlops.scala @@ -1,333 +1,333 @@ -package barstools.tapeout.transforms.macros +//~ package barstools.tapeout.transforms.macros -import java.io.File +//~ import java.io.File -class Synflops2048x16_mrw extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2048x16-mrw.json") - val v = new File(testDir, "syn_flops_2048x16_mrw.v") - val output = -""" -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<16> - output RW0O : UInt<16> - input RW0E : UInt<1> - input 
RW0W : UInt<1> - input RW0M : UInt<2> +//~ class Synflops2048x16_mrw extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "mem-2048x16-mrw.json") + //~ val v = new File(testDir, "syn_flops_2048x16_mrw.v") + //~ val output = +//~ """ +//~ circuit name_of_sram_module : + //~ module name_of_sram_module : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<16> + //~ output RW0O : UInt<16> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<2> - mem ram : - data-type => UInt<8>[2] - depth => 2048 - read-latency => 0 - write-latency => 1 - reader => R_0 - writer => W_0 - read-under-write => undefined - reg R_0_addr_reg : UInt<11>, clock with : - reset => (UInt<1>("h0"), R_0_addr_reg) - ram.R_0.clk <= clock - ram.R_0.addr <= R_0_addr_reg - ram.R_0.en <= RW0E - RW0O <= cat(ram.R_0.data[1], ram.R_0.data[0]) - R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) - ram.W_0.clk <= clock - ram.W_0.addr <= RW0A - ram.W_0.en <= and(RW0E, RW0W) - ram.W_0.data[0] <= bits(RW0I, 7, 0) - ram.W_0.data[1] <= bits(RW0I, 15, 8) - ram.W_0.mask[0] <= bits(RW0M, 0, 0) - ram.W_0.mask[1] <= bits(RW0M, 1, 1) -""" - compile(mem, None, v, true) - execute(Some(mem), None, true, output) -} + //~ mem ram : + //~ data-type => UInt<8>[2] + //~ depth => 2048 + //~ read-latency => 0 + //~ write-latency => 1 + //~ reader => R_0 + //~ writer => W_0 + //~ read-under-write => undefined + //~ reg R_0_addr_reg : UInt<11>, clock with : + //~ reset => (UInt<1>("h0"), R_0_addr_reg) + //~ ram.R_0.clk <= clock + //~ ram.R_0.addr <= R_0_addr_reg + //~ ram.R_0.en <= RW0E + //~ RW0O <= cat(ram.R_0.data[1], ram.R_0.data[0]) + //~ R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) + //~ ram.W_0.clk <= clock + //~ ram.W_0.addr <= RW0A + //~ ram.W_0.en <= and(RW0E, RW0W) + //~ ram.W_0.data[0] <= bits(RW0I, 7, 0) + //~ ram.W_0.data[1] <= bits(RW0I, 15, 8) + //~ ram.W_0.mask[0] <= bits(RW0M, 0, 0) + //~ ram.W_0.mask[1] <= bits(RW0M, 1, 1) +//~ """ + //~ compile(mem, None, v, true) + //~ execute(Some(mem), None, true, output) +//~ } -class Synflops2048x8_r_mw extends MacroCompilerSpec { - val mem = new File(macroDir, "mem-2048x8-r-mw.json") - val v = new File(testDir, "syn_flops_2048x8_r_mw.v") - val output = -""" -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input W0A : UInt<11> - input W0I : UInt<8> - input W0E : UInt<1> - input W0M : UInt<1> - input clock : Clock - input R0A : UInt<11> - output R0O : UInt<8> +//~ class Synflops2048x8_r_mw extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "mem-2048x8-r-mw.json") + //~ val v = new File(testDir, "syn_flops_2048x8_r_mw.v") + //~ val output = +//~ """ +//~ circuit name_of_sram_module : + //~ module name_of_sram_module : + //~ input clock : Clock + //~ input W0A : UInt<11> + //~ input W0I : UInt<8> + //~ input W0E : UInt<1> + //~ input W0M : UInt<1> + //~ input clock : Clock + //~ input R0A : UInt<11> + //~ output R0O : UInt<8> - mem ram : - data-type => UInt<8>[1] - depth => 2048 - read-latency => 0 - write-latency => 1 - reader => R_0 - writer => W_0 - read-under-write => undefined - reg R_0_addr_reg : UInt<11>, clock with : - reset => (UInt<1>("h0"), R_0_addr_reg) - ram.R_0.clk <= clock - ram.R_0.addr <= R_0_addr_reg - ram.R_0.en <= UInt<1>("h1") - R0O <= ram.R_0.data[0] - R_0_addr_reg <= mux(UInt<1>("h1"), R0A, R_0_addr_reg) - ram.W_0.clk <= clock - ram.W_0.addr <= W0A - ram.W_0.en <= W0E - ram.W_0.data[0] <= bits(W0I, 7, 0) - ram.W_0.mask[0] <= bits(W0M, 0, 0) -""" - compile(mem, None, v, true) 
- execute(Some(mem), None, true, output) -} + //~ mem ram : + //~ data-type => UInt<8>[1] + //~ depth => 2048 + //~ read-latency => 0 + //~ write-latency => 1 + //~ reader => R_0 + //~ writer => W_0 + //~ read-under-write => undefined + //~ reg R_0_addr_reg : UInt<11>, clock with : + //~ reset => (UInt<1>("h0"), R_0_addr_reg) + //~ ram.R_0.clk <= clock + //~ ram.R_0.addr <= R_0_addr_reg + //~ ram.R_0.en <= UInt<1>("h1") + //~ R0O <= ram.R_0.data[0] + //~ R_0_addr_reg <= mux(UInt<1>("h1"), R0A, R_0_addr_reg) + //~ ram.W_0.clk <= clock + //~ ram.W_0.addr <= W0A + //~ ram.W_0.en <= W0E + //~ ram.W_0.data[0] <= bits(W0I, 7, 0) + //~ ram.W_0.mask[0] <= bits(W0M, 0, 0) +//~ """ + //~ compile(mem, None, v, true) + //~ execute(Some(mem), None, true, output) +//~ } -class Synflops2048x10_rw extends MacroCompilerSpec { - val mem = new File(macroDir, "lib-2048x10-rw.json") - val v = new File(testDir, "syn_flops_2048x10_rw.v") - val output = -""" -circuit vendor_sram : - module vendor_sram : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<10> - output RW0O : UInt<10> - input RW0E : UInt<1> - input RW0W : UInt<1> +//~ class Synflops2048x10_rw extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "lib-2048x10-rw.json") + //~ val v = new File(testDir, "syn_flops_2048x10_rw.v") + //~ val output = +//~ """ +//~ circuit vendor_sram : + //~ module vendor_sram : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<10> + //~ output RW0O : UInt<10> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> - mem ram : - data-type => UInt<10> - depth => 2048 - read-latency => 0 - write-latency => 1 - reader => R_0 - writer => W_0 - read-under-write => undefined - reg R_0_addr_reg : UInt<11>, clock with : - reset => (UInt<1>("h0"), R_0_addr_reg) - ram.R_0.clk <= clock - ram.R_0.addr <= R_0_addr_reg - ram.R_0.en <= RW0E - RW0O <= ram.R_0.data - R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) - ram.W_0.clk <= clock - ram.W_0.addr <= RW0A - ram.W_0.en <= and(RW0E, RW0W) - ram.W_0.data <= RW0I - ram.W_0.mask <= UInt<1>("h1") -""" - compile(mem, None, v, true) - execute(Some(mem), None, true, output) -} + //~ mem ram : + //~ data-type => UInt<10> + //~ depth => 2048 + //~ read-latency => 0 + //~ write-latency => 1 + //~ reader => R_0 + //~ writer => W_0 + //~ read-under-write => undefined + //~ reg R_0_addr_reg : UInt<11>, clock with : + //~ reset => (UInt<1>("h0"), R_0_addr_reg) + //~ ram.R_0.clk <= clock + //~ ram.R_0.addr <= R_0_addr_reg + //~ ram.R_0.en <= RW0E + //~ RW0O <= ram.R_0.data + //~ R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) + //~ ram.W_0.clk <= clock + //~ ram.W_0.addr <= RW0A + //~ ram.W_0.en <= and(RW0E, RW0W) + //~ ram.W_0.data <= RW0I + //~ ram.W_0.mask <= UInt<1>("h1") +//~ """ + //~ compile(mem, None, v, true) + //~ execute(Some(mem), None, true, output) +//~ } -class Synflops2048x8_mrw_re extends MacroCompilerSpec { - val mem = new File(macroDir, "lib-2048x8-mrw-re.json") - val v = new File(testDir, "syn_flops_2048x8_mrw_re.v") - val output = -""" -circuit vendor_sram : - module vendor_sram : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<8> - output RW0O : UInt<8> - input RW0E : UInt<1> - input RW0R : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<1> +//~ class Synflops2048x8_mrw_re extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "lib-2048x8-mrw-re.json") + //~ val v = new File(testDir, "syn_flops_2048x8_mrw_re.v") + //~ val output = +//~ """ +//~ circuit vendor_sram : + //~ module vendor_sram : + //~ input 
clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<8> + //~ output RW0O : UInt<8> + //~ input RW0E : UInt<1> + //~ input RW0R : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<1> - mem ram : - data-type => UInt<8>[1] - depth => 2048 - read-latency => 0 - write-latency => 1 - reader => R_0 - writer => W_0 - read-under-write => undefined - reg R_0_addr_reg : UInt<11>, clock with : - reset => (UInt<1>("h0"), R_0_addr_reg) - ram.R_0.clk <= clock - ram.R_0.addr <= R_0_addr_reg - ram.R_0.en <= and(RW0E, not(RW0R)) - RW0O <= ram.R_0.data[0] - R_0_addr_reg <= mux(and(RW0E, not(RW0R)), RW0A, R_0_addr_reg) - ram.W_0.clk <= clock - ram.W_0.addr <= RW0A - ram.W_0.en <= and(RW0E, RW0W) - ram.W_0.data[0] <= bits(RW0I, 7, 0) - ram.W_0.mask[0] <= bits(RW0M, 0, 0) -""" - compile(mem, None, v, true) - execute(Some(mem), None, true, output) -} + //~ mem ram : + //~ data-type => UInt<8>[1] + //~ depth => 2048 + //~ read-latency => 0 + //~ write-latency => 1 + //~ reader => R_0 + //~ writer => W_0 + //~ read-under-write => undefined + //~ reg R_0_addr_reg : UInt<11>, clock with : + //~ reset => (UInt<1>("h0"), R_0_addr_reg) + //~ ram.R_0.clk <= clock + //~ ram.R_0.addr <= R_0_addr_reg + //~ ram.R_0.en <= and(RW0E, not(RW0R)) + //~ RW0O <= ram.R_0.data[0] + //~ R_0_addr_reg <= mux(and(RW0E, not(RW0R)), RW0A, R_0_addr_reg) + //~ ram.W_0.clk <= clock + //~ ram.W_0.addr <= RW0A + //~ ram.W_0.en <= and(RW0E, RW0W) + //~ ram.W_0.data[0] <= bits(RW0I, 7, 0) + //~ ram.W_0.mask[0] <= bits(RW0M, 0, 0) +//~ """ + //~ compile(mem, None, v, true) + //~ execute(Some(mem), None, true, output) +//~ } -class Synflops2048x16_n28 extends MacroCompilerSpec { - val mem = new File(macroDir, "lib-2048x16-n28.json") - val v = new File(testDir, "syn_flops_2048x16_n28.v") - val output = -""" -circuit vendor_sram_4 : - module vendor_sram_16 : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<16> - output RW0O : UInt<16> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<16> +//~ class Synflops2048x16_n28 extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "lib-2048x16-n28.json") + //~ val v = new File(testDir, "syn_flops_2048x16_n28.v") + //~ val output = +//~ """ +//~ circuit vendor_sram_4 : + //~ module vendor_sram_16 : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<16> + //~ output RW0O : UInt<16> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<16> - mem ram : - data-type => UInt<1>[16] - depth => 2048 - read-latency => 0 - write-latency => 1 - reader => R_0 - writer => W_0 - read-under-write => undefined - reg R_0_addr_reg : UInt<11>, clock with : - reset => (UInt<1>("h0"), R_0_addr_reg) - ram.R_0.clk <= clock - ram.R_0.addr <= R_0_addr_reg - ram.R_0.en <= RW0E - RW0O <= cat(ram.R_0.data[15], cat(ram.R_0.data[14], cat(ram.R_0.data[13], cat(ram.R_0.data[12], cat(ram.R_0.data[11], cat(ram.R_0.data[10], cat(ram.R_0.data[9], cat(ram.R_0.data[8], cat(ram.R_0.data[7], cat(ram.R_0.data[6], cat(ram.R_0.data[5], cat(ram.R_0.data[4], cat(ram.R_0.data[3], cat(ram.R_0.data[2], cat(ram.R_0.data[1], ram.R_0.data[0]))))))))))))))) - R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) - ram.W_0.clk <= clock - ram.W_0.addr <= RW0A - ram.W_0.en <= and(RW0E, RW0W) - ram.W_0.data[0] <= bits(RW0I, 0, 0) - ram.W_0.data[1] <= bits(RW0I, 1, 1) - ram.W_0.data[2] <= bits(RW0I, 2, 2) - ram.W_0.data[3] <= bits(RW0I, 3, 3) - ram.W_0.data[4] <= bits(RW0I, 4, 4) - ram.W_0.data[5] <= bits(RW0I, 5, 5) - ram.W_0.data[6] <= bits(RW0I, 6, 6) - 
ram.W_0.data[7] <= bits(RW0I, 7, 7) - ram.W_0.data[8] <= bits(RW0I, 8, 8) - ram.W_0.data[9] <= bits(RW0I, 9, 9) - ram.W_0.data[10] <= bits(RW0I, 10, 10) - ram.W_0.data[11] <= bits(RW0I, 11, 11) - ram.W_0.data[12] <= bits(RW0I, 12, 12) - ram.W_0.data[13] <= bits(RW0I, 13, 13) - ram.W_0.data[14] <= bits(RW0I, 14, 14) - ram.W_0.data[15] <= bits(RW0I, 15, 15) - ram.W_0.mask[0] <= bits(RW0M, 0, 0) - ram.W_0.mask[1] <= bits(RW0M, 1, 1) - ram.W_0.mask[2] <= bits(RW0M, 2, 2) - ram.W_0.mask[3] <= bits(RW0M, 3, 3) - ram.W_0.mask[4] <= bits(RW0M, 4, 4) - ram.W_0.mask[5] <= bits(RW0M, 5, 5) - ram.W_0.mask[6] <= bits(RW0M, 6, 6) - ram.W_0.mask[7] <= bits(RW0M, 7, 7) - ram.W_0.mask[8] <= bits(RW0M, 8, 8) - ram.W_0.mask[9] <= bits(RW0M, 9, 9) - ram.W_0.mask[10] <= bits(RW0M, 10, 10) - ram.W_0.mask[11] <= bits(RW0M, 11, 11) - ram.W_0.mask[12] <= bits(RW0M, 12, 12) - ram.W_0.mask[13] <= bits(RW0M, 13, 13) - ram.W_0.mask[14] <= bits(RW0M, 14, 14) - ram.W_0.mask[15] <= bits(RW0M, 15, 15) + //~ mem ram : + //~ data-type => UInt<1>[16] + //~ depth => 2048 + //~ read-latency => 0 + //~ write-latency => 1 + //~ reader => R_0 + //~ writer => W_0 + //~ read-under-write => undefined + //~ reg R_0_addr_reg : UInt<11>, clock with : + //~ reset => (UInt<1>("h0"), R_0_addr_reg) + //~ ram.R_0.clk <= clock + //~ ram.R_0.addr <= R_0_addr_reg + //~ ram.R_0.en <= RW0E + //~ RW0O <= cat(ram.R_0.data[15], cat(ram.R_0.data[14], cat(ram.R_0.data[13], cat(ram.R_0.data[12], cat(ram.R_0.data[11], cat(ram.R_0.data[10], cat(ram.R_0.data[9], cat(ram.R_0.data[8], cat(ram.R_0.data[7], cat(ram.R_0.data[6], cat(ram.R_0.data[5], cat(ram.R_0.data[4], cat(ram.R_0.data[3], cat(ram.R_0.data[2], cat(ram.R_0.data[1], ram.R_0.data[0]))))))))))))))) + //~ R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) + //~ ram.W_0.clk <= clock + //~ ram.W_0.addr <= RW0A + //~ ram.W_0.en <= and(RW0E, RW0W) + //~ ram.W_0.data[0] <= bits(RW0I, 0, 0) + //~ ram.W_0.data[1] <= bits(RW0I, 1, 1) + //~ ram.W_0.data[2] <= bits(RW0I, 2, 2) + //~ ram.W_0.data[3] <= bits(RW0I, 3, 3) + //~ ram.W_0.data[4] <= bits(RW0I, 4, 4) + //~ ram.W_0.data[5] <= bits(RW0I, 5, 5) + //~ ram.W_0.data[6] <= bits(RW0I, 6, 6) + //~ ram.W_0.data[7] <= bits(RW0I, 7, 7) + //~ ram.W_0.data[8] <= bits(RW0I, 8, 8) + //~ ram.W_0.data[9] <= bits(RW0I, 9, 9) + //~ ram.W_0.data[10] <= bits(RW0I, 10, 10) + //~ ram.W_0.data[11] <= bits(RW0I, 11, 11) + //~ ram.W_0.data[12] <= bits(RW0I, 12, 12) + //~ ram.W_0.data[13] <= bits(RW0I, 13, 13) + //~ ram.W_0.data[14] <= bits(RW0I, 14, 14) + //~ ram.W_0.data[15] <= bits(RW0I, 15, 15) + //~ ram.W_0.mask[0] <= bits(RW0M, 0, 0) + //~ ram.W_0.mask[1] <= bits(RW0M, 1, 1) + //~ ram.W_0.mask[2] <= bits(RW0M, 2, 2) + //~ ram.W_0.mask[3] <= bits(RW0M, 3, 3) + //~ ram.W_0.mask[4] <= bits(RW0M, 4, 4) + //~ ram.W_0.mask[5] <= bits(RW0M, 5, 5) + //~ ram.W_0.mask[6] <= bits(RW0M, 6, 6) + //~ ram.W_0.mask[7] <= bits(RW0M, 7, 7) + //~ ram.W_0.mask[8] <= bits(RW0M, 8, 8) + //~ ram.W_0.mask[9] <= bits(RW0M, 9, 9) + //~ ram.W_0.mask[10] <= bits(RW0M, 10, 10) + //~ ram.W_0.mask[11] <= bits(RW0M, 11, 11) + //~ ram.W_0.mask[12] <= bits(RW0M, 12, 12) + //~ ram.W_0.mask[13] <= bits(RW0M, 13, 13) + //~ ram.W_0.mask[14] <= bits(RW0M, 14, 14) + //~ ram.W_0.mask[15] <= bits(RW0M, 15, 15) - module vendor_sram_4 : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<4> - output RW0O : UInt<4> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<4> + //~ module vendor_sram_4 : + //~ input clock : Clock + //~ input RW0A : UInt<11> + //~ input RW0I : UInt<4> + //~ output 
RW0O : UInt<4> + //~ input RW0E : UInt<1> + //~ input RW0W : UInt<1> + //~ input RW0M : UInt<4> - mem ram : - data-type => UInt<1>[4] - depth => 2048 - read-latency => 0 - write-latency => 1 - reader => R_0 - writer => W_0 - read-under-write => undefined - reg R_0_addr_reg : UInt<11>, clock with : - reset => (UInt<1>("h0"), R_0_addr_reg) - ram.R_0.clk <= clock - ram.R_0.addr <= R_0_addr_reg - ram.R_0.en <= RW0E - RW0O <= cat(ram.R_0.data[3], cat(ram.R_0.data[2], cat(ram.R_0.data[1], ram.R_0.data[0]))) - R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) - ram.W_0.clk <= clock - ram.W_0.addr <= RW0A - ram.W_0.en <= and(RW0E, RW0W) - ram.W_0.data[0] <= bits(RW0I, 0, 0) - ram.W_0.data[1] <= bits(RW0I, 1, 1) - ram.W_0.data[2] <= bits(RW0I, 2, 2) - ram.W_0.data[3] <= bits(RW0I, 3, 3) - ram.W_0.mask[0] <= bits(RW0M, 0, 0) - ram.W_0.mask[1] <= bits(RW0M, 1, 1) - ram.W_0.mask[2] <= bits(RW0M, 2, 2) - ram.W_0.mask[3] <= bits(RW0M, 3, 3) -""" - compile(mem, None, v, true) - execute(Some(mem), None, true, output) -} + //~ mem ram : + //~ data-type => UInt<1>[4] + //~ depth => 2048 + //~ read-latency => 0 + //~ write-latency => 1 + //~ reader => R_0 + //~ writer => W_0 + //~ read-under-write => undefined + //~ reg R_0_addr_reg : UInt<11>, clock with : + //~ reset => (UInt<1>("h0"), R_0_addr_reg) + //~ ram.R_0.clk <= clock + //~ ram.R_0.addr <= R_0_addr_reg + //~ ram.R_0.en <= RW0E + //~ RW0O <= cat(ram.R_0.data[3], cat(ram.R_0.data[2], cat(ram.R_0.data[1], ram.R_0.data[0]))) + //~ R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) + //~ ram.W_0.clk <= clock + //~ ram.W_0.addr <= RW0A + //~ ram.W_0.en <= and(RW0E, RW0W) + //~ ram.W_0.data[0] <= bits(RW0I, 0, 0) + //~ ram.W_0.data[1] <= bits(RW0I, 1, 1) + //~ ram.W_0.data[2] <= bits(RW0I, 2, 2) + //~ ram.W_0.data[3] <= bits(RW0I, 3, 3) + //~ ram.W_0.mask[0] <= bits(RW0M, 0, 0) + //~ ram.W_0.mask[1] <= bits(RW0M, 1, 1) + //~ ram.W_0.mask[2] <= bits(RW0M, 2, 2) + //~ ram.W_0.mask[3] <= bits(RW0M, 3, 3) +//~ """ + //~ compile(mem, None, v, true) + //~ execute(Some(mem), None, true, output) +//~ } -class Synflops32x32_2rw extends MacroCompilerSpec { - val mem = new File(macroDir, "lib-32x32-2rw.json") - val v = new File(testDir, "syn_flops_32x32_2rw.v") - val output = -""" -circuit SRAM2RW32x32 : - module SRAM2RW32x32 : - input CE1 : Clock - input A1 : UInt<5> - input I1 : UInt<32> - output O1 : UInt<32> - input CSB1 : UInt<1> - input OEB1 : UInt<1> - input WEB1 : UInt<1> - input CE2 : Clock - input A2 : UInt<5> - input I2 : UInt<32> - output O2 : UInt<32> - input CSB2 : UInt<1> - input OEB2 : UInt<1> - input WEB2 : UInt<1> +//~ class Synflops32x32_2rw extends MacroCompilerSpec { + //~ val mem = new File(macroDir, "lib-32x32-2rw.json") + //~ val v = new File(testDir, "syn_flops_32x32_2rw.v") + //~ val output = +//~ """ +//~ circuit SRAM2RW32x32 : + //~ module SRAM2RW32x32 : + //~ input CE1 : Clock + //~ input A1 : UInt<5> + //~ input I1 : UInt<32> + //~ output O1 : UInt<32> + //~ input CSB1 : UInt<1> + //~ input OEB1 : UInt<1> + //~ input WEB1 : UInt<1> + //~ input CE2 : Clock + //~ input A2 : UInt<5> + //~ input I2 : UInt<32> + //~ output O2 : UInt<32> + //~ input CSB2 : UInt<1> + //~ input OEB2 : UInt<1> + //~ input WEB2 : UInt<1> - mem ram : - data-type => UInt<32> - depth => 32 - read-latency => 0 - write-latency => 1 - reader => R_0 - reader => R_1 - writer => W_0 - writer => W_1 - read-under-write => undefined - reg R_0_addr_reg : UInt<5>, CE1 with : - reset => (UInt<1>("h0"), R_0_addr_reg) - ram.R_0.clk <= CE1 - ram.R_0.addr <= R_0_addr_reg - ram.R_0.en <= 
and(not(CSB1), not(OEB1)) - O1 <= ram.R_0.data - R_0_addr_reg <= mux(and(not(CSB1), not(OEB1)), A1, R_0_addr_reg) - reg R_1_addr_reg : UInt<5>, CE2 with : - reset => (UInt<1>("h0"), R_1_addr_reg) - ram.R_1.clk <= CE2 - ram.R_1.addr <= R_1_addr_reg - ram.R_1.en <= and(not(CSB2), not(OEB2)) - O2 <= ram.R_1.data - R_1_addr_reg <= mux(and(not(CSB2), not(OEB2)), A2, R_1_addr_reg) - ram.W_0.clk <= CE1 - ram.W_0.addr <= A1 - ram.W_0.en <= and(not(CSB1), not(WEB1)) - ram.W_0.data <= I1 - ram.W_0.mask <= UInt<1>("h1") - ram.W_1.clk <= CE2 - ram.W_1.addr <= A2 - ram.W_1.en <= and(not(CSB2), not(WEB2)) - ram.W_1.data <= I2 - ram.W_1.mask <= UInt<1>("h1") -""" - compile(mem, None, v, true) - execute(Some(mem), None, true, output) -} + //~ mem ram : + //~ data-type => UInt<32> + //~ depth => 32 + //~ read-latency => 0 + //~ write-latency => 1 + //~ reader => R_0 + //~ reader => R_1 + //~ writer => W_0 + //~ writer => W_1 + //~ read-under-write => undefined + //~ reg R_0_addr_reg : UInt<5>, CE1 with : + //~ reset => (UInt<1>("h0"), R_0_addr_reg) + //~ ram.R_0.clk <= CE1 + //~ ram.R_0.addr <= R_0_addr_reg + //~ ram.R_0.en <= and(not(CSB1), not(OEB1)) + //~ O1 <= ram.R_0.data + //~ R_0_addr_reg <= mux(and(not(CSB1), not(OEB1)), A1, R_0_addr_reg) + //~ reg R_1_addr_reg : UInt<5>, CE2 with : + //~ reset => (UInt<1>("h0"), R_1_addr_reg) + //~ ram.R_1.clk <= CE2 + //~ ram.R_1.addr <= R_1_addr_reg + //~ ram.R_1.en <= and(not(CSB2), not(OEB2)) + //~ O2 <= ram.R_1.data + //~ R_1_addr_reg <= mux(and(not(CSB2), not(OEB2)), A2, R_1_addr_reg) + //~ ram.W_0.clk <= CE1 + //~ ram.W_0.addr <= A1 + //~ ram.W_0.en <= and(not(CSB1), not(WEB1)) + //~ ram.W_0.data <= I1 + //~ ram.W_0.mask <= UInt<1>("h1") + //~ ram.W_1.clk <= CE2 + //~ ram.W_1.addr <= A2 + //~ ram.W_1.en <= and(not(CSB2), not(WEB2)) + //~ ram.W_1.data <= I2 + //~ ram.W_1.mask <= UInt<1>("h1") +//~ """ + //~ compile(mem, None, v, true) + //~ execute(Some(mem), None, true, output) +//~ } From 42febeb32a621df44710dfec8bbade6bac9c5ae9 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 19 Jul 2017 19:21:32 -0700 Subject: [PATCH 036/273] Rename files --- .../macros/{SplitDepth.scala => SimpleSplitDepth.scala} | 0 .../macros/{SplitWidth.scala => SimpleSplitWidth.scala} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tapeout/src/test/scala/transforms/macros/{SplitDepth.scala => SimpleSplitDepth.scala} (100%) rename tapeout/src/test/scala/transforms/macros/{SplitWidth.scala => SimpleSplitWidth.scala} (100%) diff --git a/tapeout/src/test/scala/transforms/macros/SplitDepth.scala b/tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala similarity index 100% rename from tapeout/src/test/scala/transforms/macros/SplitDepth.scala rename to tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala diff --git a/tapeout/src/test/scala/transforms/macros/SplitWidth.scala b/tapeout/src/test/scala/transforms/macros/SimpleSplitWidth.scala similarity index 100% rename from tapeout/src/test/scala/transforms/macros/SplitWidth.scala rename to tapeout/src/test/scala/transforms/macros/SimpleSplitWidth.scala From 3cb424cf80f31b3ac09191f071c00c2707afa051 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Fri, 21 Jul 2017 07:30:18 -0700 Subject: [PATCH 037/273] Add non power of two tests --- .../transforms/macros/SimpleSplitDepth.scala | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala b/tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala index 
d7ffebb0e..fc6a0b2e4 100644 --- a/tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala +++ b/tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala @@ -4,6 +4,7 @@ import firrtl.Utils.ceilLog2 import mdf.macrolib._ // Test the depth splitting aspect of the memory compiler. +// This file is for simple tests: one read-write port, powers of two sizes, etc. // For example, implementing a 4096x32 memory using four 1024x32 memories. trait HasSimpleDepthTestGenerator { @@ -33,7 +34,9 @@ trait HasSimpleDepthTestGenerator { writeToLib(lib, Seq(generateSRAM(lib_name, "lib", width, lib_depth))) writeToMem(mem, Seq(generateSRAM(mem_name, "outer", width, mem_depth))) - val expectedInstances = mem_depth / lib_depth + // Number of lib instances needed to hold the mem. + // Round up (e.g. 1.5 instances = effectively 2 instances) + val expectedInstances = math.ceil(mem_depth.toFloat / lib_depth).toInt val selectBits = mem_addr_width - lib_addr_width var output = s""" @@ -151,9 +154,18 @@ class SplitDepth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H } // Non power of two -class SplitDepth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { +class SplitDepth2000x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val mem_depth = 1024 + override lazy val mem_depth = 2000 + override lazy val lib_depth = 1024 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitDepth2049x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 8 + override lazy val mem_depth = 2049 override lazy val lib_depth = 1024 compile(mem, lib, v, false) From 80ca2e538f5b43ef97c44807591a3009d4ee847f Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Fri, 21 Jul 2017 10:32:48 -0700 Subject: [PATCH 038/273] Use require statement --- .../src/main/scala/transforms/macros/MacroCompiler.scala | 7 ++----- tapeout/src/main/scala/transforms/macros/Utils.scala | 6 +++--- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala index 9112351ec..46ac353c2 100644 --- a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala +++ b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala @@ -176,11 +176,8 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], if (libPort.src.effectiveMaskGran == libPort.src.width) { bits(WRef(mem), low / memPort.src.effectiveMaskGran) } else { - if (libPort.src.effectiveMaskGran != 1) { - // TODO - System.err println "only single-bit mask supported" - return None - } + require(libPort.src.effectiveMaskGran == 1, "only single-bit mask supported for now") + cat(((low to high) map (i => bits(WRef(mem), i / memPort.src.effectiveMaskGran))).reverse) } case None => diff --git a/tapeout/src/main/scala/transforms/macros/Utils.scala b/tapeout/src/main/scala/transforms/macros/Utils.scala index 2355c9f3a..49c6d07f3 100644 --- a/tapeout/src/main/scala/transforms/macros/Utils.scala +++ b/tapeout/src/main/scala/transforms/macros/Utils.scala @@ -18,9 +18,9 @@ class FirrtlMacroPort(port: MacroPort) { val isWriter = !port.writeEnable.isEmpty && port.readEnable.isEmpty val isReadWriter = !port.writeEnable.isEmpty && !port.readEnable.isEmpty - val AddrType = UIntType(IntWidth(ceilLog2(port.depth) max 1)) - val DataType = UIntType(IntWidth(port.width)) - val 
MaskType = UIntType(IntWidth(port.width / port.effectiveMaskGran)) + val addrType = UIntType(IntWidth(ceilLog2(port.depth) max 1)) + val dataType = UIntType(IntWidth(port.width)) + val maskType = UIntType(IntWidth(port.width / port.effectiveMaskGran)) // Bundle representing this macro port. val tpe = BundleType(Seq( From cf0d40f658ba4d14d286655ed294306518f2334e Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Fri, 21 Jul 2017 10:33:19 -0700 Subject: [PATCH 039/273] Fix typos --- tapeout/src/main/scala/transforms/macros/Utils.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tapeout/src/main/scala/transforms/macros/Utils.scala b/tapeout/src/main/scala/transforms/macros/Utils.scala index 49c6d07f3..275943e82 100644 --- a/tapeout/src/main/scala/transforms/macros/Utils.scala +++ b/tapeout/src/main/scala/transforms/macros/Utils.scala @@ -25,13 +25,13 @@ class FirrtlMacroPort(port: MacroPort) { // Bundle representing this macro port. val tpe = BundleType(Seq( Field(port.clock.name, Flip, ClockType), - Field(port.address.name, Flip, AddrType)) ++ - (port.input map (p => Field(p.name, Flip, DataType))) ++ - (port.output map (p => Field(p.name, Default, DataType))) ++ + Field(port.address.name, Flip, addrType)) ++ + (port.input map (p => Field(p.name, Flip, dataType))) ++ + (port.output map (p => Field(p.name, Default, dataType))) ++ (port.chipEnable map (p => Field(p.name, Flip, BoolType))) ++ (port.readEnable map (p => Field(p.name, Flip, BoolType))) ++ (port.writeEnable map (p => Field(p.name, Flip, BoolType))) ++ - (port.maskPort map (p => Field(p.name, Flip, MaskType))) + (port.maskPort map (p => Field(p.name, Flip, maskType))) ) val ports = tpe.fields map (f => Port( NoInfo, f.name, f.flip match { case Default => Output case Flip => Input }, f.tpe)) From bb2783994ad3aaf0dd396fd236eb8c58c94a5418 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Fri, 21 Jul 2017 10:33:29 -0700 Subject: [PATCH 040/273] Only use powers of two masks, for now --- tapeout/src/main/scala/transforms/macros/MacroCompiler.scala | 3 +++ tapeout/src/main/scala/transforms/macros/Utils.scala | 3 +++ 2 files changed, 6 insertions(+) diff --git a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala index 46ac353c2..54c35cc96 100644 --- a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala +++ b/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala @@ -178,6 +178,9 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } else { require(libPort.src.effectiveMaskGran == 1, "only single-bit mask supported for now") + require(isPowerOfTwo(memPort.src.effectiveMaskGran), "only powers of two masks supported for now") + require(isPowerOfTwo(libPort.src.effectiveMaskGran), "only powers of two masks supported for now") + cat(((low to high) map (i => bits(WRef(mem), i / memPort.src.effectiveMaskGran))).reverse) } case None => diff --git a/tapeout/src/main/scala/transforms/macros/Utils.scala b/tapeout/src/main/scala/transforms/macros/Utils.scala index 275943e82..72b9c631f 100644 --- a/tapeout/src/main/scala/transforms/macros/Utils.scala +++ b/tapeout/src/main/scala/transforms/macros/Utils.scala @@ -93,4 +93,7 @@ object Utils { case Some(ActiveLow) | Some(NegativeEdge) => not(exp) case _ => exp } + + // Check if a number is a power of two + def isPowerOfTwo(x: Int): Boolean = (x & (x - 1)) == 0 } From 8beb8b3f6fc469dcf8dbaf94be53774146751ec1 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Fri, 21 Jul 2017 11:03:34 -0700 
Subject: [PATCH 041/273] Don't unbox BigInt to Double --- tapeout/src/main/scala/transforms/macros/Utils.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tapeout/src/main/scala/transforms/macros/Utils.scala b/tapeout/src/main/scala/transforms/macros/Utils.scala index 72b9c631f..19591f4c1 100644 --- a/tapeout/src/main/scala/transforms/macros/Utils.scala +++ b/tapeout/src/main/scala/transforms/macros/Utils.scala @@ -51,8 +51,8 @@ class Macro(srcMacro: SRAMMacro) { val extraPorts = srcMacro.extraPorts map { p => assert(p.portType == Constant) // TODO: release it? val name = p.name - val width = BigInt(p.width.asInstanceOf[Double].toLong) - val value = BigInt(p.value.asInstanceOf[Double].toLong) + val width = BigInt(p.width.toLong) + val value = BigInt(p.value.toLong) (name -> UIntLiteral(value, IntWidth(width))) } From 607e810b1ddfeab99b584fea166c9733ea867e6f Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Fri, 21 Jul 2017 15:16:26 -0700 Subject: [PATCH 042/273] Autogenerate almost all the depth tests --- .../transforms/macros/MacroCompilerSpec.scala | 5 +- .../transforms/macros/SimpleSplitDepth.scala | 390 ++++++++++-------- 2 files changed, 226 insertions(+), 169 deletions(-) diff --git a/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala b/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala index a99397151..c42d4b776 100644 --- a/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala +++ b/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala @@ -86,7 +86,7 @@ trait HasSRAMGenerator { import mdf.macrolib._ // Generate a "simple" SRAM (active high/positive edge, 1 read-write port). - def generateSRAM(name: String, prefix: String, width: Int, depth: Int, maskGran: Option[Int] = None): SRAMMacro = { + def generateSRAM(name: String, prefix: String, width: Int, depth: Int, maskGran: Option[Int] = None, extraPorts: Seq[MacroExtraPort] = List()): SRAMMacro = { val realPrefix = prefix + "_" SRAMMacro( macroType=SRAM, @@ -110,7 +110,8 @@ trait HasSRAMGenerator { maskGran=maskGran, width=width, depth=depth // These numbers don't matter here. - )) + )), + extraPorts=extraPorts ) } } diff --git a/tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala b/tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala index fc6a0b2e4..d25aeb27f 100644 --- a/tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala +++ b/tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala @@ -15,15 +15,28 @@ trait HasSimpleDepthTestGenerator { def width: Int def mem_depth: Int def lib_depth: Int + def mem_maskGran: Option[Int] = None + def lib_maskGran: Option[Int] = None + def extraPorts: Seq[mdf.macrolib.MacroExtraPort] = List() require (mem_depth >= lib_depth) override val memPrefix = testDir override val libPrefix = testDir - val mem = s"mem-${mem_depth}x${width}-rw.json" - val lib = s"lib-${lib_depth}x${width}-rw.json" - val v = s"split_depth_${mem_depth}x${width}_rw.v" + // Convenience variables to check if a mask exists. + val memHasMask = mem_maskGran != None + val libHasMask = lib_maskGran != None + // We need to figure out how many mask bits there are in the mem. + val memMaskBits = if (memHasMask) width / mem_maskGran.get else 0 + val libMaskBits = if (libHasMask) width / lib_maskGran.get else 0 + // Generate "mrw" vs "rw" tags. 
+ val memTag = (if (memHasMask) "m" else "") + "rw" + val libTag = (if (libHasMask) "m" else "") + "rw" + + val mem = s"mem-${mem_depth}x${width}-${memTag}.json" + val lib = s"lib-${lib_depth}x${width}-${libTag}.json" + val v = s"split_depth_${mem_depth}x${width}_${memTag}.v" val mem_name = "target_memory" val mem_addr_width = ceilLog2(mem_depth) @@ -31,15 +44,16 @@ trait HasSimpleDepthTestGenerator { val lib_name = "awesome_lib_mem" val lib_addr_width = ceilLog2(lib_depth) - writeToLib(lib, Seq(generateSRAM(lib_name, "lib", width, lib_depth))) - writeToMem(mem, Seq(generateSRAM(mem_name, "outer", width, mem_depth))) + writeToLib(lib, Seq(generateSRAM(lib_name, "lib", width, lib_depth, lib_maskGran, extraPorts))) + writeToMem(mem, Seq(generateSRAM(mem_name, "outer", width, mem_depth, mem_maskGran))) // Number of lib instances needed to hold the mem. // Round up (e.g. 1.5 instances = effectively 2 instances) val expectedInstances = math.ceil(mem_depth.toFloat / lib_depth).toInt val selectBits = mem_addr_width - lib_addr_width - var output = -s""" + + val headerMask = if (memHasMask) s"input outer_mask : UInt<${memMaskBits}>" else "" + val header = s""" circuit $mem_name : module $mem_name : input outer_clk : Clock @@ -47,8 +61,24 @@ circuit $mem_name : input outer_din : UInt<$width> output outer_dout : UInt<$width> input outer_write_en : UInt<1> + ${headerMask} """ + val footerMask = if (libHasMask) s"input lib_mask : UInt<${libMaskBits}>" else "" + val footer = s""" + extmodule $lib_name : + input lib_clk : Clock + input lib_addr : UInt<$lib_addr_width> + input lib_din : UInt<$width> + output lib_dout : UInt<$width> + input lib_write_en : UInt<1> + ${footerMask} + + defname = $lib_name +""" + + var output = header + if (selectBits > 0) { output += s""" @@ -57,6 +87,26 @@ s""" } for (i <- 0 to expectedInstances - 1) { + // We only support simple masks for now (either libMask == memMask or libMask == 1) + val maskStatement = if (libHasMask) { + if (lib_maskGran.get == mem_maskGran.get) { + s"""mem_${i}_0.lib_mask <= bits(outer_mask, 0, 0)""" + } else if (lib_maskGran.get == 1) { + // Construct a mask string. + // Each bit gets the # of bits specified in maskGran. 
+ // Specify in descending order (MSB first) + + // This builds an array like m[1], m[1], m[0], m[0] + val maskBitsArr: Seq[String] = ((memMaskBits - 1 to 0 by -1) flatMap (maskBit => { + ((0 to mem_maskGran.get - 1) map (_ => s"bits(outer_mask, ${maskBit}, ${maskBit})")) + })) + // Now build it into a recursive string like + // cat(m[1], cat(m[1], cat(m[0], m[0]))) + val maskBitsStr: String = maskBitsArr.reverse.tail.foldLeft(maskBitsArr.reverse.head)((prev: String, next: String) => s"cat(${next}, ${prev})") + s"""mem_${i}_0.lib_mask <= ${maskBitsStr}""" + } else "" // TODO: implement when non-bitmasked memories are supported + } else "" // No mask + val enableIdentifier = if (selectBits > 0) s"""eq(outer_addr_sel, UInt<${selectBits}>("h${i.toHexString}"))""" else "UInt<1>(\"h1\")" output += s""" @@ -65,6 +115,7 @@ s""" mem_${i}_0.lib_addr <= outer_addr node outer_dout_${i}_0 = bits(mem_${i}_0.lib_dout, ${width - 1}, 0) mem_${i}_0.lib_din <= bits(outer_din, ${width - 1}, 0) + ${maskStatement} mem_${i}_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), ${enableIdentifier}) node outer_dout_${i} = outer_dout_${i}_0 """ @@ -85,17 +136,7 @@ s""" output += """mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0"))""" } - output += -s""" - extmodule $lib_name : - input lib_clk : Clock - input lib_addr : UInt<$lib_addr_width> - input lib_din : UInt<$width> - output lib_dout : UInt<$width> - input lib_write_en : UInt<1> - - defname = $lib_name -""" + output += footer } // Try different widths @@ -174,109 +215,123 @@ class SplitDepth2049x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H // Masked RAMs -class SplitDepth2048x8_mrw extends MacroCompilerSpec { - val mem = "mem-2048x8-mrw.json" - val lib = "lib-1024x8-mrw.json" - val v = "split_depth_2048x8_mrw.v" - val output = -""" -circuit name_of_sram_module : - module name_of_sram_module : - input clock : Clock - input RW0A : UInt<11> - input RW0I : UInt<8> - output RW0O : UInt<8> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<1> - - node RW0A_sel = bits(RW0A, 10, 10) - inst mem_0_0 of vendor_sram - mem_0_0.clock <= clock - mem_0_0.RW0A <= RW0A - node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - mem_0_0.RW0I <= bits(RW0I, 7, 0) - mem_0_0.RW0M <= bits(RW0M, 0, 0) - mem_0_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h0"))) - mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) - node RW0O_0 = RW0O_0_0 - inst mem_1_0 of vendor_sram - mem_1_0.clock <= clock - mem_1_0.RW0A <= RW0A - node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) - mem_1_0.RW0I <= bits(RW0I, 7, 0) - mem_1_0.RW0M <= bits(RW0M, 0, 0) - mem_1_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h1"))) - mem_1_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h1"))) - node RW0O_1 = RW0O_1_0 - RW0O <= mux(eq(RW0A_sel, UInt<1>("h0")), RW0O_0, mux(eq(RW0A_sel, UInt<1>("h1")), RW0O_1, UInt<1>("h0"))) - - extmodule vendor_sram : - input clock : Clock - input RW0A : UInt<10> - input RW0I : UInt<8> - output RW0O : UInt<8> - input RW0E : UInt<1> - input RW0W : UInt<1> - input RW0M : UInt<1> - - defname = vendor_sram -""" +// Test for mem mask == lib mask (i.e. 
mask is a write enable bit) +class SplitDepth2048x32_mrw_lib32 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 32 + override lazy val mem_depth = 2048 + override lazy val lib_depth = 1024 + override lazy val mem_maskGran = Some(32) + override lazy val lib_maskGran = Some(32) + compile(mem, lib, v, false) execute(mem, lib, false, output) } -//~ class SplitDepth2048x8_n28 extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-2048x8-mrw.json") - //~ val lib = new File(macroDir, "lib-1024x8-n28.json") - //~ val v = new File(testDir, "split_depth_2048x8_n28.v") - //~ val output = -//~ """ -//~ circuit name_of_sram_module : - //~ module name_of_sram_module : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<8> - //~ output RW0O : UInt<8> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<1> - - //~ node RW0A_sel = bits(RW0A, 10, 10) - //~ inst mem_0_0 of vendor_sram - //~ mem_0_0.clock <= clock - //~ mem_0_0.RW0A <= RW0A - //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - //~ mem_0_0.RW0I <= bits(RW0I, 7, 0) - //~ mem_0_0.RW0M <= cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))) - //~ mem_0_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h0"))) - //~ mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) - //~ node RW0O_0 = RW0O_0_0 - //~ inst mem_1_0 of vendor_sram - //~ mem_1_0.clock <= clock - //~ mem_1_0.RW0A <= RW0A - //~ node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) - //~ mem_1_0.RW0I <= bits(RW0I, 7, 0) - //~ mem_1_0.RW0M <= cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))) - //~ mem_1_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h1"))) - //~ mem_1_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h1"))) - //~ node RW0O_1 = RW0O_1_0 - //~ RW0O <= mux(eq(RW0A_sel, UInt<1>("h0")), RW0O_0, mux(eq(RW0A_sel, UInt<1>("h1")), RW0O_1, UInt<1>("h0"))) +class SplitDepth2048x8_mrw_lib8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 8 + override lazy val mem_depth = 2048 + override lazy val lib_depth = 1024 + override lazy val mem_maskGran = Some(8) + override lazy val lib_maskGran = Some(8) - //~ extmodule vendor_sram : - //~ input clock : Clock - //~ input RW0A : UInt<10> - //~ input RW0I : UInt<8> - //~ output RW0O : UInt<8> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<8> + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} - //~ defname = vendor_sram -//~ """ - //~ compile(mem, lib, v, false) - //~ execute(mem, lib, false, output) -//~ } +// Non-bit level mask +class SplitDepth2048x64_mrw_mem32_lib8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 64 + override lazy val mem_depth = 2048 + override lazy val lib_depth = 1024 + override lazy val mem_maskGran = Some(32) + override lazy val lib_maskGran = Some(8) + + it should "be enabled when non-bitmasked memories are supported" is (pending) + //compile(mem, lib, v, false) + //execute(mem, lib, false, output) +} + +// Bit level mask +class SplitDepth2048x32_mrw_mem16_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 32 + override lazy val mem_depth = 2048 + 
override lazy val lib_depth = 1024 + override lazy val mem_maskGran = Some(16) + override lazy val lib_maskGran = Some(1) + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitDepth2048x32_mrw_mem8_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 32 + override lazy val mem_depth = 2048 + override lazy val lib_depth = 1024 + override lazy val mem_maskGran = Some(8) + override lazy val lib_maskGran = Some(1) + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitDepth2048x32_mrw_mem4_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 32 + override lazy val mem_depth = 2048 + override lazy val lib_depth = 1024 + override lazy val mem_maskGran = Some(4) + override lazy val lib_maskGran = Some(1) + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitDepth2048x32_mrw_mem2_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 32 + override lazy val mem_depth = 2048 + override lazy val lib_depth = 1024 + override lazy val mem_maskGran = Some(2) + override lazy val lib_maskGran = Some(1) + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +// Non-powers of 2 mask sizes +class SplitDepth2048x32_mrw_mem3_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 32 + override lazy val mem_depth = 2048 + override lazy val lib_depth = 1024 + override lazy val mem_maskGran = Some(3) + override lazy val lib_maskGran = Some(1) + + it should "be enabled when non-power of two masks are supported" is (pending) + //compile(mem, lib, v, false) + //execute(mem, lib, false, output) +} + +class SplitDepth2048x32_mrw_mem7_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 32 + override lazy val mem_depth = 2048 + override lazy val lib_depth = 1024 + override lazy val mem_maskGran = Some(7) + override lazy val lib_maskGran = Some(1) + + it should "be enabled when non-power of two masks are supported" is (pending) + //compile(mem, lib, v, false) + //execute(mem, lib, false, output) +} + +class SplitDepth2048x32_mrw_mem9_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 32 + override lazy val mem_depth = 2048 + override lazy val lib_depth = 1024 + override lazy val mem_maskGran = Some(9) + override lazy val lib_maskGran = Some(1) + + it should "be enabled when non-power of two masks are supported" is (pending) + //compile(mem, lib, v, false) + //execute(mem, lib, false, output) +} //~ class SplitDepth2048x8_r_mw extends MacroCompilerSpec { //~ val mem = new File(macroDir, "mem-2048x8-r-mw.json") @@ -338,58 +393,59 @@ circuit name_of_sram_module : //~ execute(mem, lib, false, output) //~ } +// Try an extra port +class SplitDepth2048x8_extraPort extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + import mdf.macrolib._ -//~ class SplitDepth2048x8_mrw_Sleep extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-2048x8-mrw.json") - //~ val lib = new File(macroDir, "lib-1024x8-sleep.json") - //~ val v = new File(testDir, "split_depth_2048x8_sleep.v") - //~ val output = -//~ """ -//~ circuit name_of_sram_module : - //~ module name_of_sram_module : - //~ input clock : Clock - //~ input RW0A : 
UInt<11> - //~ input RW0I : UInt<8> - //~ output RW0O : UInt<8> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<1> - - //~ node RW0A_sel = bits(RW0A, 10, 10) - //~ inst mem_0_0 of vendor_sram - //~ mem_0_0.sleep <= UInt<1>("h0") - //~ mem_0_0.clock <= clock - //~ mem_0_0.RW0A <= RW0A - //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - //~ mem_0_0.RW0I <= bits(RW0I, 7, 0) - //~ mem_0_0.RW0M <= bits(RW0M, 0, 0) - //~ mem_0_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h0"))) - //~ mem_0_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h0"))) - //~ node RW0O_0 = RW0O_0_0 - //~ inst mem_1_0 of vendor_sram - //~ mem_1_0.sleep <= UInt<1>("h0") - //~ mem_1_0.clock <= clock - //~ mem_1_0.RW0A <= RW0A - //~ node RW0O_1_0 = bits(mem_1_0.RW0O, 7, 0) - //~ mem_1_0.RW0I <= bits(RW0I, 7, 0) - //~ mem_1_0.RW0M <= bits(RW0M, 0, 0) - //~ mem_1_0.RW0W <= and(RW0W, eq(RW0A_sel, UInt<1>("h1"))) - //~ mem_1_0.RW0E <= and(RW0E, eq(RW0A_sel, UInt<1>("h1"))) - //~ node RW0O_1 = RW0O_1_0 - //~ RW0O <= mux(eq(RW0A_sel, UInt<1>("h0")), RW0O_0, mux(eq(RW0A_sel, UInt<1>("h1")), RW0O_1, UInt<1>("h0"))) + override lazy val width = 8 + override lazy val mem_depth = 2048 + override lazy val lib_depth = 1024 + override lazy val extraPorts = List( + MacroExtraPort(name="extra_port", width=8, portType=Constant, value=0xff) + ) - //~ extmodule vendor_sram : - //~ input clock : Clock - //~ input RW0A : UInt<10> - //~ input RW0I : UInt<8> - //~ output RW0O : UInt<8> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<1> - //~ input sleep : UInt<1> + val outputCustom = +""" +circuit target_memory : + module target_memory : + input outer_clk : Clock + input outer_addr : UInt<11> + input outer_din : UInt<8> + output outer_dout : UInt<8> + input outer_write_en : UInt<1> - //~ defname = vendor_sram -//~ """ - //~ compile(mem, lib, v, false) - //~ execute(mem, lib, false, output) -//~ } + node outer_addr_sel = bits(outer_addr, 10, 10) + + inst mem_0_0 of awesome_lib_mem + mem_0_0.extra_port <= UInt<8>("hff") + mem_0_0.lib_clk <= outer_clk + mem_0_0.lib_addr <= outer_addr + node outer_dout_0_0 = bits(mem_0_0.lib_dout, 7, 0) + mem_0_0.lib_din <= bits(outer_din, 7, 0) + + mem_0_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), eq(outer_addr_sel, UInt<1>("h0"))) + node outer_dout_0 = outer_dout_0_0 + + inst mem_1_0 of awesome_lib_mem + mem_1_0.extra_port <= UInt<8>("hff") + mem_1_0.lib_clk <= outer_clk + mem_1_0.lib_addr <= outer_addr + node outer_dout_1_0 = bits(mem_1_0.lib_dout, 7, 0) + mem_1_0.lib_din <= bits(outer_din, 7, 0) + + mem_1_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), eq(outer_addr_sel, UInt<1>("h1"))) + node outer_dout_1 = outer_dout_1_0 + outer_dout <= mux(eq(outer_addr_sel, UInt<1>("h0")), outer_dout_0, mux(eq(outer_addr_sel, UInt<1>("h1")), outer_dout_1, UInt<1>("h0"))) + extmodule awesome_lib_mem : + input lib_clk : Clock + input lib_addr : UInt<10> + input lib_din : UInt<8> + output lib_dout : UInt<8> + input lib_write_en : UInt<1> + input extra_port : UInt<8> + + defname = awesome_lib_mem + """ + compile(mem, lib, v, false) + execute(mem, lib, false, outputCustom) +} From 9de1f5f2c0e59ef5299311ebf3d781025b3d703a Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Tue, 25 Jul 2017 13:48:58 -0700 Subject: [PATCH 043/273] restructure macros for better submoduling --- build.sbt | 36 ++++++++++--------- .../src/main/scala}/MacroCompiler.scala | 2 +- .../src/main/scala}/SynFlops.scala | 2 +- .../src/main/scala}/Utils.scala | 2 +- 
.../src/test/resources}/lib-1024x8-mrw.json | 0 .../src/test/resources}/lib-1024x8-n28.json | 0 .../src/test/resources}/lib-1024x8-r-mw.json | 0 .../src/test/resources}/lib-1024x8-sleep.json | 0 .../src/test/resources}/lib-2048x10-rw.json | 0 .../src/test/resources}/lib-2048x16-n28.json | 0 .../test/resources}/lib-2048x8-mrw-re.json | 0 .../src/test/resources}/lib-2048x8-mrw.json | 0 .../src/test/resources}/lib-32x32-2rw.json | 0 .../src/test/resources}/lib-32x80-mrw.json | 0 .../src/test/resources}/mem-2000x8-mrw.json | 0 .../test/resources}/mem-2048x16-mrw-2.json | 0 .../src/test/resources}/mem-2048x16-mrw.json | 0 .../src/test/resources}/mem-2048x20-mrw.json | 0 .../src/test/resources}/mem-2048x8-mrw.json | 0 .../src/test/resources}/mem-2048x8-r-mw.json | 0 .../src/test/resources}/mem-24x52-r-w.json | 0 .../src/test/resources}/mem-32x160-mrw.json | 0 .../src/test/resources}/mylib.json | 0 .../src/test/resources}/rocketchip.json | 0 .../src/test/scala}/MacroCompilerSpec.scala | 3 +- .../src/test/scala}/SimpleSplitDepth.scala | 2 +- .../src/test/scala}/SimpleSplitWidth.scala | 2 +- .../src/test/scala}/SynFlops.scala | 0 28 files changed, 25 insertions(+), 24 deletions(-) rename {tapeout/src/main/scala/transforms/macros => macros/src/main/scala}/MacroCompiler.scala (99%) rename {tapeout/src/main/scala/transforms/macros => macros/src/main/scala}/SynFlops.scala (98%) rename {tapeout/src/main/scala/transforms/macros => macros/src/main/scala}/Utils.scala (98%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/lib-1024x8-mrw.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/lib-1024x8-n28.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/lib-1024x8-r-mw.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/lib-1024x8-sleep.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/lib-2048x10-rw.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/lib-2048x16-n28.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/lib-2048x8-mrw-re.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/lib-2048x8-mrw.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/lib-32x32-2rw.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/lib-32x80-mrw.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/mem-2000x8-mrw.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/mem-2048x16-mrw-2.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/mem-2048x16-mrw.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/mem-2048x20-mrw.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/mem-2048x8-mrw.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/mem-2048x8-r-mw.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/mem-24x52-r-w.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/mem-32x160-mrw.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/mylib.json (100%) rename {tapeout/src/test/resources/macros => macros/src/test/resources}/rocketchip.json (100%) rename {tapeout/src/test/scala/transforms/macros => 
macros/src/test/scala}/MacroCompilerSpec.scala (99%) rename {tapeout/src/test/scala/transforms/macros => macros/src/test/scala}/SimpleSplitDepth.scala (99%) rename {tapeout/src/test/scala/transforms/macros => macros/src/test/scala}/SimpleSplitWidth.scala (99%) rename {tapeout/src/test/scala/transforms/macros => macros/src/test/scala}/SynFlops.scala (100%) diff --git a/build.sbt b/build.sbt index cf97adbab..c133667e8 100644 --- a/build.sbt +++ b/build.sbt @@ -2,31 +2,33 @@ import Dependencies._ +val defaultVersions = Map( + "chisel3" -> "3.1-SNAPSHOT", + "chisel-iotesters" -> "1.2-SNAPSHOT" +) + lazy val commonSettings = Seq( organization := "edu.berkeley.cs", version := "0.1-SNAPSHOT", scalaVersion := "2.11.8", scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls"), - libraryDependencies ++= commonDependencies -) - -val defaultVersions = Map( - "chisel3" -> "3.1-SNAPSHOT", - "chisel-iotesters" -> "1.2-SNAPSHOT" + libraryDependencies ++= commonDependencies, + libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { + dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) + }, + resolvers ++= Seq( + Resolver.sonatypeRepo("snapshots"), + Resolver.sonatypeRepo("releases") + ) ) -lazy val mdf = RootProject(file("mdf/scalalib")) +lazy val mdf = (project in file("mdf/scalalib")) +lazy val macros = (project in file("macros")) + .dependsOn(mdf) + .settings(commonSettings) lazy val tapeout = (project in file("tapeout")) - .dependsOn(mdf) .settings(commonSettings) - .settings( - libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { - dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) - }, - resolvers ++= Seq( - Resolver.sonatypeRepo("snapshots"), - Resolver.sonatypeRepo("releases") - ) - ) .settings(scalacOptions in Test ++= Seq("-language:reflectiveCalls")) + +lazy val root = (project in file(".")).aggregate(macros, tapeout) diff --git a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala similarity index 99% rename from tapeout/src/main/scala/transforms/macros/MacroCompiler.scala rename to macros/src/main/scala/MacroCompiler.scala index 54c35cc96..ea800956e 100644 --- a/tapeout/src/main/scala/transforms/macros/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -1,6 +1,6 @@ // See LICENSE for license details. -package barstools.tapeout.transforms.macros +package barstools.macros import firrtl._ import firrtl.ir._ diff --git a/tapeout/src/main/scala/transforms/macros/SynFlops.scala b/macros/src/main/scala/SynFlops.scala similarity index 98% rename from tapeout/src/main/scala/transforms/macros/SynFlops.scala rename to macros/src/main/scala/SynFlops.scala index c4a848726..e005196a5 100644 --- a/tapeout/src/main/scala/transforms/macros/SynFlops.scala +++ b/macros/src/main/scala/SynFlops.scala @@ -1,6 +1,6 @@ // See LICENSE for license details. -package barstools.tapeout.transforms.macros +package barstools.macros import firrtl._ import firrtl.ir._ diff --git a/tapeout/src/main/scala/transforms/macros/Utils.scala b/macros/src/main/scala/Utils.scala similarity index 98% rename from tapeout/src/main/scala/transforms/macros/Utils.scala rename to macros/src/main/scala/Utils.scala index 19591f4c1..9a2a11b8e 100644 --- a/tapeout/src/main/scala/transforms/macros/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -1,6 +1,6 @@ // See LICENSE for license details. 
-package barstools.tapeout.transforms.macros +package barstools.macros import firrtl._ import firrtl.ir._ diff --git a/tapeout/src/test/resources/macros/lib-1024x8-mrw.json b/macros/src/test/resources/lib-1024x8-mrw.json similarity index 100% rename from tapeout/src/test/resources/macros/lib-1024x8-mrw.json rename to macros/src/test/resources/lib-1024x8-mrw.json diff --git a/tapeout/src/test/resources/macros/lib-1024x8-n28.json b/macros/src/test/resources/lib-1024x8-n28.json similarity index 100% rename from tapeout/src/test/resources/macros/lib-1024x8-n28.json rename to macros/src/test/resources/lib-1024x8-n28.json diff --git a/tapeout/src/test/resources/macros/lib-1024x8-r-mw.json b/macros/src/test/resources/lib-1024x8-r-mw.json similarity index 100% rename from tapeout/src/test/resources/macros/lib-1024x8-r-mw.json rename to macros/src/test/resources/lib-1024x8-r-mw.json diff --git a/tapeout/src/test/resources/macros/lib-1024x8-sleep.json b/macros/src/test/resources/lib-1024x8-sleep.json similarity index 100% rename from tapeout/src/test/resources/macros/lib-1024x8-sleep.json rename to macros/src/test/resources/lib-1024x8-sleep.json diff --git a/tapeout/src/test/resources/macros/lib-2048x10-rw.json b/macros/src/test/resources/lib-2048x10-rw.json similarity index 100% rename from tapeout/src/test/resources/macros/lib-2048x10-rw.json rename to macros/src/test/resources/lib-2048x10-rw.json diff --git a/tapeout/src/test/resources/macros/lib-2048x16-n28.json b/macros/src/test/resources/lib-2048x16-n28.json similarity index 100% rename from tapeout/src/test/resources/macros/lib-2048x16-n28.json rename to macros/src/test/resources/lib-2048x16-n28.json diff --git a/tapeout/src/test/resources/macros/lib-2048x8-mrw-re.json b/macros/src/test/resources/lib-2048x8-mrw-re.json similarity index 100% rename from tapeout/src/test/resources/macros/lib-2048x8-mrw-re.json rename to macros/src/test/resources/lib-2048x8-mrw-re.json diff --git a/tapeout/src/test/resources/macros/lib-2048x8-mrw.json b/macros/src/test/resources/lib-2048x8-mrw.json similarity index 100% rename from tapeout/src/test/resources/macros/lib-2048x8-mrw.json rename to macros/src/test/resources/lib-2048x8-mrw.json diff --git a/tapeout/src/test/resources/macros/lib-32x32-2rw.json b/macros/src/test/resources/lib-32x32-2rw.json similarity index 100% rename from tapeout/src/test/resources/macros/lib-32x32-2rw.json rename to macros/src/test/resources/lib-32x32-2rw.json diff --git a/tapeout/src/test/resources/macros/lib-32x80-mrw.json b/macros/src/test/resources/lib-32x80-mrw.json similarity index 100% rename from tapeout/src/test/resources/macros/lib-32x80-mrw.json rename to macros/src/test/resources/lib-32x80-mrw.json diff --git a/tapeout/src/test/resources/macros/mem-2000x8-mrw.json b/macros/src/test/resources/mem-2000x8-mrw.json similarity index 100% rename from tapeout/src/test/resources/macros/mem-2000x8-mrw.json rename to macros/src/test/resources/mem-2000x8-mrw.json diff --git a/tapeout/src/test/resources/macros/mem-2048x16-mrw-2.json b/macros/src/test/resources/mem-2048x16-mrw-2.json similarity index 100% rename from tapeout/src/test/resources/macros/mem-2048x16-mrw-2.json rename to macros/src/test/resources/mem-2048x16-mrw-2.json diff --git a/tapeout/src/test/resources/macros/mem-2048x16-mrw.json b/macros/src/test/resources/mem-2048x16-mrw.json similarity index 100% rename from tapeout/src/test/resources/macros/mem-2048x16-mrw.json rename to macros/src/test/resources/mem-2048x16-mrw.json diff --git 
a/tapeout/src/test/resources/macros/mem-2048x20-mrw.json b/macros/src/test/resources/mem-2048x20-mrw.json similarity index 100% rename from tapeout/src/test/resources/macros/mem-2048x20-mrw.json rename to macros/src/test/resources/mem-2048x20-mrw.json diff --git a/tapeout/src/test/resources/macros/mem-2048x8-mrw.json b/macros/src/test/resources/mem-2048x8-mrw.json similarity index 100% rename from tapeout/src/test/resources/macros/mem-2048x8-mrw.json rename to macros/src/test/resources/mem-2048x8-mrw.json diff --git a/tapeout/src/test/resources/macros/mem-2048x8-r-mw.json b/macros/src/test/resources/mem-2048x8-r-mw.json similarity index 100% rename from tapeout/src/test/resources/macros/mem-2048x8-r-mw.json rename to macros/src/test/resources/mem-2048x8-r-mw.json diff --git a/tapeout/src/test/resources/macros/mem-24x52-r-w.json b/macros/src/test/resources/mem-24x52-r-w.json similarity index 100% rename from tapeout/src/test/resources/macros/mem-24x52-r-w.json rename to macros/src/test/resources/mem-24x52-r-w.json diff --git a/tapeout/src/test/resources/macros/mem-32x160-mrw.json b/macros/src/test/resources/mem-32x160-mrw.json similarity index 100% rename from tapeout/src/test/resources/macros/mem-32x160-mrw.json rename to macros/src/test/resources/mem-32x160-mrw.json diff --git a/tapeout/src/test/resources/macros/mylib.json b/macros/src/test/resources/mylib.json similarity index 100% rename from tapeout/src/test/resources/macros/mylib.json rename to macros/src/test/resources/mylib.json diff --git a/tapeout/src/test/resources/macros/rocketchip.json b/macros/src/test/resources/rocketchip.json similarity index 100% rename from tapeout/src/test/resources/macros/rocketchip.json rename to macros/src/test/resources/rocketchip.json diff --git a/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala similarity index 99% rename from tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala rename to macros/src/test/scala/MacroCompilerSpec.scala index c42d4b776..5ef71fc6b 100644 --- a/tapeout/src/test/scala/transforms/macros/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -1,6 +1,5 @@ -package barstools.tapeout.transforms.macros.test +package barstools.macros -import barstools.tapeout.transforms.macros._ import firrtl.ir.{Circuit, NoInfo} import firrtl.passes.RemoveEmpty import firrtl.Parser.parse diff --git a/tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala similarity index 99% rename from tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala rename to macros/src/test/scala/SimpleSplitDepth.scala index d25aeb27f..94322fe10 100644 --- a/tapeout/src/test/scala/transforms/macros/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -1,4 +1,4 @@ -package barstools.tapeout.transforms.macros.test +package barstools.macros import firrtl.Utils.ceilLog2 import mdf.macrolib._ diff --git a/tapeout/src/test/scala/transforms/macros/SimpleSplitWidth.scala b/macros/src/test/scala/SimpleSplitWidth.scala similarity index 99% rename from tapeout/src/test/scala/transforms/macros/SimpleSplitWidth.scala rename to macros/src/test/scala/SimpleSplitWidth.scala index 1e172d643..f4c9faf1a 100644 --- a/tapeout/src/test/scala/transforms/macros/SimpleSplitWidth.scala +++ b/macros/src/test/scala/SimpleSplitWidth.scala @@ -1,4 +1,4 @@ -//~ package barstools.tapeout.transforms.macros.test +//~ package barstools.macros //~ import java.io.File 
diff --git a/tapeout/src/test/scala/transforms/macros/SynFlops.scala b/macros/src/test/scala/SynFlops.scala similarity index 100% rename from tapeout/src/test/scala/transforms/macros/SynFlops.scala rename to macros/src/test/scala/SynFlops.scala From 7cb5604dfaf11524b6d7c9e2f3f17a910c693e9d Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Wed, 26 Jul 2017 00:15:08 -0700 Subject: [PATCH 044/273] add optimizations --- macros/src/main/scala/MacroCompiler.scala | 36 ++++++++++++++++------- macros/src/main/scala/Utils.scala | 6 ++-- 2 files changed, 29 insertions(+), 13 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index ea800956e..ec70949ef 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -14,9 +14,12 @@ import java.io.{File, FileWriter} import Utils._ object MacroCompilerAnnotation { - def apply(c: String, mem: String, lib: Option[String], synflops: Boolean) = { + def apply(c: String, mem: File, lib: Option[File], synflops: Boolean): Annotation = + apply(c, mem.toString, lib map (_.toString), synflops) + + def apply(c: String, mem: String, lib: Option[String], synflops: Boolean): Annotation = { Annotation(CircuitName(c), classOf[MacroCompilerTransform], - s"${mem} %s ${synflops}".format(lib map (_.toString) getOrElse "")) + s"${mem} %s ${synflops}".format(lib getOrElse "")) } private val matcher = "([^ ]+) ([^ ]*) (true|false)".r def unapply(a: Annotation) = a match { @@ -331,8 +334,8 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } class MacroCompilerTransform extends Transform { - def inputForm = HighForm - def outputForm = HighForm + def inputForm = MidForm + def outputForm = MidForm def execute(state: CircuitState) = getMyAnnotations(state) match { case Seq(MacroCompilerAnnotation(state.circuit.main, memFile, libFile, synflops)) => require(memFile.isDefined) @@ -349,19 +352,32 @@ class MacroCompilerTransform extends Transform { } val transforms = Seq( new MacroCompilerPass(mems, libs), - new SynFlopsPass(synflops, libs getOrElse mems.get), - firrtl.passes.SplitExpressions - ) - ((transforms foldLeft state)((s, xform) => xform runTransform s)) + new SynFlopsPass(synflops, libs getOrElse mems.get)) + (transforms foldLeft state)((s, xform) => xform runTransform s).copy(form=outputForm) + case _ => state } } +// FIXME: Use firrtl.LowerFirrtlOptimizations +class MacroCompilerOptimizations extends SeqTransform { + def inputForm = LowForm + def outputForm = LowForm + def transforms = Seq( + passes.RemoveValidIf, + new firrtl.transforms.ConstantPropagation, + passes.memlib.VerilogMemDelays, + new firrtl.transforms.ConstantPropagation, + passes.Legalize, + passes.SplitExpressions, + passes.CommonSubexpressionElimination) +} + class MacroCompiler extends Compiler { def emitter = new VerilogEmitter def transforms = Seq(new MacroCompilerTransform) ++ - getLoweringTransforms(firrtl.HighForm, firrtl.LowForm) // ++ - // Seq(new LowFirrtlOptimization) // Todo: This is dangerous + getLoweringTransforms(firrtl.HighForm, firrtl.LowForm) ++ + Seq(new MacroCompilerOptimizations) } object MacroCompiler extends App { diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index 9a2a11b8e..5c41ef8b4 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -14,9 +14,9 @@ import scala.language.implicitConversions class FirrtlMacroPort(port: MacroPort) { val src = port - val isReader = !port.readEnable.isEmpty && 
port.writeEnable.isEmpty - val isWriter = !port.writeEnable.isEmpty && port.readEnable.isEmpty - val isReadWriter = !port.writeEnable.isEmpty && !port.readEnable.isEmpty + val isReader = port.output.nonEmpty && port.input.isEmpty + val isWriter = port.input.nonEmpty && port.output.isEmpty + val isReadWriter = port.input.nonEmpty && port.output.nonEmpty val addrType = UIntType(IntWidth(ceilLog2(port.depth) max 1)) val dataType = UIntType(IntWidth(port.width)) From c4502fca6d1e882a8cb971cc02e039158113870c Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Wed, 26 Jul 2017 00:15:35 -0700 Subject: [PATCH 045/273] map macros to sequential memeory --- macros/src/main/scala/SynFlops.scala | 71 +++++++++++++++++++++------- macros/src/main/scala/Utils.scala | 2 + 2 files changed, 56 insertions(+), 17 deletions(-) diff --git a/macros/src/main/scala/SynFlops.scala b/macros/src/main/scala/SynFlops.scala index e005196a5..d33ca43cf 100644 --- a/macros/src/main/scala/SynFlops.scala +++ b/macros/src/main/scala/SynFlops.scala @@ -31,13 +31,13 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa dataType, lib.src.depth, 1, // writeLatency - 0, // readLatency - (lib.readers ++ lib.readwriters).indices map (i => s"R_$i"), - (lib.writers ++ lib.readwriters).indices map (i => s"W_$i"), - Nil + 1, // readLatency + lib.readers.indices map (i => s"R_$i"), + lib.writers.indices map (i => s"W_$i"), + lib.readwriters.indices map (i => s"RW_$i") ) - val readConnects = (lib.readers ++ lib.readwriters).zipWithIndex flatMap { case (r, i) => + val readConnects = lib.readers.zipWithIndex flatMap { case (r, i) => val clock = portToExpression(r.src.clock) val address = portToExpression(r.src.address) val enable = (r.src chipEnable, r.src readEnable) match { @@ -54,18 +54,15 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa WSubIndex(data, k, tpe, UNKNOWNGENDER))).reverse) case _: UIntType => data } - val addrReg = WRef(s"R_${i}_addr_reg", r.addrType, RegKind) Seq( - DefRegister(NoInfo, addrReg.name, r.addrType, clock, zero, addrReg), Connect(NoInfo, memPortField(mem, s"R_$i", "clk"), clock), - Connect(NoInfo, memPortField(mem, s"R_$i", "addr"), addrReg), + Connect(NoInfo, memPortField(mem, s"R_$i", "addr"), address), Connect(NoInfo, memPortField(mem, s"R_$i", "en"), enable), - Connect(NoInfo, WRef(r.src.output.get.name), read), - Connect(NoInfo, addrReg, Mux(enable, address, addrReg, UnknownType)) + Connect(NoInfo, WRef(r.src.output.get.name), read) ) } - val writeConnects = (lib.writers ++ lib.readwriters).zipWithIndex flatMap { case (w, i) => + val writeConnects = lib.writers.zipWithIndex flatMap { case (w, i) => val clock = portToExpression(w.src.clock) val address = portToExpression(w.src.address) val enable = (w.src.chipEnable, w.src.writeEnable) match { @@ -96,15 +93,55 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa Seq(Connect(NoInfo, data, write), Connect(NoInfo, mask, one)) }) } - lib.module(Block(mem +: (readConnects ++ writeConnects))) + + val readwriteConnects = lib.readwriters.zipWithIndex flatMap { case (rw, i) => + val clock = portToExpression(rw.src.clock) + val address = portToExpression(rw.src.address) + val wmode = rw.src.writeEnable match { + case Some(we) => portToExpression(we) + case None => zero // is it possible? 
+ } + val enable = (rw.src.chipEnable, rw.src.readEnable) match { + case (Some(en), Some(re)) => + and(portToExpression(en), or(portToExpression(re), wmode)) + case (Some(en), None) => portToExpression(en) + case (None, Some(re)) => or(portToExpression(re), wmode) + case (None, None) => one + } + val wmask = memPortField(mem, s"RW_$i", "wmask") + val wdata = memPortField(mem, s"RW_$i", "wdata") + val rdata = memPortField(mem, s"RW_$i", "rdata") + val write = portToExpression(rw.src.input.get) + val read = (dataType: @unchecked) match { + case VectorType(tpe, size) => cat(((0 until size) map (k => + WSubIndex(rdata, k, tpe, UNKNOWNGENDER))).reverse) + case _: UIntType => rdata + } + Seq( + Connect(NoInfo, memPortField(mem, s"RW_$i", "clk"), clock), + Connect(NoInfo, memPortField(mem, s"RW_$i", "addr"), address), + Connect(NoInfo, memPortField(mem, s"RW_$i", "en"), enable), + Connect(NoInfo, memPortField(mem, s"RW_$i", "wmode"), wmode), + Connect(NoInfo, WRef(rw.src.output.get.name), read) + ) ++ (dataType match { + case VectorType(tpe, size) => + val width = bitWidth(tpe).toInt + ((0 until size) map (k => + Connect(NoInfo, WSubIndex(wdata, k, tpe, UNKNOWNGENDER), + bits(write, (k + 1) * width - 1, k * width)))) ++ + ((0 until size) map (k => + Connect(NoInfo, WSubIndex(wmask, k, BoolType, UNKNOWNGENDER), + bits(WRef(rw.src.maskPort.get.name), k)))) + case _: UIntType => + Seq(Connect(NoInfo, wdata, write), Connect(NoInfo, wmask, one)) + }) + } + + lib.module(Block(mem +: (readConnects ++ writeConnects ++ readwriteConnects))) }}).toMap def run(c: Circuit): Circuit = { if (!synflops) c - else { - val circuit = c.copy(modules = (c.modules map (m => libMods getOrElse (m.name, m)))) - // print(circuit.serialize) - circuit - } + else c.copy(modules = (c.modules map (m => libMods getOrElse (m.name, m)))) } } diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index 5c41ef8b4..450a33a4f 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -75,6 +75,8 @@ object Utils { def and(e1: Expression, e2: Expression) = DoPrim(PrimOps.And, Seq(e1, e2), Nil, e1.tpe) + def or(e1: Expression, e2: Expression) = + DoPrim(PrimOps.Or, Seq(e1, e2), Nil, e1.tpe) def bits(e: Expression, high: BigInt, low: BigInt): Expression = DoPrim(PrimOps.Bits, Seq(e), Seq(high, low), UIntType(IntWidth(high-low+1))) def bits(e: Expression, idx: BigInt): Expression = bits(e, idx, idx) From 9670d76a3d50f23245cb09cbf060b862de738c7b Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 26 Jul 2017 10:10:44 -0700 Subject: [PATCH 046/273] Moar SRAM generators, yum yum --- macros/src/test/scala/MacroCompilerSpec.scala | 72 ++++++++++++++----- 1 file changed, 54 insertions(+), 18 deletions(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 5ef71fc6b..ad7a538c2 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -81,35 +81,71 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate } } +// A collection of standard SRAM generators. trait HasSRAMGenerator { import mdf.macrolib._ + // Generate a standard (read/write/combo) port for testing. 
+ def generateTestPort( + prefix: String, + width: Int, + depth: Int, + maskGran: Option[Int] = None, + read: Boolean, + readEnable: Boolean = false, + write: Boolean, + writeEnable: Boolean = false + ): MacroPort = { + val realPrefix = prefix + "_" + + MacroPort( + address=PolarizedPort(name=realPrefix + "addr", polarity=ActiveHigh), + clock=PolarizedPort(name=realPrefix + "clk", polarity=PositiveEdge), + + readEnable=if (readEnable) Some(PolarizedPort(name=realPrefix + "read_en", polarity=ActiveHigh)) else None, + writeEnable=if (writeEnable) Some(PolarizedPort(name=realPrefix + "write_en", polarity=ActiveHigh)) else None, + + output=if (read) Some(PolarizedPort(name=realPrefix + "dout", polarity=ActiveHigh)) else None, + input=if (write) Some(PolarizedPort(name=realPrefix + "din", polarity=ActiveHigh)) else None, + + maskPort=maskGran match { + case Some(x:Int) => Some(PolarizedPort(name=realPrefix + "mask", polarity=ActiveHigh)) + case _ => None + }, + maskGran=maskGran, + + width=width, depth=depth // These numbers don't matter here. + ) + } + + // Generate a read port for testing. + def generateReadPort(prefix: String, width: Int, depth: Int, readEnable: Boolean = false): MacroPort = { + generateTestPort(prefix, width, depth, write=false, read=true, readEnable=readEnable) + } + + // Generate a write port for testing. + def generateWritePort(prefix: String, width: Int, depth: Int, maskGran: Option[Int] = None, writeEnable: Boolean = true): MacroPort = { + generateTestPort(prefix, width, depth, maskGran=maskGran, write=true, read=false, writeEnable=writeEnable) + } + + // Generate a simple read-write port for testing. + def generateReadWritePort(prefix: String, width: Int, depth: Int, maskGran: Option[Int] = None): MacroPort = { + generateTestPort( + prefix, width, depth, maskGran=maskGran, + write=true, writeEnable=true, + read=true, readEnable=false + ) + } + // Generate a "simple" SRAM (active high/positive edge, 1 read-write port). def generateSRAM(name: String, prefix: String, width: Int, depth: Int, maskGran: Option[Int] = None, extraPorts: Seq[MacroExtraPort] = List()): SRAMMacro = { - val realPrefix = prefix + "_" SRAMMacro( macroType=SRAM, name=name, width=width, depth=depth, family="1rw", - ports=Seq(MacroPort( - address=PolarizedPort(name=realPrefix + "addr", polarity=ActiveHigh), - clock=PolarizedPort(name=realPrefix + "clk", polarity=PositiveEdge), - - writeEnable=Some(PolarizedPort(name=realPrefix + "write_en", polarity=ActiveHigh)), - - output=Some(PolarizedPort(name=realPrefix + "dout", polarity=ActiveHigh)), - input=Some(PolarizedPort(name=realPrefix + "din", polarity=ActiveHigh)), - - maskPort=maskGran match { - case Some(x:Int) => Some(PolarizedPort(name=realPrefix + "mask", polarity=ActiveHigh)) - case _ => None - }, - maskGran=maskGran, - - width=width, depth=depth // These numbers don't matter here. 
- )), + ports=Seq(generateReadWritePort(prefix, width, depth, maskGran)), extraPorts=extraPorts ) } From 79f73311d89348fce376b7b5fdc092ca756a7ce9 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 26 Jul 2017 10:22:27 -0700 Subject: [PATCH 047/273] Uniquify names --- macros/src/test/scala/SimpleSplitDepth.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index 94322fe10..3471f9b19 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -31,8 +31,8 @@ trait HasSimpleDepthTestGenerator { val memMaskBits = if (memHasMask) width / mem_maskGran.get else 0 val libMaskBits = if (libHasMask) width / lib_maskGran.get else 0 // Generate "mrw" vs "rw" tags. - val memTag = (if (memHasMask) "m" else "") + "rw" - val libTag = (if (libHasMask) "m" else "") + "rw" + val memTag = (if (memHasMask) "m" else "") + "rw" + (if (mem_maskGran.nonEmpty) s"_gran${mem_maskGran.get}" else "") + val libTag = (if (libHasMask) "m" else "") + "rw" + (if (lib_maskGran.nonEmpty) s"_gran${lib_maskGran.get}" else "") val mem = s"mem-${mem_depth}x${width}-${memTag}.json" val lib = s"lib-${lib_depth}x${width}-${libTag}.json" From ae139ede44438e05e1345b68f6c78627c9e9627e Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 26 Jul 2017 10:34:31 -0700 Subject: [PATCH 048/273] Fix another name collision --- macros/src/test/scala/SimpleSplitDepth.scala | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index 3471f9b19..62d6be7d0 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -18,6 +18,7 @@ trait HasSimpleDepthTestGenerator { def mem_maskGran: Option[Int] = None def lib_maskGran: Option[Int] = None def extraPorts: Seq[mdf.macrolib.MacroExtraPort] = List() + def extraTag: String = "" require (mem_depth >= lib_depth) @@ -34,9 +35,11 @@ trait HasSimpleDepthTestGenerator { val memTag = (if (memHasMask) "m" else "") + "rw" + (if (mem_maskGran.nonEmpty) s"_gran${mem_maskGran.get}" else "") val libTag = (if (libHasMask) "m" else "") + "rw" + (if (lib_maskGran.nonEmpty) s"_gran${lib_maskGran.get}" else "") - val mem = s"mem-${mem_depth}x${width}-${memTag}.json" - val lib = s"lib-${lib_depth}x${width}-${libTag}.json" - val v = s"split_depth_${mem_depth}x${width}_${memTag}.v" + val extraTagPrefixed = if (extraTag == "") "" else ("-" + extraTag) + + val mem = s"mem-${mem_depth}x${width}-${memTag}${extraTagPrefixed}.json" + val lib = s"lib-${lib_depth}x${width}-${libTag}${extraTagPrefixed}.json" + val v = s"split_depth_${mem_depth}x${width}_${memTag}${extraTagPrefixed}.v" val mem_name = "target_memory" val mem_addr_width = ceilLog2(mem_depth) @@ -403,6 +406,7 @@ class SplitDepth2048x8_extraPort extends MacroCompilerSpec with HasSRAMGenerator override lazy val extraPorts = List( MacroExtraPort(name="extra_port", width=8, portType=Constant, value=0xff) ) + override lazy val extraTag = "extraPort" val outputCustom = """ From 870e3c1af11aa1d2e5cf9294e85cfe287b92138e Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 26 Jul 2017 10:35:25 -0700 Subject: [PATCH 049/273] All depth tests now fully automatic --- macros/src/test/scala/SimpleSplitDepth.scala | 189 +++++++++++++------ 1 file changed, 129 insertions(+), 60 deletions(-) diff --git a/macros/src/test/scala/SimpleSplitDepth.scala 
b/macros/src/test/scala/SimpleSplitDepth.scala index 62d6be7d0..a4e8c8c33 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -336,66 +336,6 @@ class SplitDepth2048x32_mrw_mem9_lib1 extends MacroCompilerSpec with HasSRAMGene //execute(mem, lib, false, output) } -//~ class SplitDepth2048x8_r_mw extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-2048x8-r-mw.json") - //~ val lib = new File(macroDir, "lib-1024x8-r-mw.json") - //~ val v = new File(testDir, "split_depth_2048x8_r_mw.v") - //~ val output = -//~ """ -//~ circuit name_of_sram_module : - //~ module name_of_sram_module : - //~ input clock : Clock - //~ input W0A : UInt<11> - //~ input W0I : UInt<8> - //~ input W0E : UInt<1> - //~ input W0M : UInt<1> - //~ input clock : Clock - //~ input R0A : UInt<11> - //~ output R0O : UInt<8> - - //~ node W0A_sel = bits(W0A, 10, 10) - //~ node R0A_sel = bits(R0A, 10, 10) - //~ inst mem_0_0 of vendor_sram - //~ mem_0_0.clock <= clock - //~ mem_0_0.W0A <= W0A - //~ mem_0_0.W0I <= bits(W0I, 7, 0) - //~ mem_0_0.W0M <= bits(W0M, 0, 0) - //~ mem_0_0.W0W <= and(UInt<1>("h1"), eq(W0A_sel, UInt<1>("h0"))) - //~ mem_0_0.W0E <= and(W0E, eq(W0A_sel, UInt<1>("h0"))) - //~ mem_0_0.clock <= clock - //~ mem_0_0.R0A <= R0A - //~ node R0O_0_0 = bits(mem_0_0.R0O, 7, 0) - //~ node R0O_0 = R0O_0_0 - //~ inst mem_1_0 of vendor_sram - //~ mem_1_0.clock <= clock - //~ mem_1_0.W0A <= W0A - //~ mem_1_0.W0I <= bits(W0I, 7, 0) - //~ mem_1_0.W0M <= bits(W0M, 0, 0) - //~ mem_1_0.W0W <= and(UInt<1>("h1"), eq(W0A_sel, UInt<1>("h1"))) - //~ mem_1_0.W0E <= and(W0E, eq(W0A_sel, UInt<1>("h1"))) - //~ mem_1_0.clock <= clock - //~ mem_1_0.R0A <= R0A - //~ node R0O_1_0 = bits(mem_1_0.R0O, 7, 0) - //~ node R0O_1 = R0O_1_0 - //~ R0O <= mux(eq(R0A_sel, UInt<1>("h0")), R0O_0, mux(eq(R0A_sel, UInt<1>("h1")), R0O_1, UInt<1>("h0"))) - - //~ extmodule vendor_sram : - //~ input clock : Clock - //~ input R0A : UInt<10> - //~ output R0O : UInt<8> - //~ input clock : Clock - //~ input W0A : UInt<10> - //~ input W0I : UInt<8> - //~ input W0E : UInt<1> - //~ input W0W : UInt<1> - //~ input W0M : UInt<1> - - //~ defname = vendor_sram -//~ """ - //~ compile(mem, lib, v, false) - //~ execute(mem, lib, false, output) -//~ } - // Try an extra port class SplitDepth2048x8_extraPort extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { import mdf.macrolib._ @@ -453,3 +393,132 @@ circuit target_memory : compile(mem, lib, v, false) execute(mem, lib, false, outputCustom) } + +// Split read and (masked) write ports (r+w). 
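For readers skimming patches 047 and 048 above: their only purpose is to keep the generated test collateral from colliding, by folding the mask granularity and any extra tag into the mem/lib JSON and Verilog file names. Below is a small stand-alone sketch of that naming rule; the object and method names (TestFileNaming, tag, fileNames) are mine for illustration, not repo code, but the string construction mirrors the diff above.

object TestFileNaming extends App {
  // Build the "rw"/"mrw_granN" style tag used in the generated file names.
  def tag(maskGran: Option[Int]): String =
    (if (maskGran.nonEmpty) "m" else "") + "rw" + maskGran.map(g => s"_gran$g").getOrElse("")

  def fileNames(memDepth: Int, libDepth: Int, width: Int,
                memMaskGran: Option[Int], libMaskGran: Option[Int],
                extraTag: String = ""): (String, String, String) = {
    val memTag = tag(memMaskGran)
    val libTag = tag(libMaskGran)
    val suffix = if (extraTag.isEmpty) "" else "-" + extraTag
    (s"mem-${memDepth}x${width}-${memTag}${suffix}.json",
     s"lib-${libDepth}x${width}-${libTag}${suffix}.json",
     s"split_depth_${memDepth}x${width}_${memTag}${suffix}.v")
  }

  // e.g. the unmasked 2048x8 test with an extra constant port and extraTag = "extraPort":
  println(fileNames(2048, 1024, 8, None, None, "extraPort"))
  // -> (mem-2048x8-rw-extraPort.json, lib-1024x8-rw-extraPort.json, split_depth_2048x8_rw-extraPort.v)
}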
+class SplitDepth_SplitPorts extends MacroCompilerSpec with HasSRAMGenerator { + lazy val width = 8 + lazy val mem_depth = 2048 + lazy val lib_depth = 1024 + + override val memPrefix = testDir + override val libPrefix = testDir + + import mdf.macrolib._ + + "Non-masked split lib; split mem" should "split fine" in { + val lib = "lib-split_depth-r-mw-lib-regular-mem.json" + val mem = "mem-split_depth-r-mw-lib-regular-mem.json" + val v = "split_depth-r-mw-lib-regular-mem.v" + + val libMacro = SRAMMacro( + macroType=SRAM, + name="awesome_lib_mem", + width=width, + depth=lib_depth, + family="1r1w", + ports=Seq( + generateReadPort("innerA", width, lib_depth), + generateWritePort("innerB", width, lib_depth) + ) + ) + + val memMacro = SRAMMacro( + macroType=SRAM, + name="target_memory", + width=width, + depth=mem_depth, + family="1r1w", + ports=Seq( + generateReadPort("outerB", width, mem_depth), + generateWritePort("outerA", width, mem_depth) + ) + ) + + writeToLib(mem, Seq(memMacro)) + writeToLib(lib, Seq(libMacro)) + + val output = +""" +circuit target_memory : + module target_memory : + input outerB_clk : Clock + input outerB_addr : UInt<11> + output outerB_dout : UInt<8> + input outerA_clk : Clock + input outerA_addr : UInt<11> + input outerA_din : UInt<8> + input outerA_write_en : UInt<1> + + node outerB_addr_sel = bits(outerB_addr, 10, 10) + node outerA_addr_sel = bits(outerA_addr, 10, 10) + inst mem_0_0 of awesome_lib_mem + mem_0_0.innerA_clk <= outerB_clk + mem_0_0.innerA_addr <= outerB_addr + node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) + node outerB_dout_0 = outerB_dout_0_0 + mem_0_0.innerB_clk <= outerA_clk + mem_0_0.innerB_addr <= outerA_addr + mem_0_0.innerB_din <= bits(outerA_din, 7, 0) + mem_0_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) + inst mem_1_0 of awesome_lib_mem + mem_1_0.innerA_clk <= outerB_clk + mem_1_0.innerA_addr <= outerB_addr + node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) + node outerB_dout_1 = outerB_dout_1_0 + mem_1_0.innerB_clk <= outerA_clk + mem_1_0.innerB_addr <= outerA_addr + mem_1_0.innerB_din <= bits(outerA_din, 7, 0) + mem_1_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) + outerB_dout <= mux(eq(outerB_addr_sel, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) + + extmodule awesome_lib_mem : + input innerA_clk : Clock + input innerA_addr : UInt<10> + output innerA_dout : UInt<8> + input innerB_clk : Clock + input innerB_addr : UInt<10> + input innerB_din : UInt<8> + input innerB_write_en : UInt<1> + + defname = awesome_lib_mem +""" + + compile(mem, lib, v, false) + execute(mem, lib, false, output) + } + + "Non-masked split lib; regular mem" should "split fine" in { + // Enable this test when the memory compiler can compile non-matched + // memories (e.g. mrw mem and r+mw lib). + // Right now all we can get is a "port count must match" error. + // [edwardw]: does this even make sense? Can we compile a 2-ported memory using 1-ported memories? 
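The expected FIRRTL in the test above encodes the basic depth-splitting arithmetic: a 2048-entry mem built from 1024-entry libs needs two instances, and the instance is chosen by the top address bit (bits(addr, 10, 10)). Here is a self-contained sketch of that arithmetic in plain Scala; ceilLog2 is reimplemented locally under the assumption that it behaves like the firrtl.Utils.ceilLog2 used by these tests, and the object name is made up.

object DepthSplitMath extends App {
  // Smallest n such that 2^n >= x (assumed to match firrtl.Utils.ceilLog2 for these sizes).
  def ceilLog2(x: Int): Int = if (x <= 1) 0 else 32 - Integer.numberOfLeadingZeros(x - 1)

  def split(memDepth: Int, libDepth: Int): (Int, Int) = {
    // Number of lib instances needed, rounding up for non-multiples.
    val instances = math.ceil(memDepth.toFloat / libDepth).toInt
    // Address bits that pick which lib instance is active.
    val selectBits = ceilLog2(memDepth) - ceilLog2(libDepth)
    (instances, selectBits)
  }

  println(split(2048, 1024)) // (2, 1): one select bit, address bit 10
  println(split(4096, 1024)) // (4, 2)
  println(split(2049, 1024)) // (3, 2): non-power-of-two depth rounds up
}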
+ pending + + val lib = "lib-split_depth-r-mw-lib-regular-mem.json" + val mem = "mem-split_depth-r-mw-lib-regular-mem.json" + val v = "split_depth-r-mw-lib-regular-mem.v" + + val libMacro = SRAMMacro( + macroType=SRAM, + name="awesome_lib_mem", + width=width, + depth=lib_depth, + family="1rw", + ports=Seq( + generateReadPort("innerA", width, lib_depth), + generateWritePort("innerB", width, lib_depth) + ) + ) + + writeToLib(mem, Seq(generateSRAM("target_memory", "outer", width, mem_depth))) + writeToLib(lib, Seq(libMacro)) + + val output = +""" +TODO +""" + + compile(mem, lib, v, false) + execute(mem, lib, false, output) + } +} From d83fb47da34a4ba537f4d1b539821b9529308e05 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 26 Jul 2017 10:58:50 -0700 Subject: [PATCH 050/273] Add split port tests --- macros/src/test/scala/MacroCompilerSpec.scala | 6 +++ macros/src/test/scala/SimpleSplitDepth.scala | 50 ++++++++++++++++--- 2 files changed, 48 insertions(+), 8 deletions(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index ad7a538c2..ac0f25d09 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -8,6 +8,12 @@ import java.io.{File, StringWriter} // TODO: we should think of a less brittle way to run these tests. abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalatest.Matchers { + /** + * Terminology note: + * mem - target memory to compile, in design (e.g. Mem() in rocket) + * lib - technology SRAM(s) to use to compile mem + */ + val macroDir: String = "tapeout/src/test/resources/macros" val testDir: String = "test_run_dir/macros" new File(testDir).mkdirs // Make sure the testDir exists diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index a4e8c8c33..b33a80792 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -394,8 +394,8 @@ circuit target_memory : execute(mem, lib, false, outputCustom) } -// Split read and (masked) write ports (r+w). -class SplitDepth_SplitPorts extends MacroCompilerSpec with HasSRAMGenerator { +// Split read and (non-masked) write ports (r+w). +class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGenerator { lazy val width = 8 lazy val mem_depth = 2048 lazy val lib_depth = 1024 @@ -406,9 +406,9 @@ class SplitDepth_SplitPorts extends MacroCompilerSpec with HasSRAMGenerator { import mdf.macrolib._ "Non-masked split lib; split mem" should "split fine" in { - val lib = "lib-split_depth-r-mw-lib-regular-mem.json" - val mem = "mem-split_depth-r-mw-lib-regular-mem.json" - val v = "split_depth-r-mw-lib-regular-mem.v" + val lib = "lib-split_depth-rw-split-lib-split-mem.json" + val mem = "mem-split_depth-rw-split-lib-split-mem.json" + val v = "split_depth-rw-split-lib-split-mem.v" val libMacro = SRAMMacro( macroType=SRAM, @@ -487,6 +487,40 @@ circuit target_memory : execute(mem, lib, false, output) } + "Non-masked regular lib; split mem" should "split fine" in { + // Enable this test when the memory compiler can compile non-matched + // memories (e.g. mrw mem and r+mw lib). + // Right now all we can get is a "port count must match" error. 
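Several of the pending tests here are blocked on a "port count must match" error. As a reading aid only, the sketch below shows the kind of precondition that message implies; this is my guess at its shape, not the actual MacroCompiler code, and the object name is hypothetical.

import mdf.macrolib.SRAMMacro

object PortCountCheck {
  // Illustrative only: the compiler currently appears to require the mem and lib
  // macros to expose the same number of ports before attempting a mapping.
  def check(mem: SRAMMacro, lib: SRAMMacro): Unit =
    require(mem.ports.size == lib.ports.size,
      s"port count must match: mem has ${mem.ports.size} port(s), lib has ${lib.ports.size}")
}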
+ pending + + val lib = "lib-split_depth-r-mw-regular-lib-split-mem.json" + val mem = "mem-split_depth-r-mw-regular-lib-split-mem.json" + val v = "split_depth-r-mw-regular-lib-split-mem.v" + + val memMacro = SRAMMacro( + macroType=SRAM, + name="target_memory", + width=width, + depth=mem_depth, + family="1r1w", + ports=Seq( + generateReadPort("outerB", width, mem_depth), + generateWritePort("outerA", width, mem_depth) + ) + ) + + writeToLib(mem, Seq(memMacro)) + writeToLib(lib, Seq(generateSRAM("awesome_lib_mem", "lib", width, lib_depth))) + + val output = +""" +TODO +""" + + compile(mem, lib, v, false) + execute(mem, lib, false, output) + } + "Non-masked split lib; regular mem" should "split fine" in { // Enable this test when the memory compiler can compile non-matched // memories (e.g. mrw mem and r+mw lib). @@ -494,9 +528,9 @@ circuit target_memory : // [edwardw]: does this even make sense? Can we compile a 2-ported memory using 1-ported memories? pending - val lib = "lib-split_depth-r-mw-lib-regular-mem.json" - val mem = "mem-split_depth-r-mw-lib-regular-mem.json" - val v = "split_depth-r-mw-lib-regular-mem.v" + val lib = "lib-split_depth-rw-split-lib-regular-mem.json" + val mem = "mem-split_depth-rw-split-lib-regular-mem.json" + val v = "split_depth-rw-split-lib-regular-mem.v" val libMacro = SRAMMacro( macroType=SRAM, From dd4c55aa095acb6d299b40ccecebb9b17f4ff060 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 26 Jul 2017 11:51:03 -0700 Subject: [PATCH 051/273] Implement the rest of the split depth tests --- macros/src/test/scala/SimpleSplitDepth.scala | 187 ++++++++++++++++++- 1 file changed, 178 insertions(+), 9 deletions(-) diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index b33a80792..f633364a0 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -406,9 +406,9 @@ class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGener import mdf.macrolib._ "Non-masked split lib; split mem" should "split fine" in { - val lib = "lib-split_depth-rw-split-lib-split-mem.json" - val mem = "mem-split_depth-rw-split-lib-split-mem.json" - val v = "split_depth-rw-split-lib-split-mem.v" + val lib = "lib-split_depth-r-w-split-lib-split-mem.json" + val mem = "mem-split_depth-r-w-split-lib-split-mem.json" + val v = "split_depth-r-w-split-lib-split-mem.v" val libMacro = SRAMMacro( macroType=SRAM, @@ -493,9 +493,9 @@ circuit target_memory : // Right now all we can get is a "port count must match" error. pending - val lib = "lib-split_depth-r-mw-regular-lib-split-mem.json" - val mem = "mem-split_depth-r-mw-regular-lib-split-mem.json" - val v = "split_depth-r-mw-regular-lib-split-mem.v" + val lib = "lib-split_depth-r-w-regular-lib-split-mem.json" + val mem = "mem-split_depth-r-w-regular-lib-split-mem.json" + val v = "split_depth-r-w-regular-lib-split-mem.v" val memMacro = SRAMMacro( macroType=SRAM, @@ -528,9 +528,9 @@ TODO // [edwardw]: does this even make sense? Can we compile a 2-ported memory using 1-ported memories? 
pending - val lib = "lib-split_depth-rw-split-lib-regular-mem.json" - val mem = "mem-split_depth-rw-split-lib-regular-mem.json" - val v = "split_depth-rw-split-lib-regular-mem.v" + val lib = "lib-split_depth-r-w-split-lib-regular-mem.json" + val mem = "mem-split_depth-r-w-split-lib-regular-mem.json" + val v = "split_depth-r-w-split-lib-regular-mem.v" val libMacro = SRAMMacro( macroType=SRAM, @@ -556,3 +556,172 @@ TODO execute(mem, lib, false, output) } } + +// Split read and (masked) write ports (r+mw). +class SplitDepth_SplitPortsMasked extends MacroCompilerSpec with HasSRAMGenerator { + lazy val width = 8 + lazy val mem_depth = 2048 + lazy val lib_depth = 1024 + lazy val mem_maskGran = Some(8) + lazy val lib_maskGran = Some(1) + + override val memPrefix = testDir + override val libPrefix = testDir + + import mdf.macrolib._ + + "Masked split lib; split mem" should "split fine" in { + val lib = "lib-split_depth-r-mw-split-lib-split-mem.json" + val mem = "mem-split_depth-r-mw-split-lib-split-mem.json" + val v = "split_depth-r-mw-split-lib-split-mem.v" + + val libMacro = SRAMMacro( + macroType=SRAM, + name="awesome_lib_mem", + width=width, + depth=lib_depth, + family="1r1w", + ports=Seq( + generateReadPort("innerA", width, lib_depth), + generateWritePort("innerB", width, lib_depth, lib_maskGran) + ) + ) + + val memMacro = SRAMMacro( + macroType=SRAM, + name="target_memory", + width=width, + depth=mem_depth, + family="1r1w", + ports=Seq( + generateReadPort("outerB", width, mem_depth), + generateWritePort("outerA", width, mem_depth, mem_maskGran) + ) + ) + + writeToLib(mem, Seq(memMacro)) + writeToLib(lib, Seq(libMacro)) + + val output = +""" +circuit target_memory : + module target_memory : + input outerB_clk : Clock + input outerB_addr : UInt<11> + output outerB_dout : UInt<8> + input outerA_clk : Clock + input outerA_addr : UInt<11> + input outerA_din : UInt<8> + input outerA_write_en : UInt<1> + input outerA_mask : UInt<1> + + node outerB_addr_sel = bits(outerB_addr, 10, 10) + node outerA_addr_sel = bits(outerA_addr, 10, 10) + inst mem_0_0 of awesome_lib_mem + mem_0_0.innerA_clk <= outerB_clk + mem_0_0.innerA_addr <= outerB_addr + node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) + node outerB_dout_0 = outerB_dout_0_0 + mem_0_0.innerB_clk <= outerA_clk + mem_0_0.innerB_addr <= outerA_addr + mem_0_0.innerB_din <= bits(outerA_din, 7, 0) + mem_0_0.innerB_mask <= cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), bits(outerA_mask, 0, 0)))))))) + mem_0_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) + inst mem_1_0 of awesome_lib_mem + mem_1_0.innerA_clk <= outerB_clk + mem_1_0.innerA_addr <= outerB_addr + node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) + node outerB_dout_1 = outerB_dout_1_0 + mem_1_0.innerB_clk <= outerA_clk + mem_1_0.innerB_addr <= outerA_addr + mem_1_0.innerB_din <= bits(outerA_din, 7, 0) + mem_1_0.innerB_mask <= cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), bits(outerA_mask, 0, 0)))))))) + mem_1_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) + outerB_dout <= mux(eq(outerB_addr_sel, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel, UInt<1>("h1")), 
outerB_dout_1, UInt<1>("h0"))) + + extmodule awesome_lib_mem : + input innerA_clk : Clock + input innerA_addr : UInt<10> + output innerA_dout : UInt<8> + input innerB_clk : Clock + input innerB_addr : UInt<10> + input innerB_din : UInt<8> + input innerB_write_en : UInt<1> + input innerB_mask : UInt<8> + + defname = awesome_lib_mem +""" + + compile(mem, lib, v, false) + execute(mem, lib, false, output) + } + + "Non-masked regular lib; split mem" should "split fine" in { + // Enable this test when the memory compiler can compile non-matched + // memories (e.g. mrw mem and r+mw lib). + // Right now all we can get is a "port count must match" error. + pending + + val lib = "lib-split_depth-r-mw-regular-lib-split-mem.json" + val mem = "mem-split_depth-r-mw-regular-lib-split-mem.json" + val v = "split_depth-r-mw-regular-lib-split-mem.v" + + val memMacro = SRAMMacro( + macroType=SRAM, + name="target_memory", + width=width, + depth=mem_depth, + family="1r1w", + ports=Seq( + generateReadPort("outerB", width, mem_depth), + generateWritePort("outerA", width, mem_depth, mem_maskGran) + ) + ) + + writeToLib(mem, Seq(memMacro)) + writeToLib(lib, Seq(generateSRAM("awesome_lib_mem", "lib", width, lib_depth, lib_maskGran))) + + val output = +""" +TODO +""" + + compile(mem, lib, v, false) + execute(mem, lib, false, output) + } + + "Non-masked split lib; regular mem" should "split fine" in { + // Enable this test when the memory compiler can compile non-matched + // memories (e.g. mrw mem and r+mw lib). + // Right now all we can get is a "port count must match" error. + // [edwardw]: does this even make sense? Can we compile a 2-ported memory using 1-ported memories? + pending + + val lib = "lib-split_depth-r-mw-split-lib-regular-mem.json" + val mem = "mem-split_depth-r-mw-split-lib-regular-mem.json" + val v = "split_depth-r-mw-split-lib-regular-mem.v" + + val libMacro = SRAMMacro( + macroType=SRAM, + name="awesome_lib_mem", + width=width, + depth=lib_depth, + family="1rw", + ports=Seq( + generateReadPort("innerA", width, lib_depth), + generateWritePort("innerB", width, lib_depth, lib_maskGran) + ) + ) + + writeToLib(mem, Seq(generateSRAM("target_memory", "outer", width, mem_depth, mem_maskGran))) + writeToLib(lib, Seq(libMacro)) + + val output = +""" +TODO +""" + + compile(mem, lib, v, false) + execute(mem, lib, false, output) + } +} From 484906b85c6312cb09b1e1f3a5f8251f6d2c73c9 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 26 Jul 2017 16:43:23 -0700 Subject: [PATCH 052/273] Refactor test generator from depth --- macros/src/test/scala/MacroCompilerSpec.scala | 102 +++++ macros/src/test/scala/SimpleSplitDepth.scala | 357 ++++++++---------- 2 files changed, 249 insertions(+), 210 deletions(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index ac0f25d09..bac6862c1 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -3,6 +3,7 @@ package barstools.macros import firrtl.ir.{Circuit, NoInfo} import firrtl.passes.RemoveEmpty import firrtl.Parser.parse +import firrtl.Utils.ceilLog2 import java.io.{File, StringWriter} // TODO: we should think of a less brittle way to run these tests. @@ -157,6 +158,107 @@ trait HasSRAMGenerator { } } +// Generic "simple" test generator. +// Set up scaffolding for generating memories, files, etc. +// Override this generator to specify the expected FIRRTL output. 
+trait HasSimpleTestGenerator { + this: MacroCompilerSpec with HasSRAMGenerator => + // Override these with "override lazy val". + // Why lazy? These are used in the constructor here so overriding non-lazily + // would be too late. + def memWidth: Int + def libWidth: Int + def memDepth: Int + def libDepth: Int + def memMaskGran: Option[Int] = None + def libMaskGran: Option[Int] = None + def extraPorts: Seq[mdf.macrolib.MacroExtraPort] = List() + def extraTag: String = "" + + // Override this in the sub-generator if you need a more specific name. + // Defaults to using reflection to pull the name of the test using this + // generator. + def generatorType: String = this.getClass.getSimpleName + + require (memDepth >= libDepth) + + override val memPrefix = testDir + override val libPrefix = testDir + + // Convenience variables to check if a mask exists. + val memHasMask = memMaskGran != None + val libHasMask = libMaskGran != None + // We need to figure out how many mask bits there are in the mem. + val memMaskBits = if (memHasMask) memWidth / memMaskGran.get else 0 + val libMaskBits = if (libHasMask) libWidth / libMaskGran.get else 0 + + val extraTagPrefixed = if (extraTag == "") "" else ("-" + extraTag) + + val mem = s"mem-${generatorType}${extraTagPrefixed}.json" + val lib = s"lib-${generatorType}${extraTagPrefixed}.json" + val v = s"${generatorType}${extraTagPrefixed}.v" + + val mem_name = "target_memory" + val mem_addr_width = ceilLog2(memDepth) + + val lib_name = "awesome_lib_mem" + val lib_addr_width = ceilLog2(libDepth) + + writeToLib(lib, Seq(generateSRAM(lib_name, "lib", libWidth, libDepth, libMaskGran, extraPorts))) + writeToMem(mem, Seq(generateSRAM(mem_name, "outer", memWidth, memDepth, memMaskGran))) + + // Number of lib instances needed to hold the mem. + // Round up (e.g. 1.5 instances = effectively 2 instances) + val expectedInstances = math.ceil(memDepth.toFloat / libDepth).toInt + val selectBits = mem_addr_width - lib_addr_width + + // Generate the header (contains the circuit statement and the target memory + // module. + def generateHeader(): String = { + val headerMask = if (memHasMask) s"input outer_mask : UInt<${memMaskBits}>" else "" + s""" +circuit $mem_name : + module $mem_name : + input outer_clk : Clock + input outer_addr : UInt<$mem_addr_width> + input outer_din : UInt<$memWidth> + output outer_dout : UInt<$memWidth> + input outer_write_en : UInt<1> + ${headerMask} + """ + } + + // Generate the footer (contains the target memory extmodule). + def generateFooter(): String = { + val footerMask = if (libHasMask) s"input lib_mask : UInt<${libMaskBits}>" else "" + s""" + extmodule $lib_name : + input lib_clk : Clock + input lib_addr : UInt<$lib_addr_width> + input lib_din : UInt<$libWidth> + output lib_dout : UInt<$libWidth> + input lib_write_en : UInt<1> + ${footerMask} + + defname = $lib_name + """ + } + + // Abstract method to generate body; to be overridden by specific generator type. + def generateBody(): String + + // Generate the entire output from header, body, and footer. 
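As a usage sketch of the scaffolding introduced in this patch: a concrete test mixes the generator traits in and overrides the parameters with lazy vals (lazy because they are read in the trait's initializer). The class name and sizes below are hypothetical, but the pattern matches the SplitDepth tests rewritten later in this series.

// Hypothetical example, assuming the traits as defined in this patch series.
class SplitDepth8192x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator {
  override lazy val width = 8
  override lazy val memDepth = 8192
  override lazy val libDepth = 1024

  compile(mem, lib, v, false)
  execute(mem, lib, false, output)
}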
+ def generateOutput(): String = { + s""" +${generateHeader} +${generateBody} +${generateFooter} + """ + } + + val output = generateOutput() +} + //~ class RocketChipTest extends MacroCompilerSpec { //~ val mem = new File(macroDir, "rocketchip.json") //~ val lib = new File(macroDir, "mylib.json") diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index f633364a0..035421040 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -1,118 +1,53 @@ package barstools.macros -import firrtl.Utils.ceilLog2 import mdf.macrolib._ // Test the depth splitting aspect of the memory compiler. // This file is for simple tests: one read-write port, powers of two sizes, etc. // For example, implementing a 4096x32 memory using four 1024x32 memories. -trait HasSimpleDepthTestGenerator { +trait HasSimpleDepthTestGenerator extends HasSimpleTestGenerator { this: MacroCompilerSpec with HasSRAMGenerator => - // Override these with "override lazy val". - // Why lazy? These are used in the constructor here so overriding non-lazily - // would be too late. def width: Int - def mem_depth: Int - def lib_depth: Int - def mem_maskGran: Option[Int] = None - def lib_maskGran: Option[Int] = None - def extraPorts: Seq[mdf.macrolib.MacroExtraPort] = List() - def extraTag: String = "" - - require (mem_depth >= lib_depth) - - override val memPrefix = testDir - override val libPrefix = testDir - - // Convenience variables to check if a mask exists. - val memHasMask = mem_maskGran != None - val libHasMask = lib_maskGran != None - // We need to figure out how many mask bits there are in the mem. - val memMaskBits = if (memHasMask) width / mem_maskGran.get else 0 - val libMaskBits = if (libHasMask) width / lib_maskGran.get else 0 - // Generate "mrw" vs "rw" tags. - val memTag = (if (memHasMask) "m" else "") + "rw" + (if (mem_maskGran.nonEmpty) s"_gran${mem_maskGran.get}" else "") - val libTag = (if (libHasMask) "m" else "") + "rw" + (if (lib_maskGran.nonEmpty) s"_gran${lib_maskGran.get}" else "") - - val extraTagPrefixed = if (extraTag == "") "" else ("-" + extraTag) - - val mem = s"mem-${mem_depth}x${width}-${memTag}${extraTagPrefixed}.json" - val lib = s"lib-${lib_depth}x${width}-${libTag}${extraTagPrefixed}.json" - val v = s"split_depth_${mem_depth}x${width}_${memTag}${extraTagPrefixed}.v" - - val mem_name = "target_memory" - val mem_addr_width = ceilLog2(mem_depth) - - val lib_name = "awesome_lib_mem" - val lib_addr_width = ceilLog2(lib_depth) - - writeToLib(lib, Seq(generateSRAM(lib_name, "lib", width, lib_depth, lib_maskGran, extraPorts))) - writeToMem(mem, Seq(generateSRAM(mem_name, "outer", width, mem_depth, mem_maskGran))) - - // Number of lib instances needed to hold the mem. - // Round up (e.g. 
1.5 instances = effectively 2 instances) - val expectedInstances = math.ceil(mem_depth.toFloat / lib_depth).toInt - val selectBits = mem_addr_width - lib_addr_width - - val headerMask = if (memHasMask) s"input outer_mask : UInt<${memMaskBits}>" else "" - val header = s""" -circuit $mem_name : - module $mem_name : - input outer_clk : Clock - input outer_addr : UInt<$mem_addr_width> - input outer_din : UInt<$width> - output outer_dout : UInt<$width> - input outer_write_en : UInt<1> - ${headerMask} -""" - val footerMask = if (libHasMask) s"input lib_mask : UInt<${libMaskBits}>" else "" - val footer = s""" - extmodule $lib_name : - input lib_clk : Clock - input lib_addr : UInt<$lib_addr_width> - input lib_din : UInt<$width> - output lib_dout : UInt<$width> - input lib_write_en : UInt<1> - ${footerMask} + override lazy val memWidth = width + override lazy val libWidth = width - defname = $lib_name -""" + // Generate a depth-splitting body. + override def generateBody(): String = { + var output = "" - var output = header - - if (selectBits > 0) { - output += -s""" + if (selectBits > 0) { + output += + s""" node outer_addr_sel = bits(outer_addr, ${mem_addr_width - 1}, $lib_addr_width) -""" - } + """ + } - for (i <- 0 to expectedInstances - 1) { - // We only support simple masks for now (either libMask == memMask or libMask == 1) - val maskStatement = if (libHasMask) { - if (lib_maskGran.get == mem_maskGran.get) { - s"""mem_${i}_0.lib_mask <= bits(outer_mask, 0, 0)""" - } else if (lib_maskGran.get == 1) { - // Construct a mask string. - // Each bit gets the # of bits specified in maskGran. - // Specify in descending order (MSB first) - - // This builds an array like m[1], m[1], m[0], m[0] - val maskBitsArr: Seq[String] = ((memMaskBits - 1 to 0 by -1) flatMap (maskBit => { - ((0 to mem_maskGran.get - 1) map (_ => s"bits(outer_mask, ${maskBit}, ${maskBit})")) - })) - // Now build it into a recursive string like - // cat(m[1], cat(m[1], cat(m[0], m[0]))) - val maskBitsStr: String = maskBitsArr.reverse.tail.foldLeft(maskBitsArr.reverse.head)((prev: String, next: String) => s"cat(${next}, ${prev})") - s"""mem_${i}_0.lib_mask <= ${maskBitsStr}""" - } else "" // TODO: implement when non-bitmasked memories are supported - } else "" // No mask - - val enableIdentifier = if (selectBits > 0) s"""eq(outer_addr_sel, UInt<${selectBits}>("h${i.toHexString}"))""" else "UInt<1>(\"h1\")" - output += -s""" + for (i <- 0 to expectedInstances - 1) { + // We only support simple masks for now (either libMask == memMask or libMask == 1) + val maskStatement = if (libHasMask) { + if (libMaskGran.get == memMaskGran.get) { + s"""mem_${i}_0.lib_mask <= bits(outer_mask, 0, 0)""" + } else if (libMaskGran.get == 1) { + // Construct a mask string. + // Each bit gets the # of bits specified in maskGran. 
+ // Specify in descending order (MSB first) + + // This builds an array like m[1], m[1], m[0], m[0] + val maskBitsArr: Seq[String] = ((memMaskBits - 1 to 0 by -1) flatMap (maskBit => { + ((0 to memMaskGran.get - 1) map (_ => s"bits(outer_mask, ${maskBit}, ${maskBit})")) + })) + // Now build it into a recursive string like + // cat(m[1], cat(m[1], cat(m[0], m[0]))) + val maskBitsStr: String = maskBitsArr.reverse.tail.foldLeft(maskBitsArr.reverse.head)((prev: String, next: String) => s"cat(${next}, ${prev})") + s"""mem_${i}_0.lib_mask <= ${maskBitsStr}""" + } else "" // TODO: implement when non-bitmasked memories are supported + } else "" // No mask + + val enableIdentifier = if (selectBits > 0) s"""eq(outer_addr_sel, UInt<${selectBits}>("h${i.toHexString}"))""" else "UInt<1>(\"h1\")" + output += + s""" inst mem_${i}_0 of awesome_lib_mem mem_${i}_0.lib_clk <= outer_clk mem_${i}_0.lib_addr <= outer_addr @@ -121,32 +56,33 @@ s""" ${maskStatement} mem_${i}_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), ${enableIdentifier}) node outer_dout_${i} = outer_dout_${i}_0 -""" - } - def generate_outer_dout_tree(i:Int, expectedInstances: Int): String = { - if (i > expectedInstances - 1) { - "UInt<1>(\"h0\")" + """ + } + def generate_outer_dout_tree(i:Int, expectedInstances: Int): String = { + if (i > expectedInstances - 1) { + "UInt<1>(\"h0\")" + } else { + "mux(eq(outer_addr_sel, UInt<%d>(\"h%s\")), outer_dout_%d, %s)".format( + selectBits, i.toHexString, i, generate_outer_dout_tree(i + 1, expectedInstances) + ) + } + } + output += " outer_dout <= " + if (selectBits > 0) { + output += generate_outer_dout_tree(0, expectedInstances) } else { - "mux(eq(outer_addr_sel, UInt<%d>(\"h%s\")), outer_dout_%d, %s)".format( - selectBits, i.toHexString, i, generate_outer_dout_tree(i + 1, expectedInstances) - ) + output += """mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0"))""" } - } - output += " outer_dout <= " - if (selectBits > 0) { - output += generate_outer_dout_tree(0, expectedInstances) - } else { - output += """mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0"))""" - } - output += footer + return output + } } // Try different widths class SplitDepth4096x32_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val mem_depth = 4096 - override lazy val lib_depth = 1024 + override lazy val memDepth = 4096 + override lazy val libDepth = 1024 compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -154,8 +90,8 @@ class SplitDepth4096x32_rw extends MacroCompilerSpec with HasSRAMGenerator with class SplitDepth4096x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 16 - override lazy val mem_depth = 4096 - override lazy val lib_depth = 1024 + override lazy val memDepth = 4096 + override lazy val libDepth = 1024 compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -163,8 +99,8 @@ class SplitDepth4096x16_rw extends MacroCompilerSpec with HasSRAMGenerator with class SplitDepth32768x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val mem_depth = 32768 - override lazy val lib_depth = 1024 + override lazy val memDepth = 32768 + override lazy val libDepth = 1024 compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -172,8 +108,8 @@ class SplitDepth32768x8_rw extends MacroCompilerSpec with HasSRAMGenerator with class SplitDepth4096x8_rw extends MacroCompilerSpec with 
HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val mem_depth = 4096 - override lazy val lib_depth = 1024 + override lazy val memDepth = 4096 + override lazy val libDepth = 1024 compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -181,8 +117,8 @@ class SplitDepth4096x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H class SplitDepth2048x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val mem_depth = 2048 - override lazy val lib_depth = 1024 + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -190,8 +126,8 @@ class SplitDepth2048x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H class SplitDepth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val mem_depth = 1024 - override lazy val lib_depth = 1024 + override lazy val memDepth = 1024 + override lazy val libDepth = 1024 compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -200,8 +136,8 @@ class SplitDepth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H // Non power of two class SplitDepth2000x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val mem_depth = 2000 - override lazy val lib_depth = 1024 + override lazy val memDepth = 2000 + override lazy val libDepth = 1024 compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -209,8 +145,8 @@ class SplitDepth2000x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H class SplitDepth2049x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val mem_depth = 2049 - override lazy val lib_depth = 1024 + override lazy val memDepth = 2049 + override lazy val libDepth = 1024 compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -221,10 +157,10 @@ class SplitDepth2049x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H // Test for mem mask == lib mask (i.e. 
mask is a write enable bit) class SplitDepth2048x32_mrw_lib32 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val mem_depth = 2048 - override lazy val lib_depth = 1024 - override lazy val mem_maskGran = Some(32) - override lazy val lib_maskGran = Some(32) + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val memMaskGran = Some(32) + override lazy val libMaskGran = Some(32) compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -232,10 +168,10 @@ class SplitDepth2048x32_mrw_lib32 extends MacroCompilerSpec with HasSRAMGenerato class SplitDepth2048x8_mrw_lib8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val mem_depth = 2048 - override lazy val lib_depth = 1024 - override lazy val mem_maskGran = Some(8) - override lazy val lib_maskGran = Some(8) + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val memMaskGran = Some(8) + override lazy val libMaskGran = Some(8) compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -244,10 +180,10 @@ class SplitDepth2048x8_mrw_lib8 extends MacroCompilerSpec with HasSRAMGenerator // Non-bit level mask class SplitDepth2048x64_mrw_mem32_lib8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 64 - override lazy val mem_depth = 2048 - override lazy val lib_depth = 1024 - override lazy val mem_maskGran = Some(32) - override lazy val lib_maskGran = Some(8) + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val memMaskGran = Some(32) + override lazy val libMaskGran = Some(8) it should "be enabled when non-bitmasked memories are supported" is (pending) //compile(mem, lib, v, false) @@ -257,10 +193,10 @@ class SplitDepth2048x64_mrw_mem32_lib8 extends MacroCompilerSpec with HasSRAMGen // Bit level mask class SplitDepth2048x32_mrw_mem16_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val mem_depth = 2048 - override lazy val lib_depth = 1024 - override lazy val mem_maskGran = Some(16) - override lazy val lib_maskGran = Some(1) + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val memMaskGran = Some(16) + override lazy val libMaskGran = Some(1) compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -268,10 +204,10 @@ class SplitDepth2048x32_mrw_mem16_lib1 extends MacroCompilerSpec with HasSRAMGen class SplitDepth2048x32_mrw_mem8_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val mem_depth = 2048 - override lazy val lib_depth = 1024 - override lazy val mem_maskGran = Some(8) - override lazy val lib_maskGran = Some(1) + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val memMaskGran = Some(8) + override lazy val libMaskGran = Some(1) compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -279,10 +215,10 @@ class SplitDepth2048x32_mrw_mem8_lib1 extends MacroCompilerSpec with HasSRAMGene class SplitDepth2048x32_mrw_mem4_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val mem_depth = 2048 - override lazy val lib_depth = 1024 - override lazy val mem_maskGran = Some(4) - override lazy val 
lib_maskGran = Some(1) + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val memMaskGran = Some(4) + override lazy val libMaskGran = Some(1) compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -290,10 +226,10 @@ class SplitDepth2048x32_mrw_mem4_lib1 extends MacroCompilerSpec with HasSRAMGene class SplitDepth2048x32_mrw_mem2_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val mem_depth = 2048 - override lazy val lib_depth = 1024 - override lazy val mem_maskGran = Some(2) - override lazy val lib_maskGran = Some(1) + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val memMaskGran = Some(2) + override lazy val libMaskGran = Some(1) compile(mem, lib, v, false) execute(mem, lib, false, output) @@ -302,10 +238,10 @@ class SplitDepth2048x32_mrw_mem2_lib1 extends MacroCompilerSpec with HasSRAMGene // Non-powers of 2 mask sizes class SplitDepth2048x32_mrw_mem3_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val mem_depth = 2048 - override lazy val lib_depth = 1024 - override lazy val mem_maskGran = Some(3) - override lazy val lib_maskGran = Some(1) + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val memMaskGran = Some(3) + override lazy val libMaskGran = Some(1) it should "be enabled when non-power of two masks are supported" is (pending) //compile(mem, lib, v, false) @@ -314,10 +250,10 @@ class SplitDepth2048x32_mrw_mem3_lib1 extends MacroCompilerSpec with HasSRAMGene class SplitDepth2048x32_mrw_mem7_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val mem_depth = 2048 - override lazy val lib_depth = 1024 - override lazy val mem_maskGran = Some(7) - override lazy val lib_maskGran = Some(1) + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val memMaskGran = Some(7) + override lazy val libMaskGran = Some(1) it should "be enabled when non-power of two masks are supported" is (pending) //compile(mem, lib, v, false) @@ -326,10 +262,10 @@ class SplitDepth2048x32_mrw_mem7_lib1 extends MacroCompilerSpec with HasSRAMGene class SplitDepth2048x32_mrw_mem9_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val mem_depth = 2048 - override lazy val lib_depth = 1024 - override lazy val mem_maskGran = Some(9) - override lazy val lib_maskGran = Some(1) + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val memMaskGran = Some(9) + override lazy val libMaskGran = Some(1) it should "be enabled when non-power of two masks are supported" is (pending) //compile(mem, lib, v, false) @@ -341,14 +277,14 @@ class SplitDepth2048x8_extraPort extends MacroCompilerSpec with HasSRAMGenerator import mdf.macrolib._ override lazy val width = 8 - override lazy val mem_depth = 2048 - override lazy val lib_depth = 1024 + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 override lazy val extraPorts = List( MacroExtraPort(name="extra_port", width=8, portType=Constant, value=0xff) ) override lazy val extraTag = "extraPort" - val outputCustom = + override def generateOutput(): String = """ circuit target_memory : module target_memory : @@ -390,15 +326,16 @@ circuit target_memory : defname = 
awesome_lib_mem """ + compile(mem, lib, v, false) - execute(mem, lib, false, outputCustom) + execute(mem, lib, false, output) } // Split read and (non-masked) write ports (r+w). class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGenerator { lazy val width = 8 - lazy val mem_depth = 2048 - lazy val lib_depth = 1024 + lazy val memDepth = 2048 + lazy val libDepth = 1024 override val memPrefix = testDir override val libPrefix = testDir @@ -414,11 +351,11 @@ class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGener macroType=SRAM, name="awesome_lib_mem", width=width, - depth=lib_depth, + depth=libDepth, family="1r1w", ports=Seq( - generateReadPort("innerA", width, lib_depth), - generateWritePort("innerB", width, lib_depth) + generateReadPort("innerA", width, libDepth), + generateWritePort("innerB", width, libDepth) ) ) @@ -426,11 +363,11 @@ class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGener macroType=SRAM, name="target_memory", width=width, - depth=mem_depth, + depth=memDepth, family="1r1w", ports=Seq( - generateReadPort("outerB", width, mem_depth), - generateWritePort("outerA", width, mem_depth) + generateReadPort("outerB", width, memDepth), + generateWritePort("outerA", width, memDepth) ) ) @@ -501,16 +438,16 @@ circuit target_memory : macroType=SRAM, name="target_memory", width=width, - depth=mem_depth, + depth=memDepth, family="1r1w", ports=Seq( - generateReadPort("outerB", width, mem_depth), - generateWritePort("outerA", width, mem_depth) + generateReadPort("outerB", width, memDepth), + generateWritePort("outerA", width, memDepth) ) ) writeToLib(mem, Seq(memMacro)) - writeToLib(lib, Seq(generateSRAM("awesome_lib_mem", "lib", width, lib_depth))) + writeToLib(lib, Seq(generateSRAM("awesome_lib_mem", "lib", width, libDepth))) val output = """ @@ -536,15 +473,15 @@ TODO macroType=SRAM, name="awesome_lib_mem", width=width, - depth=lib_depth, + depth=libDepth, family="1rw", ports=Seq( - generateReadPort("innerA", width, lib_depth), - generateWritePort("innerB", width, lib_depth) + generateReadPort("innerA", width, libDepth), + generateWritePort("innerB", width, libDepth) ) ) - writeToLib(mem, Seq(generateSRAM("target_memory", "outer", width, mem_depth))) + writeToLib(mem, Seq(generateSRAM("target_memory", "outer", width, memDepth))) writeToLib(lib, Seq(libMacro)) val output = @@ -560,10 +497,10 @@ TODO // Split read and (masked) write ports (r+mw). 
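The bit-level mask expansion that appears in the masked expected outputs (the nested cat(...) of repeated mask bits) is mechanical: each mem mask bit is replicated memMaskGran times, MSB first, and folded into a right-nested cat. The stand-alone sketch below mirrors the maskBitsArr/maskBitsStr logic shown in the generator diff above; the object name and default signal name are mine, not repo code.

object MaskExpansion extends App {
  // Expand a coarse mem mask (memMaskBits bits, memMaskGran data bits each)
  // into the FIRRTL expression driving a bit-granularity lib mask.
  def libMaskExpr(memMaskBits: Int, memMaskGran: Int, signal: String = "outer_mask"): String = {
    // MSB first: m[n-1] repeated gran times, ..., m[0] repeated gran times.
    val bits = (memMaskBits - 1 to 0 by -1).flatMap { b =>
      Seq.fill(memMaskGran)(s"bits($signal, $b, $b)")
    }
    // Fold into a right-nested cat(...) chain, as in the expected output strings.
    bits.reverse.tail.foldLeft(bits.reverse.head)((acc, next) => s"cat($next, $acc)")
  }

  // 8-bit data with a single mem mask bit covering all 8 bits -> 8 copies of bit 0:
  println(libMaskExpr(1, 8))
}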
class SplitDepth_SplitPortsMasked extends MacroCompilerSpec with HasSRAMGenerator { lazy val width = 8 - lazy val mem_depth = 2048 - lazy val lib_depth = 1024 - lazy val mem_maskGran = Some(8) - lazy val lib_maskGran = Some(1) + lazy val memDepth = 2048 + lazy val libDepth = 1024 + lazy val memMaskGran = Some(8) + lazy val libMaskGran = Some(1) override val memPrefix = testDir override val libPrefix = testDir @@ -579,11 +516,11 @@ class SplitDepth_SplitPortsMasked extends MacroCompilerSpec with HasSRAMGenerato macroType=SRAM, name="awesome_lib_mem", width=width, - depth=lib_depth, + depth=libDepth, family="1r1w", ports=Seq( - generateReadPort("innerA", width, lib_depth), - generateWritePort("innerB", width, lib_depth, lib_maskGran) + generateReadPort("innerA", width, libDepth), + generateWritePort("innerB", width, libDepth, libMaskGran) ) ) @@ -591,11 +528,11 @@ class SplitDepth_SplitPortsMasked extends MacroCompilerSpec with HasSRAMGenerato macroType=SRAM, name="target_memory", width=width, - depth=mem_depth, + depth=memDepth, family="1r1w", ports=Seq( - generateReadPort("outerB", width, mem_depth), - generateWritePort("outerA", width, mem_depth, mem_maskGran) + generateReadPort("outerB", width, memDepth), + generateWritePort("outerA", width, memDepth, memMaskGran) ) ) @@ -670,16 +607,16 @@ circuit target_memory : macroType=SRAM, name="target_memory", width=width, - depth=mem_depth, + depth=memDepth, family="1r1w", ports=Seq( - generateReadPort("outerB", width, mem_depth), - generateWritePort("outerA", width, mem_depth, mem_maskGran) + generateReadPort("outerB", width, memDepth), + generateWritePort("outerA", width, memDepth, memMaskGran) ) ) writeToLib(mem, Seq(memMacro)) - writeToLib(lib, Seq(generateSRAM("awesome_lib_mem", "lib", width, lib_depth, lib_maskGran))) + writeToLib(lib, Seq(generateSRAM("awesome_lib_mem", "lib", width, libDepth, libMaskGran))) val output = """ @@ -705,15 +642,15 @@ TODO macroType=SRAM, name="awesome_lib_mem", width=width, - depth=lib_depth, + depth=libDepth, family="1rw", ports=Seq( - generateReadPort("innerA", width, lib_depth), - generateWritePort("innerB", width, lib_depth, lib_maskGran) + generateReadPort("innerA", width, libDepth), + generateWritePort("innerB", width, libDepth, libMaskGran) ) ) - writeToLib(mem, Seq(generateSRAM("target_memory", "outer", width, mem_depth, mem_maskGran))) + writeToLib(mem, Seq(generateSRAM("target_memory", "outer", width, memDepth, memMaskGran))) writeToLib(lib, Seq(libMacro)) val output = From 4fc829a570cded8f56b7fd901f6f3a095301c140 Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Thu, 27 Jul 2017 14:34:57 -0700 Subject: [PATCH 053/273] simple bug fix --- macros/src/main/scala/Utils.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index 450a33a4f..36a8ce7c4 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -43,8 +43,8 @@ class Macro(srcMacro: SRAMMacro) { val firrtlPorts = srcMacro.ports map { new FirrtlMacroPort(_) } - val writers = firrtlPorts filter (p => p.isReader) - val readers = firrtlPorts filter (p => p.isWriter) + val writers = firrtlPorts filter (p => p.isWriter) + val readers = firrtlPorts filter (p => p.isReader) val readwriters = firrtlPorts filter (p => p.isReadWriter) val sortedPorts = writers ++ readers ++ readwriters From 0bfc7a94df3ac994c4c9e92fa3c5c38e2687e7dd Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Thu, 27 Jul 2017 10:12:24 -0700 Subject: [PATCH 054/273] 
Make instance numbers generic for depth and width --- macros/src/test/scala/MacroCompilerSpec.scala | 5 +++-- macros/src/test/scala/SimpleSplitDepth.scala | 10 +++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index bac6862c1..67ae92305 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -207,9 +207,10 @@ trait HasSimpleTestGenerator { writeToLib(lib, Seq(generateSRAM(lib_name, "lib", libWidth, libDepth, libMaskGran, extraPorts))) writeToMem(mem, Seq(generateSRAM(mem_name, "outer", memWidth, memDepth, memMaskGran))) - // Number of lib instances needed to hold the mem. + // Number of lib instances needed to hold the mem, in both directions. // Round up (e.g. 1.5 instances = effectively 2 instances) - val expectedInstances = math.ceil(memDepth.toFloat / libDepth).toInt + val depthInstances = math.ceil(memDepth.toFloat / libDepth).toInt + val widthInstances = math.ceil(memWidth.toFloat / libWidth).toInt val selectBits = mem_addr_width - lib_addr_width // Generate the header (contains the circuit statement and the target memory diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index 035421040..c896b449c 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -24,7 +24,7 @@ trait HasSimpleDepthTestGenerator extends HasSimpleTestGenerator { """ } - for (i <- 0 to expectedInstances - 1) { + for (i <- 0 to depthInstances - 1) { // We only support simple masks for now (either libMask == memMask or libMask == 1) val maskStatement = if (libHasMask) { if (libMaskGran.get == memMaskGran.get) { @@ -58,18 +58,18 @@ trait HasSimpleDepthTestGenerator extends HasSimpleTestGenerator { node outer_dout_${i} = outer_dout_${i}_0 """ } - def generate_outer_dout_tree(i:Int, expectedInstances: Int): String = { - if (i > expectedInstances - 1) { + def generate_outer_dout_tree(i:Int, depthInstances: Int): String = { + if (i > depthInstances - 1) { "UInt<1>(\"h0\")" } else { "mux(eq(outer_addr_sel, UInt<%d>(\"h%s\")), outer_dout_%d, %s)".format( - selectBits, i.toHexString, i, generate_outer_dout_tree(i + 1, expectedInstances) + selectBits, i.toHexString, i, generate_outer_dout_tree(i + 1, depthInstances) ) } } output += " outer_dout <= " if (selectBits > 0) { - output += generate_outer_dout_tree(0, expectedInstances) + output += generate_outer_dout_tree(0, depthInstances) } else { output += """mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0"))""" } From 2a8d8803a9e4fe2b05317b1e3c5cbb6f8b2a2ef2 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Thu, 27 Jul 2017 20:08:47 -0700 Subject: [PATCH 055/273] Further refactor test generator --- macros/src/test/scala/MacroCompilerSpec.scala | 24 +++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 67ae92305..e7133b9f5 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -204,18 +204,33 @@ trait HasSimpleTestGenerator { val lib_name = "awesome_lib_mem" val lib_addr_width = ceilLog2(libDepth) - writeToLib(lib, Seq(generateSRAM(lib_name, "lib", libWidth, libDepth, libMaskGran, extraPorts))) - writeToMem(mem, Seq(generateSRAM(mem_name, "outer", memWidth, memDepth, memMaskGran))) + // These generate "simple" SRAMs (1 
masked read-write port) but can be + // overridden if need be. + def generateLibSRAM() = generateSRAM(lib_name, "lib", libWidth, libDepth, libMaskGran, extraPorts) + def generateMemSRAM() = generateSRAM(mem_name, "outer", memWidth, memDepth, memMaskGran) + + val libSRAM = generateLibSRAM + val memSRAM = generateMemSRAM + + writeToLib(lib, Seq(libSRAM)) + writeToMem(mem, Seq(memSRAM)) // Number of lib instances needed to hold the mem, in both directions. // Round up (e.g. 1.5 instances = effectively 2 instances) val depthInstances = math.ceil(memDepth.toFloat / libDepth).toInt val widthInstances = math.ceil(memWidth.toFloat / libWidth).toInt + // Number of width bits in the last width-direction memory. + // e.g. if memWidth = 16 and libWidth = 8, this would be 8 since the last memory 0_1 has 8 bits of input width. + // e.g. if memWidth = 9 and libWidth = 8, this would be 1 since the last memory 0_1 has 1 bit of input width. + val lastWidthBits = if (memWidth % libWidth == 0) libWidth else (memWidth % libWidth) val selectBits = mem_addr_width - lib_addr_width // Generate the header (contains the circuit statement and the target memory // module. def generateHeader(): String = { + require (memSRAM.ports.size == 1, "Header generator only supports single port mem") + + val readEnable = if (memSRAM.ports(0).readEnable.isDefined) s"input outer_read_en : UInt<1>" else "" val headerMask = if (memHasMask) s"input outer_mask : UInt<${memMaskBits}>" else "" s""" circuit $mem_name : @@ -224,6 +239,7 @@ circuit $mem_name : input outer_addr : UInt<$mem_addr_width> input outer_din : UInt<$memWidth> output outer_dout : UInt<$memWidth> + ${readEnable} input outer_write_en : UInt<1> ${headerMask} """ @@ -231,6 +247,9 @@ circuit $mem_name : // Generate the footer (contains the target memory extmodule). 
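This patch also starts accounting for the width direction: widthInstances is the same ceiling division as for depth, and lastWidthBits is how many input bits land in the final column of lib instances. A small stand-alone check of that arithmetic, in plain Scala with values chosen to match the comments above (the object name is made up):

object WidthSplitMath extends App {
  def widthInstances(memWidth: Int, libWidth: Int): Int =
    math.ceil(memWidth.toFloat / libWidth).toInt

  // Bits of input width handled by the last width-direction lib instance.
  def lastWidthBits(memWidth: Int, libWidth: Int): Int =
    if (memWidth % libWidth == 0) libWidth else memWidth % libWidth

  println((widthInstances(16, 8), lastWidthBits(16, 8))) // (2, 8): two full 8-bit columns
  println((widthInstances(9, 8), lastWidthBits(9, 8)))   // (2, 1): second column carries 1 bit
}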
def generateFooter(): String = { + require (libSRAM.ports.size == 1, "Footer generator only supports single port lib") + + val readEnable = if (libSRAM.ports(0).readEnable.isDefined) s"input lib_read_en : UInt<1>" else "" val footerMask = if (libHasMask) s"input lib_mask : UInt<${libMaskBits}>" else "" s""" extmodule $lib_name : @@ -238,6 +257,7 @@ circuit $mem_name : input lib_addr : UInt<$lib_addr_width> input lib_din : UInt<$libWidth> output lib_dout : UInt<$libWidth> + ${readEnable} input lib_write_en : UInt<1> ${footerMask} From e02f885e4b6a5cd3973363153747792e4523c0c1 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Thu, 27 Jul 2017 20:09:14 -0700 Subject: [PATCH 056/273] Port all tests to use the generator --- macros/src/test/scala/SimpleSplitWidth.scala | 1063 ++++++++++-------- 1 file changed, 595 insertions(+), 468 deletions(-) diff --git a/macros/src/test/scala/SimpleSplitWidth.scala b/macros/src/test/scala/SimpleSplitWidth.scala index f4c9faf1a..4a48c7be1 100644 --- a/macros/src/test/scala/SimpleSplitWidth.scala +++ b/macros/src/test/scala/SimpleSplitWidth.scala @@ -1,468 +1,595 @@ -//~ package barstools.macros - -//~ import java.io.File - -//~ class SplitWidth2048x16_mrw extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-2048x16-mrw.json") - //~ val lib = new File(macroDir, "lib-2048x8-mrw.json") - //~ val v = new File(testDir, "split_width_2048x16_mrw.v") - //~ val output = -//~ """ -//~ circuit name_of_sram_module : - //~ module name_of_sram_module : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<16> - //~ output RW0O : UInt<16> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<2> - - //~ inst mem_0_0 of vendor_sram - //~ inst mem_0_1 of vendor_sram - //~ mem_0_0.clock <= clock - //~ mem_0_0.RW0A <= RW0A - //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - //~ mem_0_0.RW0I <= bits(RW0I, 7, 0) - //~ mem_0_0.RW0M <= bits(RW0M, 0, 0) - //~ mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) - //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_1.clock <= clock - //~ mem_0_1.RW0A <= RW0A - //~ node RW0O_0_1 = bits(mem_0_1.RW0O, 7, 0) - //~ mem_0_1.RW0I <= bits(RW0I, 15, 8) - //~ mem_0_1.RW0M <= bits(RW0M, 1, 1) - //~ mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) - //~ mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) - //~ node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) - //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - //~ extmodule vendor_sram : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<8> - //~ output RW0O : UInt<8> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<1> - - //~ defname = vendor_sram -//~ """ - //~ compile(mem, Some(lib), v, false) - //~ execute(Some(mem), Some(lib), false, output) -//~ } - -//~ class SplitWidth2048x16_mrw_Uneven extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-2048x16-mrw.json") - //~ val lib = new File(macroDir, "lib-2048x10-rw.json") - //~ val v = new File(testDir, "split_width_2048x16_mrw_uneven.v") - //~ val output = -//~ """ -//~ circuit name_of_sram_module : - //~ module name_of_sram_module : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<16> - //~ output RW0O : UInt<16> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<2> - - //~ inst mem_0_0 of vendor_sram - //~ inst mem_0_1 of vendor_sram - //~ mem_0_0.clock <= clock - //~ mem_0_0.RW0A <= RW0A - //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - //~ mem_0_0.RW0I <= bits(RW0I, 7, 0) - //~ mem_0_0.RW0W 
<= and(and(RW0W, bits(RW0M, 0, 0)), UInt<1>("h1")) - //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_1.clock <= clock - //~ mem_0_1.RW0A <= RW0A - //~ node RW0O_0_1 = bits(mem_0_1.RW0O, 7, 0) - //~ mem_0_1.RW0I <= bits(RW0I, 15, 8) - //~ mem_0_1.RW0W <= and(and(RW0W, bits(RW0M, 1, 1)), UInt<1>("h1")) - //~ mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) - //~ node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) - //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - //~ extmodule vendor_sram : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<10> - //~ output RW0O : UInt<10> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - - //~ defname = vendor_sram -//~ """ - //~ compile(mem, Some(lib), v, false) - //~ execute(Some(mem), Some(lib), false, output) -//~ } - -//~ class SplitWidth2048x16_mrw_VeryUneven extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-2048x16-mrw-2.json") - //~ val lib = new File(macroDir, "lib-2048x10-rw.json") - //~ val v = new File(testDir, "split_width_2048x16_mrw_very_uneven.v") - //~ val output = -//~ """ -//~ circuit name_of_sram_module : - //~ module name_of_sram_module : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<16> - //~ output RW0O : UInt<16> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<8> - - //~ inst mem_0_0 of vendor_sram - //~ inst mem_0_1 of vendor_sram - //~ inst mem_0_2 of vendor_sram - //~ inst mem_0_3 of vendor_sram - //~ inst mem_0_4 of vendor_sram - //~ inst mem_0_5 of vendor_sram - //~ inst mem_0_6 of vendor_sram - //~ inst mem_0_7 of vendor_sram - //~ mem_0_0.clock <= clock - //~ mem_0_0.RW0A <= RW0A - //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 1, 0) - //~ mem_0_0.RW0I <= bits(RW0I, 1, 0) - //~ mem_0_0.RW0W <= and(and(RW0W, bits(RW0M, 0, 0)), UInt<1>("h1")) - //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_1.clock <= clock - //~ mem_0_1.RW0A <= RW0A - //~ node RW0O_0_1 = bits(mem_0_1.RW0O, 1, 0) - //~ mem_0_1.RW0I <= bits(RW0I, 3, 2) - //~ mem_0_1.RW0W <= and(and(RW0W, bits(RW0M, 1, 1)), UInt<1>("h1")) - //~ mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_2.clock <= clock - //~ mem_0_2.RW0A <= RW0A - //~ node RW0O_0_2 = bits(mem_0_2.RW0O, 1, 0) - //~ mem_0_2.RW0I <= bits(RW0I, 5, 4) - //~ mem_0_2.RW0W <= and(and(RW0W, bits(RW0M, 2, 2)), UInt<1>("h1")) - //~ mem_0_2.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_3.clock <= clock - //~ mem_0_3.RW0A <= RW0A - //~ node RW0O_0_3 = bits(mem_0_3.RW0O, 1, 0) - //~ mem_0_3.RW0I <= bits(RW0I, 7, 6) - //~ mem_0_3.RW0W <= and(and(RW0W, bits(RW0M, 3, 3)), UInt<1>("h1")) - //~ mem_0_3.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_4.clock <= clock - //~ mem_0_4.RW0A <= RW0A - //~ node RW0O_0_4 = bits(mem_0_4.RW0O, 1, 0) - //~ mem_0_4.RW0I <= bits(RW0I, 9, 8) - //~ mem_0_4.RW0W <= and(and(RW0W, bits(RW0M, 4, 4)), UInt<1>("h1")) - //~ mem_0_4.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_5.clock <= clock - //~ mem_0_5.RW0A <= RW0A - //~ node RW0O_0_5 = bits(mem_0_5.RW0O, 1, 0) - //~ mem_0_5.RW0I <= bits(RW0I, 11, 10) - //~ mem_0_5.RW0W <= and(and(RW0W, bits(RW0M, 5, 5)), UInt<1>("h1")) - //~ mem_0_5.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_6.clock <= clock - //~ mem_0_6.RW0A <= RW0A - //~ node RW0O_0_6 = bits(mem_0_6.RW0O, 1, 0) - //~ mem_0_6.RW0I <= bits(RW0I, 13, 12) - //~ mem_0_6.RW0W <= and(and(RW0W, bits(RW0M, 6, 6)), UInt<1>("h1")) - //~ mem_0_6.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_7.clock <= clock - //~ mem_0_7.RW0A <= RW0A - //~ node RW0O_0_7 = bits(mem_0_7.RW0O, 1, 0) - //~ 
mem_0_7.RW0I <= bits(RW0I, 15, 14) - //~ mem_0_7.RW0W <= and(and(RW0W, bits(RW0M, 7, 7)), UInt<1>("h1")) - //~ mem_0_7.RW0E <= and(RW0E, UInt<1>("h1")) - //~ node RW0O_0 = cat(RW0O_0_7, cat(RW0O_0_6, cat(RW0O_0_5, cat(RW0O_0_4, cat(RW0O_0_3, cat(RW0O_0_2, cat(RW0O_0_1, RW0O_0_0))))))) - //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - //~ extmodule vendor_sram : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<10> - //~ output RW0O : UInt<10> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - - //~ defname = vendor_sram -//~ """ - //~ compile(mem, Some(lib), v, false) - //~ execute(Some(mem), Some(lib), false, output) -//~ } - -//~ class SplitWidth2048x16_mrw_ReadEnable extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-2048x16-mrw.json") - //~ val lib = new File(macroDir, "lib-2048x8-mrw-re.json") - //~ val v = new File(testDir, "split_width_2048x16_mrw_read_enable.v") - //~ val output = -//~ """ -//~ circuit name_of_sram_module : - //~ module name_of_sram_module : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<16> - //~ output RW0O : UInt<16> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<2> - - //~ inst mem_0_0 of vendor_sram - //~ inst mem_0_1 of vendor_sram - //~ mem_0_0.clock <= clock - //~ mem_0_0.RW0A <= RW0A - //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - //~ mem_0_0.RW0I <= bits(RW0I, 7, 0) - //~ mem_0_0.RW0R <= not(and(not(RW0W), UInt<1>("h1"))) - //~ mem_0_0.RW0M <= bits(RW0M, 0, 0) - //~ mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) - //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_1.clock <= clock - //~ mem_0_1.RW0A <= RW0A - //~ node RW0O_0_1 = bits(mem_0_1.RW0O, 7, 0) - //~ mem_0_1.RW0I <= bits(RW0I, 15, 8) - //~ mem_0_1.RW0R <= not(and(not(RW0W), UInt<1>("h1"))) - //~ mem_0_1.RW0M <= bits(RW0M, 1, 1) - //~ mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) - //~ mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) - //~ node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) - //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - //~ extmodule vendor_sram : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<8> - //~ output RW0O : UInt<8> - //~ input RW0E : UInt<1> - //~ input RW0R : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<1> - - //~ defname = vendor_sram -//~ """ - //~ compile(mem, Some(lib), v, false) - //~ execute(Some(mem), Some(lib), false, output) -//~ } - -//~ class SplitWidth2048x16_n28 extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-2048x16-mrw.json") - //~ val lib = new File(macroDir, "lib-2048x16-n28.json") - //~ val v = new File(testDir, "split_width_2048x16_n28.v") - //~ val output = -//~ """ -//~ circuit name_of_sram_module : - //~ module name_of_sram_module : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<16> - //~ output RW0O : UInt<16> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<2> - - //~ inst mem_0_0 of vendor_sram_16 - //~ mem_0_0.clock <= clock - //~ mem_0_0.RW0A <= RW0A - //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 15, 0) - //~ mem_0_0.RW0I <= bits(RW0I, 15, 0) - //~ mem_0_0.RW0M <= cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))))))))))) - 
//~ mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) - //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - //~ node RW0O_0 = RW0O_0_0 - //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - //~ extmodule vendor_sram_16 : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<16> - //~ output RW0O : UInt<16> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<16> - - //~ defname = vendor_sram_16 -//~ """ - //~ compile(mem, Some(lib), v, false) - //~ execute(Some(mem), Some(lib), false, output) -//~ } - -//~ class SplitWidth2048x20_mrw_UnevenMask extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-2048x20-mrw.json") - //~ val lib = new File(macroDir, "lib-2048x8-mrw.json") - //~ val v = new File(testDir, "split_width_2048x20_mrw_uneven_mask.v") - //~ val output = -//~ """ -//~ circuit name_of_sram_module : - //~ module name_of_sram_module : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<20> - //~ output RW0O : UInt<20> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<2> - - //~ inst mem_0_0 of vendor_sram - //~ inst mem_0_1 of vendor_sram - //~ inst mem_0_2 of vendor_sram - //~ inst mem_0_3 of vendor_sram - //~ mem_0_0.clock <= clock - //~ mem_0_0.RW0A <= RW0A - //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 7, 0) - //~ mem_0_0.RW0I <= bits(RW0I, 7, 0) - //~ mem_0_0.RW0M <= bits(RW0M, 0, 0) - //~ mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) - //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_1.clock <= clock - //~ mem_0_1.RW0A <= RW0A - //~ node RW0O_0_1 = bits(mem_0_1.RW0O, 1, 0) - //~ mem_0_1.RW0I <= bits(RW0I, 9, 8) - //~ mem_0_1.RW0M <= bits(RW0M, 0, 0) - //~ mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) - //~ mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_2.clock <= clock - //~ mem_0_2.RW0A <= RW0A - //~ node RW0O_0_2 = bits(mem_0_2.RW0O, 7, 0) - //~ mem_0_2.RW0I <= bits(RW0I, 17, 10) - //~ mem_0_2.RW0M <= bits(RW0M, 1, 1) - //~ mem_0_2.RW0W <= and(RW0W, UInt<1>("h1")) - //~ mem_0_2.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_3.clock <= clock - //~ mem_0_3.RW0A <= RW0A - //~ node RW0O_0_3 = bits(mem_0_3.RW0O, 1, 0) - //~ mem_0_3.RW0I <= bits(RW0I, 19, 18) - //~ mem_0_3.RW0M <= bits(RW0M, 1, 1) - //~ mem_0_3.RW0W <= and(RW0W, UInt<1>("h1")) - //~ mem_0_3.RW0E <= and(RW0E, UInt<1>("h1")) - //~ node RW0O_0 = cat(RW0O_0_3, cat(RW0O_0_2, cat(RW0O_0_1, RW0O_0_0))) - //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - //~ extmodule vendor_sram : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<8> - //~ output RW0O : UInt<8> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<1> - - //~ defname = vendor_sram -//~ """ - //~ compile(mem, Some(lib), v, false) - //~ execute(Some(mem), Some(lib), false, output) -//~ } - -//~ class SplitWidth24x52 extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-24x52-r-w.json") - //~ val lib = new File(macroDir, "lib-32x32-2rw.json") - //~ val v = new File(testDir, "split_width_24x52.v") - //~ val output = -//~ """ -//~ circuit entries_info_ext : - //~ module entries_info_ext : - //~ input R0_clk : Clock - //~ input R0_addr : UInt<5> - //~ output R0_data : UInt<52> - //~ input R0_en : UInt<1> - //~ input W0_clk : Clock - //~ input W0_addr : UInt<5> - //~ input W0_data : UInt<52> - //~ input W0_en : UInt<1> - - //~ inst mem_0_0 of SRAM2RW32x32 - //~ inst mem_0_1 of SRAM2RW32x32 - //~ mem_0_0.CE1 <= W0_clk - //~ mem_0_0.A1 <= W0_addr - //~ mem_0_0.I1 <= bits(W0_data, 31, 0) - 
//~ mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_0.CSB1 <= not(and(W0_en, UInt<1>("h1"))) - //~ mem_0_1.CE1 <= W0_clk - //~ mem_0_1.A1 <= W0_addr - //~ mem_0_1.I1 <= bits(W0_data, 51, 32) - //~ mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_1.CSB1 <= not(and(W0_en, UInt<1>("h1"))) - //~ mem_0_0.CE2 <= R0_clk - //~ mem_0_0.A2 <= R0_addr - //~ node R0_data_0_0 = bits(mem_0_0.O2, 31, 0) - //~ mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - //~ mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_0.CSB2 <= not(and(R0_en, UInt<1>("h1"))) - //~ mem_0_1.CE2 <= R0_clk - //~ mem_0_1.A2 <= R0_addr - //~ node R0_data_0_1 = bits(mem_0_1.O2, 19, 0) - //~ mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - //~ mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_1.CSB2 <= not(and(R0_en, UInt<1>("h1"))) - //~ node R0_data_0 = cat(R0_data_0_1, R0_data_0_0) - //~ R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) - - //~ extmodule SRAM2RW32x32 : - //~ input CE1 : Clock - //~ input A1 : UInt<5> - //~ input I1 : UInt<32> - //~ output O1 : UInt<32> - //~ input CSB1 : UInt<1> - //~ input OEB1 : UInt<1> - //~ input WEB1 : UInt<1> - //~ input CE2 : Clock - //~ input A2 : UInt<5> - //~ input I2 : UInt<32> - //~ output O2 : UInt<32> - //~ input CSB2 : UInt<1> - //~ input OEB2 : UInt<1> - //~ input WEB2 : UInt<1> - - //~ defname = SRAM2RW32x32 -//~ """ - //~ compile(mem, Some(lib), v, false) - //~ execute(Some(mem), Some(lib), false, output) -//~ } - -//~ class SplitWidth32x160 extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-32x160-mrw.json") - //~ val lib = new File(macroDir, "lib-32x80-mrw.json") - //~ val v = new File(testDir, "split_width_32x160.v") - //~ val output = -//~ """ -//~ circuit name_of_sram_module : - //~ module name_of_sram_module : - //~ input clock : Clock - //~ input RW0A : UInt<5> - //~ input RW0I : UInt<160> - //~ output RW0O : UInt<160> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<8> - - //~ inst mem_0_0 of vendor_sram - //~ inst mem_0_1 of vendor_sram - //~ mem_0_0.clock <= clock - //~ mem_0_0.RW0A <= RW0A - //~ node RW0O_0_0 = bits(mem_0_0.RW0O, 79, 0) - //~ mem_0_0.RW0I <= bits(RW0I, 79, 0) - //~ mem_0_0.RW0M <= cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 3, 3), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 2, 2), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), 
cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 1, 1), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), cat(bits(RW0M, 0, 0), bits(RW0M, 0, 0)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) - //~ mem_0_0.RW0W <= and(RW0W, UInt<1>("h1")) - //~ mem_0_0.RW0E <= and(RW0E, UInt<1>("h1")) - //~ mem_0_1.clock <= clock - //~ mem_0_1.RW0A <= RW0A - //~ node RW0O_0_1 = bits(mem_0_1.RW0O, 79, 0) - //~ mem_0_1.RW0I <= bits(RW0I, 159, 80) - //~ mem_0_1.RW0M <= cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 7, 7), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 6, 6), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 5, 5), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), cat(bits(RW0M, 4, 4), bits(RW0M, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) - //~ mem_0_1.RW0W <= and(RW0W, UInt<1>("h1")) - //~ mem_0_1.RW0E <= and(RW0E, UInt<1>("h1")) - //~ node RW0O_0 = cat(RW0O_0_1, RW0O_0_0) - //~ RW0O <= mux(UInt<1>("h1"), RW0O_0, UInt<1>("h0")) - - //~ extmodule vendor_sram : - //~ input clock : Clock - //~ input RW0A : UInt<5> - //~ input RW0I : UInt<80> - //~ output RW0O : UInt<80> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<80> - - //~ defname = vendor_sram -//~ """ - //~ compile(mem, Some(lib), v, false) - //~ execute(Some(mem), Some(lib), false, output) -//~ } +package barstools.macros + +// Test the width splitting aspect of the memory compiler. 
+// For example, implementing a 1024x32 memory using four 1024x8 memories. + +trait HasSimpleWidthTestGenerator extends HasSimpleTestGenerator { + this: MacroCompilerSpec with HasSRAMGenerator => + def depth: Int + + override lazy val memDepth = depth + override lazy val libDepth = depth + + override def generateBody(): String = { + val output = new StringBuilder + + // Generate mem_0_ lines for number of width instances. + output.append( + ((0 to widthInstances - 1) map {i:Int => s""" + inst mem_0_${i} of ${lib_name} +""" + }).reduceLeft(_ + _) + ) + + // Generate submemory connection blocks. + output append (for (i <- 0 to widthInstances - 1) yield { + // Width of this submemory. + val myMemWidth = if (i == widthInstances - 1) lastWidthBits else libWidth + // Base bit of this submemory. + // e.g. if libWidth is 8 and this is submemory 2 (0-indexed), then this + // would be 16. + val myBaseBit = libWidth*i + + val maskStatement = if (libMaskGran.isDefined) { + if (memMaskGran.isEmpty) { + // If there is no memory mask, we should just turn all the lib mask + // bits high. + s"""mem_0_${i}.lib_mask <= UInt<${libMaskBits}>("h${((1 << libMaskBits) - 1).toHexString}")""" + } else if (libMaskGran.get == memMaskGran.get) { + s"mem_0_${i}.lib_mask <= bits(outer_mask, ${i}, ${i})" + } else if (libMaskGran.get == 1) { + // Calculate which bit of outer_mask contains the given bit. + // e.g. if memMaskGran = 2, libMaskGran = 1 and libWidth = 4, then + // calculateMaskBit({0, 1}) = 0 and calculateMaskBit({1, 2}) = 1 + def calculateMaskBit(bit:Int): Int = (bit / libMaskGran.get) / memMaskGran.getOrElse(memWidth) + + val bitsArr = ((libMaskBits - 1 to 0 by -1) map (x => { + val outerMaskBit = calculateMaskBit(x*libMaskGran.get + myBaseBit) + s"bits(outer_mask, ${outerMaskBit}, ${outerMaskBit})" + })) + val maskVal = bitsArr.init.foldRight(bitsArr.last)((bit, rest) => s"cat($bit, $rest)") + s"mem_0_${i}.lib_mask <= ${maskVal}" + } else "" // We support only bit-level masks for now. + } else "" +s""" + mem_0_${i}.lib_clk <= outer_clk + mem_0_${i}.lib_addr <= outer_addr + node outer_dout_0_${i} = bits(mem_0_${i}.lib_dout, ${myMemWidth - 1}, 0) + mem_0_${i}.lib_din <= bits(outer_din, ${myBaseBit + myMemWidth - 1}, ${myBaseBit}) + ${maskStatement} + mem_0_${i}.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) +""" + }).reduceLeft(_ + _) + + // Generate final output that concats together the sub-memories. + // e.g. cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0)) + output append { + val doutStatements = ((widthInstances - 1 to 0 by -1) map (i => s"outer_dout_0_${i}")) + val catStmt = doutStatements.init.foldRight(doutStatements.last)((l: String, r: String) => s"cat($l, $r)") +s""" + node outer_dout_0 = ${catStmt} +""" + } + + output append +""" + outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0")) +""" + output.toString + } +} + +// Try different widths against a base memory width of 8. 
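// (For the 1024x32-over-1024x8 case mentioned at the top of this file, the
// body generated by the trait above should look roughly like the explicit
// body written out in SplitWidth1024x32_readEnable_Lib further below, minus
// the lib_read_en lines: four awesome_lib_mem instances whose lib_din inputs
// take bits(outer_din, 7, 0) through bits(outer_din, 31, 24), and whose
// outputs are recombined with
//   cat(outer_dout_0_3, cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0))).)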
+class SplitWidth1024x128_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 128 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x64_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 64 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x32_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 32 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 8 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +// Try different widths against a base memory width of 16. +class SplitWidth1024x128_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 128 + override lazy val libWidth = 16 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x64_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 64 + override lazy val libWidth = 16 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x32_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 32 + override lazy val libWidth = 16 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x16_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val libWidth = 16 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +// Try different widths against a base memory width of 8 but depth 512 instead of 1024. 
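// Aside: the nesting of cat() expressions in generateBody comes from its
// init/last foldRight; as a minimal standalone sketch (strings invented for
// illustration only):
//   val douts = Seq("outer_dout_0_2", "outer_dout_0_1", "outer_dout_0_0")
//   douts.init.foldRight(douts.last)((l, r) => s"cat($l, $r)")
//   // yields "cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0))"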
+class SplitWidth512x128_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 512 + override lazy val memWidth = 128 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth512x64_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 512 + override lazy val memWidth = 64 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth512x32_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 512 + override lazy val memWidth = 32 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth512x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 512 + override lazy val memWidth = 16 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth512x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 512 + override lazy val memWidth = 8 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +// Try non-power of two widths against a base memory width of 8. +class SplitWidth1024x67_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 67 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x60_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 60 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x42_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 42 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x20_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 20 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x17_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 17 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x15_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 15 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x9_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 9 + override lazy val libWidth = 8 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +// Try against a non-power of two base memory width. 
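// (The classes below override libWidth to 11; assuming widthInstances rounds
// up and lastWidthBits is the leftover, a 64-bit outer memory should map to
// six 11-bit lib instances, with the last instance carrying 64 - 5*11 = 9 bits.)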
+class SplitWidth1024x64_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 64 + override lazy val libWidth = 11 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x33_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 33 + override lazy val libWidth = 11 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x16_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val libWidth = 11 + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +// Masked RAM + +class SplitWidth1024x8_memGran_8_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 8 + override lazy val libWidth = 8 + override lazy val memMaskGran = Some(8) + override lazy val libMaskGran = Some(1) + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x16_memGran_8_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val libWidth = 8 + override lazy val memMaskGran = Some(8) + override lazy val libMaskGran = Some(1) + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x16_memGran_8_libGran_8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val libWidth = 8 + override lazy val memMaskGran = Some(8) + override lazy val libMaskGran = Some(8) + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x128_memGran_8_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 128 + override lazy val libWidth = 32 + override lazy val memMaskGran = Some(8) + override lazy val libMaskGran = Some(1) + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x16_memGran_4_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val libWidth = 8 + override lazy val memMaskGran = Some(4) + override lazy val libMaskGran = Some(1) + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x16_memGran_2_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val libWidth = 8 + override lazy val memMaskGran = Some(2) + override lazy val libMaskGran = Some(1) + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x16_memGran_16_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val libWidth = 8 + override lazy val memMaskGran = Some(16) + override lazy val libMaskGran = Some(1) + + compile(mem, lib, v, 
false) + execute(mem, lib, false, output) +} + +// Non-masked mem, masked lib + +class SplitWidth1024x16_libGran_8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val libWidth = 8 + override lazy val libMaskGran = Some(8) + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x16_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val libWidth = 8 + override lazy val libMaskGran = Some(1) + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +// Non-memMask and non-1 libMask + +class SplitWidth1024x16_memGran_8_libGran_2_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val libWidth = 8 + override lazy val memMaskGran = Some(8) + override lazy val libMaskGran = Some(2) + + it should "be enabled when non-bit masks are supported" is (pending) + //~ compile(mem, lib, v, false) + //~ execute(mem, lib, false, output) +} + +// Non-power of two memGran + +class SplitWidth1024x16_memGran_9_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val libWidth = 8 + override lazy val memMaskGran = Some(9) + override lazy val libMaskGran = Some(1) + + it should "be enabled when non-power of two masks are supported" is (pending) + //~ compile(mem, lib, v, false) + //~ execute(mem, lib, false, output) +} + +// Read enable + +class SplitWidth1024x32_readEnable_Lib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + import mdf.macrolib._ + + override lazy val depth = 1024 + override lazy val memWidth = 32 + override lazy val libWidth = 8 + + override def generateLibSRAM() = { + SRAMMacro( + macroType=SRAM, + name=lib_name, + width=libWidth, + depth=libDepth, + family="1rw", + ports=Seq(generateTestPort( + "lib", libWidth, libDepth, maskGran=libMaskGran, + write=true, writeEnable=true, + read=true, readEnable=true + )) + ) + } + + override def generateBody() = +""" + inst mem_0_0 of awesome_lib_mem + inst mem_0_1 of awesome_lib_mem + inst mem_0_2 of awesome_lib_mem + inst mem_0_3 of awesome_lib_mem + mem_0_0.lib_clk <= outer_clk + mem_0_0.lib_addr <= outer_addr + node outer_dout_0_0 = bits(mem_0_0.lib_dout, 7, 0) + mem_0_0.lib_din <= bits(outer_din, 7, 0) + mem_0_0.lib_read_en <= and(not(outer_write_en), UInt<1>("h1")) + mem_0_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_1.lib_clk <= outer_clk + mem_0_1.lib_addr <= outer_addr + node outer_dout_0_1 = bits(mem_0_1.lib_dout, 7, 0) + mem_0_1.lib_din <= bits(outer_din, 15, 8) + mem_0_1.lib_read_en <= and(not(outer_write_en), UInt<1>("h1")) + mem_0_1.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_2.lib_clk <= outer_clk + mem_0_2.lib_addr <= outer_addr + node outer_dout_0_2 = bits(mem_0_2.lib_dout, 7, 0) + mem_0_2.lib_din <= bits(outer_din, 23, 16) + mem_0_2.lib_read_en <= and(not(outer_write_en), UInt<1>("h1")) + mem_0_2.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_3.lib_clk <= outer_clk + mem_0_3.lib_addr <= outer_addr + node outer_dout_0_3 = bits(mem_0_3.lib_dout, 7, 0) + mem_0_3.lib_din <= 
bits(outer_din, 31, 24) + mem_0_3.lib_read_en <= and(not(outer_write_en), UInt<1>("h1")) + mem_0_3.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + node outer_dout_0 = cat(outer_dout_0_3, cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0))) + outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0")) +""" + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x32_readEnable_Mem extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + import mdf.macrolib._ + + override lazy val depth = 1024 + override lazy val memWidth = 32 + override lazy val libWidth = 8 + + override def generateMemSRAM() = { + SRAMMacro( + macroType=SRAM, + name=mem_name, + width=memWidth, + depth=memDepth, + family="1rw", + ports=Seq(generateTestPort( + "outer", memWidth, memDepth, maskGran=memMaskGran, + write=true, writeEnable=true, + read=true, readEnable=true + )) + ) + } + + // No need to override body here due to the lack of a readEnable in the lib. + + compile(mem, lib, v, false) + execute(mem, lib, false, output) +} + +class SplitWidth1024x32_readEnable_LibMem extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + import mdf.macrolib._ + + override lazy val depth = 1024 + override lazy val memWidth = 32 + override lazy val libWidth = 8 + + override def generateLibSRAM() = { + SRAMMacro( + macroType=SRAM, + name=lib_name, + width=libWidth, + depth=libDepth, + family="1rw", + ports=Seq(generateTestPort( + "lib", libWidth, libDepth, maskGran=libMaskGran, + write=true, writeEnable=true, + read=true, readEnable=true + )) + ) + } + + override def generateMemSRAM() = { + SRAMMacro( + macroType=SRAM, + name=mem_name, + width=memWidth, + depth=memDepth, + family="1rw", + ports=Seq(generateTestPort( + "outer", memWidth, memDepth, maskGran=memMaskGran, + write=true, writeEnable=true, + read=true, readEnable=true + )) + ) + } + + override def generateBody() = +""" + inst mem_0_0 of awesome_lib_mem + inst mem_0_1 of awesome_lib_mem + inst mem_0_2 of awesome_lib_mem + inst mem_0_3 of awesome_lib_mem + mem_0_0.lib_clk <= outer_clk + mem_0_0.lib_addr <= outer_addr + node outer_dout_0_0 = bits(mem_0_0.lib_dout, 7, 0) + mem_0_0.lib_din <= bits(outer_din, 7, 0) + mem_0_0.lib_read_en <= and(outer_read_en, UInt<1>("h1")) + mem_0_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_1.lib_clk <= outer_clk + mem_0_1.lib_addr <= outer_addr + node outer_dout_0_1 = bits(mem_0_1.lib_dout, 7, 0) + mem_0_1.lib_din <= bits(outer_din, 15, 8) + mem_0_1.lib_read_en <= and(outer_read_en, UInt<1>("h1")) + mem_0_1.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_2.lib_clk <= outer_clk + mem_0_2.lib_addr <= outer_addr + node outer_dout_0_2 = bits(mem_0_2.lib_dout, 7, 0) + mem_0_2.lib_din <= bits(outer_din, 23, 16) + mem_0_2.lib_read_en <= and(outer_read_en, UInt<1>("h1")) + mem_0_2.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_3.lib_clk <= outer_clk + mem_0_3.lib_addr <= outer_addr + node outer_dout_0_3 = bits(mem_0_3.lib_dout, 7, 0) + mem_0_3.lib_din <= bits(outer_din, 31, 24) + mem_0_3.lib_read_en <= and(outer_read_en, UInt<1>("h1")) + mem_0_3.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + node outer_dout_0 = cat(outer_dout_0_3, cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0))) + outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0")) +""" + + compile(mem, lib, v, false) + execute(mem, lib, 
false, output) +} From 2126835df2fe64f585b5fb8acf8ffdf53396683a Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Thu, 27 Jul 2017 20:16:19 -0700 Subject: [PATCH 057/273] Clarify comments --- macros/src/test/scala/MacroCompilerSpec.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index e7133b9f5..e1316608f 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -204,8 +204,8 @@ trait HasSimpleTestGenerator { val lib_name = "awesome_lib_mem" val lib_addr_width = ceilLog2(libDepth) - // These generate "simple" SRAMs (1 masked read-write port) but can be - // overridden if need be. + // These generate "simple" SRAMs (1 masked read-write port) by default, + // but can be overridden if need be. def generateLibSRAM() = generateSRAM(lib_name, "lib", libWidth, libDepth, libMaskGran, extraPorts) def generateMemSRAM() = generateSRAM(mem_name, "outer", memWidth, memDepth, memMaskGran) From b546f49a857c3c8a6ff885b192e210bb847a47b3 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Thu, 27 Jul 2017 20:16:29 -0700 Subject: [PATCH 058/273] Fix tests by reordering statements Not sure what caused this re-ordering but it doesn't seem to affect anything? --- macros/src/test/scala/SimpleSplitDepth.scala | 32 ++++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index c896b449c..46b4420a9 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -389,23 +389,23 @@ circuit target_memory : node outerB_addr_sel = bits(outerB_addr, 10, 10) node outerA_addr_sel = bits(outerA_addr, 10, 10) inst mem_0_0 of awesome_lib_mem - mem_0_0.innerA_clk <= outerB_clk - mem_0_0.innerA_addr <= outerB_addr - node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) - node outerB_dout_0 = outerB_dout_0_0 mem_0_0.innerB_clk <= outerA_clk mem_0_0.innerB_addr <= outerA_addr mem_0_0.innerB_din <= bits(outerA_din, 7, 0) mem_0_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) + mem_0_0.innerA_clk <= outerB_clk + mem_0_0.innerA_addr <= outerB_addr + node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) + node outerB_dout_0 = outerB_dout_0_0 inst mem_1_0 of awesome_lib_mem - mem_1_0.innerA_clk <= outerB_clk - mem_1_0.innerA_addr <= outerB_addr - node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) - node outerB_dout_1 = outerB_dout_1_0 mem_1_0.innerB_clk <= outerA_clk mem_1_0.innerB_addr <= outerA_addr mem_1_0.innerB_din <= bits(outerA_din, 7, 0) mem_1_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) + mem_1_0.innerA_clk <= outerB_clk + mem_1_0.innerA_addr <= outerB_addr + node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) + node outerB_dout_1 = outerB_dout_1_0 outerB_dout <= mux(eq(outerB_addr_sel, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) extmodule awesome_lib_mem : @@ -555,25 +555,25 @@ circuit target_memory : node outerB_addr_sel = bits(outerB_addr, 10, 10) node outerA_addr_sel = bits(outerA_addr, 10, 10) inst mem_0_0 of awesome_lib_mem - mem_0_0.innerA_clk <= outerB_clk - mem_0_0.innerA_addr <= outerB_addr - node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) - node outerB_dout_0 = outerB_dout_0_0 mem_0_0.innerB_clk <= 
outerA_clk mem_0_0.innerB_addr <= outerA_addr mem_0_0.innerB_din <= bits(outerA_din, 7, 0) mem_0_0.innerB_mask <= cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), bits(outerA_mask, 0, 0)))))))) mem_0_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) + mem_0_0.innerA_clk <= outerB_clk + mem_0_0.innerA_addr <= outerB_addr + node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) + node outerB_dout_0 = outerB_dout_0_0 inst mem_1_0 of awesome_lib_mem - mem_1_0.innerA_clk <= outerB_clk - mem_1_0.innerA_addr <= outerB_addr - node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) - node outerB_dout_1 = outerB_dout_1_0 mem_1_0.innerB_clk <= outerA_clk mem_1_0.innerB_addr <= outerA_addr mem_1_0.innerB_din <= bits(outerA_din, 7, 0) mem_1_0.innerB_mask <= cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), bits(outerA_mask, 0, 0)))))))) mem_1_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) + mem_1_0.innerA_clk <= outerB_clk + mem_1_0.innerA_addr <= outerB_addr + node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) + node outerB_dout_1 = outerB_dout_1_0 outerB_dout <= mux(eq(outerB_addr_sel, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) extmodule awesome_lib_mem : From d5b30c420bd5cc73a06cb61b896bae01062a29ca Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Thu, 27 Jul 2017 20:35:05 -0700 Subject: [PATCH 059/273] Add comment --- macros/src/main/scala/SynFlops.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/macros/src/main/scala/SynFlops.scala b/macros/src/main/scala/SynFlops.scala index d33ca43cf..48ee368ca 100644 --- a/macros/src/main/scala/SynFlops.scala +++ b/macros/src/main/scala/SynFlops.scala @@ -31,7 +31,7 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa dataType, lib.src.depth, 1, // writeLatency - 1, // readLatency + 1, // readLatency. This is possible because of VerilogMemDelays lib.readers.indices map (i => s"R_$i"), lib.writers.indices map (i => s"W_$i"), lib.readwriters.indices map (i => s"RW_$i") From ba33306e3009a9f0ebeaa7337efd2ef5ba95c858 Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Fri, 28 Jul 2017 11:40:28 -0700 Subject: [PATCH 060/273] output selection signals should be piped --- macros/src/main/scala/MacroCompiler.scala | 40 ++++++++++++++++++----- 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index ec70949ef..78999cf8b 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -67,18 +67,34 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Serial mapping val stmts = ArrayBuffer[Statement]() - val selects = HashMap[String, Expression]() val outputs = HashMap[String, ArrayBuffer[(Expression, Expression)]]() + val selects = HashMap[String, Expression]() + val selectRegs = HashMap[String, Expression]() /* Palmer: If we've got a parallel memory then we've got to take the - * address bits into account. */ + * address bits into account. 
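     * For example, a 2048-deep memory built from 1024-deep lib macros selects
     * between its two rows with bits(addr, 10, 10), since high = ceilLog2(2048) = 11
     * and low = ceilLog2(1024) = 10 below.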
*/ if (mem.src.depth > lib.src.depth) { mem.src.ports foreach { port => val high = ceilLog2(mem.src.depth) val low = ceilLog2(lib.src.depth) val ref = WRef(port.address.name) - val name = s"${ref.name}_sel" - selects(ref.name) = WRef(name, UIntType(IntWidth(high-low))) - stmts += DefNode(NoInfo, name, bits(ref, high-1, low)) + val nodeName = s"${ref.name}_sel" + val tpe = UIntType(IntWidth(high-low)) + selects(ref.name) = WRef(nodeName, tpe) + stmts += DefNode(NoInfo, nodeName, bits(ref, high-1, low)) + // Donggyu: output selection should be piped + if (port.output.isDefined) { + val regName = s"${ref.name}_sel_reg" + val enable = (port.chipEnable, port.readEnable) match { + case (Some(ce), Some(re)) => + and(WRef(ce.name, BoolType), WRef(re.name, BoolType)) + case (Some(ce), None) => WRef(ce.name, BoolType) + case (None, Some(re)) => WRef(re.name, BoolType) + case (None, None) => one + } + selectRegs(ref.name) = WRef(regName, tpe) + stmts += DefRegister(NoInfo, regName, tpe, WRef(port.clock.name), zero, WRef(regName)) + stmts += Connect(NoInfo, WRef(regName), Mux(enable, WRef(nodeName), WRef(regName), tpe)) + } } } for ((off, i) <- (0 until mem.src.depth by lib.src.depth).zipWithIndex) { @@ -97,7 +113,15 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], val index = UIntLiteral(i, IntWidth(bitWidth(addr.tpe))) DoPrim(PrimOps.Eq, Seq(addr, index), Nil, index.tpe) } - def andAddrMatch(e: Expression) = and(e, addrMatch) + val addrMatchReg = selectRegs get memPort.src.address.name match { + case None => one + case Some(reg) => + val index = UIntLiteral(i, IntWidth(bitWidth(reg.tpe))) + DoPrim(PrimOps.Eq, Seq(reg, index), Nil, index.tpe) + } + def andAddrMatch(e: Expression) = { + and(e, addrMatch) + } val cats = ArrayBuffer[Expression]() for (((low, high), j) <- pairs.zipWithIndex) { val inst = WRef(s"mem_${i}_${j}", lib.tpe) @@ -272,7 +296,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], val name = s"${mem}_${i}" stmts += DefNode(NoInfo, name, cat(cats.toSeq.reverse)) (outputs getOrElseUpdate (mem, ArrayBuffer[(Expression, Expression)]())) += - (addrMatch -> WRef(name)) + (addrMatchReg -> WRef(name)) case _ => } } @@ -353,7 +377,7 @@ class MacroCompilerTransform extends Transform { val transforms = Seq( new MacroCompilerPass(mems, libs), new SynFlopsPass(synflops, libs getOrElse mems.get)) - (transforms foldLeft state)((s, xform) => xform runTransform s).copy(form=outputForm) + (transforms foldLeft state)((s, xform) => xform runTransform s).copy(form=outputForm) case _ => state } } From 937b053b15cff16bd5745089088ff08480ff9a6e Mon Sep 17 00:00:00 2001 From: Donggyu Kim Date: Mon, 31 Jul 2017 01:54:19 -0700 Subject: [PATCH 061/273] consider mask grans for cost --- macros/src/main/scala/MacroCompiler.scala | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 78999cf8b..129d86a17 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -335,8 +335,14 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // val cost = 100 * (mem.depth * mem.width) / (lib.depth * lib.width) + // (mem.depth * mem.width) // Donggyu: I re-define cost + val memMask = mem.src.ports map (_.maskGran) find (_.isDefined) map (_.get) + val libMask = lib.src.ports map (_.maskGran) find (_.isDefined) map (_.get) + val memWidth = (memMask, libMask) match { + case (Some(1), Some(1)) | (None, _) => mem.src.width + case (Some(p), _) => p // assume that the memory 
consists of smaller chunks + } val cost = (((mem.src.depth - 1) / lib.src.depth) + 1) * - (((mem.src.width - 1) / lib.src.width) + 1) * + (((memWidth - 1) / lib.src.width) + 1) * (lib.src.depth * lib.src.width + 1) // weights on # cells System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${cost}") if (cost > area) (best, area) From cca6c0ea7e44e88b7ea451c1bbe7db541e9f080f Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Fri, 28 Jul 2017 12:49:29 -0700 Subject: [PATCH 062/273] Refactor memory compiler, again --- macros/src/test/scala/MacroCompilerSpec.scala | 23 +++++++++++++++---- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index e1316608f..87ff3651d 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -52,8 +52,10 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate mdf.macrolib.Utils.writeMDFToPath(Some(concat(memPrefix, mem)), mems) } - // Execute the macro compiler and compare FIRRTL outputs. - // TODO: think of a less brittle way to test this? + // Execute the macro compiler and compare FIRRTL outputs after reparsing output. + def execute(memFile: String, libFile: Option[String], synflops: Boolean, output: String): Unit = { + execute(Some(memFile), libFile, synflops, output) + } def execute(memFile: String, libFile: String, synflops: Boolean, output: String): Unit = { execute(Some(memFile), Some(libFile), synflops, output) } @@ -245,14 +247,13 @@ circuit $mem_name : """ } - // Generate the footer (contains the target memory extmodule). - def generateFooter(): String = { + // Generate the target memory ports. + def generateFooterPorts(): String = { require (libSRAM.ports.size == 1, "Footer generator only supports single port lib") val readEnable = if (libSRAM.ports(0).readEnable.isDefined) s"input lib_read_en : UInt<1>" else "" val footerMask = if (libHasMask) s"input lib_mask : UInt<${libMaskBits}>" else "" s""" - extmodule $lib_name : input lib_clk : Clock input lib_addr : UInt<$lib_addr_width> input lib_din : UInt<$libWidth> @@ -260,6 +261,18 @@ circuit $mem_name : ${readEnable} input lib_write_en : UInt<1> ${footerMask} + """ + } + + // Generate the footer (contains the target memory extmodule declaration by default). 
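  // For a plain (non-masked, no read-enable) 8-bit, 1024-deep lib this expands
  // to roughly:
  //   extmodule awesome_lib_mem :
  //     input lib_clk : Clock
  //     input lib_addr : UInt<10>
  //     input lib_din : UInt<8>
  //     output lib_dout : UInt<8>
  //     input lib_write_en : UInt<1>
  //     defname = awesome_lib_mem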
+ def generateFooter(): String = { + require (libSRAM.ports.size == 1, "Footer generator only supports single port lib") + + val readEnable = if (libSRAM.ports(0).readEnable.isDefined) s"input lib_read_en : UInt<1>" else "" + val footerMask = if (libHasMask) s"input lib_mask : UInt<${libMaskBits}>" else "" + s""" + extmodule $lib_name : +${generateFooterPorts} defname = $lib_name """ From e3d5e4d3ad361771e3d656da0b1e80ddfabc5635 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Fri, 28 Jul 2017 15:29:03 -0700 Subject: [PATCH 063/273] Refactor execution of the compiler from the check --- macros/src/test/scala/MacroCompilerSpec.scala | 31 +++-- macros/src/test/scala/SimpleSplitDepth.scala | 75 ++++-------- macros/src/test/scala/SimpleSplitWidth.scala | 108 ++++++------------ 3 files changed, 83 insertions(+), 131 deletions(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 87ff3651d..65b6f2133 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -52,14 +52,28 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate mdf.macrolib.Utils.writeMDFToPath(Some(concat(memPrefix, mem)), mems) } - // Execute the macro compiler and compare FIRRTL outputs after reparsing output. - def execute(memFile: String, libFile: Option[String], synflops: Boolean, output: String): Unit = { - execute(Some(memFile), libFile, synflops, output) + // Convenience function for running both compile, execute, and test at once. + def compileExecuteAndTest(mem: String, lib: String, v: String, output: String, synflops: Boolean = false): Unit = { + compile(mem, lib, v, synflops) + val result = execute(mem, lib, synflops) + test(result, output) } - def execute(memFile: String, libFile: String, synflops: Boolean, output: String): Unit = { - execute(Some(memFile), Some(libFile), synflops, output) + + // Compare FIRRTL outputs after reparsing output with ScalaTest ("should be"). + def test(result: Circuit, output: String): Unit = { + val gold = RemoveEmpty run parse(output) + (result.serialize) should be (gold.serialize) + } + + // Execute the macro compiler and returns a Circuit containing the output of + // the memory compiler. 
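  // Typical use, as in compileExecuteAndTest above:
  //   compile(mem, lib, v, synflops)
  //   val result = execute(mem, lib, synflops)
  //   test(result, output)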
+ def execute(memFile: String, libFile: Option[String], synflops: Boolean): Circuit = { + execute(Some(memFile), libFile, synflops) + } + def execute(memFile: String, libFile: String, synflops: Boolean): Circuit = { + execute(Some(memFile), Some(libFile), synflops) } - def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean, output: String): Unit = { + def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean): Circuit = { var mem_full = concat(memPrefix, memFile) var lib_full = concat(libPrefix, libFile) @@ -75,9 +89,8 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate new MacroCompilerPass(Some(mems), libs), new SynFlopsPass(synflops, libs getOrElse mems), RemoveEmpty) - val result = (passes foldLeft circuit)((c, pass) => pass run c) - val gold = RemoveEmpty run parse(output) - (result.serialize) should be (gold.serialize) + val result: Circuit = (passes foldLeft circuit)((c, pass) => pass run c) + result } // Helper method to deal with String + Option[String] diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index 46b4420a9..2a5c312ce 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -84,8 +84,7 @@ class SplitDepth4096x32_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memDepth = 4096 override lazy val libDepth = 1024 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitDepth4096x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { @@ -93,8 +92,7 @@ class SplitDepth4096x16_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memDepth = 4096 override lazy val libDepth = 1024 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitDepth32768x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { @@ -102,8 +100,7 @@ class SplitDepth32768x8_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memDepth = 32768 override lazy val libDepth = 1024 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitDepth4096x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { @@ -111,8 +108,7 @@ class SplitDepth4096x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H override lazy val memDepth = 4096 override lazy val libDepth = 1024 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitDepth2048x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { @@ -120,8 +116,7 @@ class SplitDepth2048x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H override lazy val memDepth = 2048 override lazy val libDepth = 1024 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitDepth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { @@ -129,8 +124,7 @@ class SplitDepth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H override lazy val memDepth = 1024 override lazy val libDepth = 1024 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Non power of two @@ -139,8 
+133,7 @@ class SplitDepth2000x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H override lazy val memDepth = 2000 override lazy val libDepth = 1024 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitDepth2049x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { @@ -148,8 +141,7 @@ class SplitDepth2049x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H override lazy val memDepth = 2049 override lazy val libDepth = 1024 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Masked RAMs @@ -162,8 +154,7 @@ class SplitDepth2048x32_mrw_lib32 extends MacroCompilerSpec with HasSRAMGenerato override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(32) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitDepth2048x8_mrw_lib8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { @@ -173,8 +164,7 @@ class SplitDepth2048x8_mrw_lib8 extends MacroCompilerSpec with HasSRAMGenerator override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(8) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Non-bit level mask @@ -186,8 +176,7 @@ class SplitDepth2048x64_mrw_mem32_lib8 extends MacroCompilerSpec with HasSRAMGen override lazy val libMaskGran = Some(8) it should "be enabled when non-bitmasked memories are supported" is (pending) - //compile(mem, lib, v, false) - //execute(mem, lib, false, output) + //compileExecuteAndTest(mem, lib, v, output) } // Bit level mask @@ -198,8 +187,7 @@ class SplitDepth2048x32_mrw_mem16_lib1 extends MacroCompilerSpec with HasSRAMGen override lazy val memMaskGran = Some(16) override lazy val libMaskGran = Some(1) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitDepth2048x32_mrw_mem8_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { @@ -209,8 +197,7 @@ class SplitDepth2048x32_mrw_mem8_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(1) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitDepth2048x32_mrw_mem4_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { @@ -220,8 +207,7 @@ class SplitDepth2048x32_mrw_mem4_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val memMaskGran = Some(4) override lazy val libMaskGran = Some(1) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitDepth2048x32_mrw_mem2_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { @@ -231,8 +217,7 @@ class SplitDepth2048x32_mrw_mem2_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val memMaskGran = Some(2) override lazy val libMaskGran = Some(1) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Non-powers of 2 mask sizes @@ -244,8 +229,7 @@ class SplitDepth2048x32_mrw_mem3_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val libMaskGran = Some(1) it should "be enabled when non-power of two masks are supported" is (pending) - 
//compile(mem, lib, v, false) - //execute(mem, lib, false, output) + //compileExecuteAndTest(mem, lib, v, output) } class SplitDepth2048x32_mrw_mem7_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { @@ -256,8 +240,7 @@ class SplitDepth2048x32_mrw_mem7_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val libMaskGran = Some(1) it should "be enabled when non-power of two masks are supported" is (pending) - //compile(mem, lib, v, false) - //execute(mem, lib, false, output) + //compileExecuteAndTest(mem, lib, v, output) } class SplitDepth2048x32_mrw_mem9_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { @@ -268,8 +251,7 @@ class SplitDepth2048x32_mrw_mem9_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val libMaskGran = Some(1) it should "be enabled when non-power of two masks are supported" is (pending) - //compile(mem, lib, v, false) - //execute(mem, lib, false, output) + //compileExecuteAndTest(mem, lib, v, output) } // Try an extra port @@ -327,8 +309,7 @@ circuit target_memory : defname = awesome_lib_mem """ - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Split read and (non-masked) write ports (r+w). @@ -420,8 +401,7 @@ circuit target_memory : defname = awesome_lib_mem """ - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } "Non-masked regular lib; split mem" should "split fine" in { @@ -454,8 +434,7 @@ circuit target_memory : TODO """ - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } "Non-masked split lib; regular mem" should "split fine" in { @@ -489,8 +468,7 @@ TODO TODO """ - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } } @@ -589,8 +567,7 @@ circuit target_memory : defname = awesome_lib_mem """ - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } "Non-masked regular lib; split mem" should "split fine" in { @@ -623,8 +600,7 @@ circuit target_memory : TODO """ - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } "Non-masked split lib; regular mem" should "split fine" in { @@ -658,7 +634,6 @@ TODO TODO """ - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } } diff --git a/macros/src/test/scala/SimpleSplitWidth.scala b/macros/src/test/scala/SimpleSplitWidth.scala index 4a48c7be1..b75b9fe9b 100644 --- a/macros/src/test/scala/SimpleSplitWidth.scala +++ b/macros/src/test/scala/SimpleSplitWidth.scala @@ -85,8 +85,7 @@ class SplitWidth1024x128_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memWidth = 128 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x64_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -94,8 +93,7 @@ class SplitWidth1024x64_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memWidth = 64 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x32_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -103,8 
+101,7 @@ class SplitWidth1024x32_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memWidth = 32 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -112,8 +109,7 @@ class SplitWidth1024x16_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memWidth = 16 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -121,8 +117,7 @@ class SplitWidth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H override lazy val memWidth = 8 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Try different widths against a base memory width of 16. @@ -131,8 +126,7 @@ class SplitWidth1024x128_lib16_rw extends MacroCompilerSpec with HasSRAMGenerato override lazy val memWidth = 128 override lazy val libWidth = 16 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x64_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -140,8 +134,7 @@ class SplitWidth1024x64_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator override lazy val memWidth = 64 override lazy val libWidth = 16 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x32_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -149,8 +142,7 @@ class SplitWidth1024x32_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator override lazy val memWidth = 32 override lazy val libWidth = 16 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x16_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -158,8 +150,7 @@ class SplitWidth1024x16_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator override lazy val memWidth = 16 override lazy val libWidth = 16 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Try different widths against a base memory width of 8 but depth 512 instead of 1024. 
@@ -168,8 +159,7 @@ class SplitWidth512x128_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memWidth = 128 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth512x64_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -177,8 +167,7 @@ class SplitWidth512x64_rw extends MacroCompilerSpec with HasSRAMGenerator with H override lazy val memWidth = 64 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth512x32_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -186,8 +175,7 @@ class SplitWidth512x32_rw extends MacroCompilerSpec with HasSRAMGenerator with H override lazy val memWidth = 32 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth512x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -195,8 +183,7 @@ class SplitWidth512x16_rw extends MacroCompilerSpec with HasSRAMGenerator with H override lazy val memWidth = 16 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth512x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -204,8 +191,7 @@ class SplitWidth512x8_rw extends MacroCompilerSpec with HasSRAMGenerator with Ha override lazy val memWidth = 8 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Try non-power of two widths against a base memory width of 8. 
@@ -214,8 +200,7 @@ class SplitWidth1024x67_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memWidth = 67 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x60_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -223,8 +208,7 @@ class SplitWidth1024x60_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memWidth = 60 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x42_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -232,8 +216,7 @@ class SplitWidth1024x42_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memWidth = 42 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x20_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -241,8 +224,7 @@ class SplitWidth1024x20_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memWidth = 20 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x17_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -250,8 +232,7 @@ class SplitWidth1024x17_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memWidth = 17 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x15_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -259,8 +240,7 @@ class SplitWidth1024x15_rw extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memWidth = 15 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x9_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -268,8 +248,7 @@ class SplitWidth1024x9_rw extends MacroCompilerSpec with HasSRAMGenerator with H override lazy val memWidth = 9 override lazy val libWidth = 8 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Try against a non-power of two base memory width. 
@@ -278,8 +257,7 @@ class SplitWidth1024x64_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator override lazy val memWidth = 64 override lazy val libWidth = 11 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x33_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -287,8 +265,7 @@ class SplitWidth1024x33_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator override lazy val memWidth = 33 override lazy val libWidth = 11 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x16_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -296,8 +273,7 @@ class SplitWidth1024x16_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator override lazy val memWidth = 16 override lazy val libWidth = 11 - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Masked RAM @@ -309,8 +285,7 @@ class SplitWidth1024x8_memGran_8_libGran_1_rw extends MacroCompilerSpec with Has override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(1) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x16_memGran_8_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -320,8 +295,7 @@ class SplitWidth1024x16_memGran_8_libGran_1_rw extends MacroCompilerSpec with Ha override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(1) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x16_memGran_8_libGran_8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -331,8 +305,7 @@ class SplitWidth1024x16_memGran_8_libGran_8_rw extends MacroCompilerSpec with Ha override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(8) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x128_memGran_8_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -342,8 +315,7 @@ class SplitWidth1024x128_memGran_8_libGran_1_rw extends MacroCompilerSpec with H override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(1) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x16_memGran_4_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -353,8 +325,7 @@ class SplitWidth1024x16_memGran_4_libGran_1_rw extends MacroCompilerSpec with Ha override lazy val memMaskGran = Some(4) override lazy val libMaskGran = Some(1) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x16_memGran_2_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -364,8 +335,7 @@ class SplitWidth1024x16_memGran_2_libGran_1_rw extends MacroCompilerSpec with Ha override lazy val memMaskGran = Some(2) override lazy val libMaskGran = Some(1) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x16_memGran_16_libGran_1_rw extends 
MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -375,8 +345,7 @@ class SplitWidth1024x16_memGran_16_libGran_1_rw extends MacroCompilerSpec with H override lazy val memMaskGran = Some(16) override lazy val libMaskGran = Some(1) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Non-masked mem, masked lib @@ -387,8 +356,7 @@ class SplitWidth1024x16_libGran_8_rw extends MacroCompilerSpec with HasSRAMGener override lazy val libWidth = 8 override lazy val libMaskGran = Some(8) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x16_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -397,8 +365,7 @@ class SplitWidth1024x16_libGran_1_rw extends MacroCompilerSpec with HasSRAMGener override lazy val libWidth = 8 override lazy val libMaskGran = Some(1) - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Non-memMask and non-1 libMask @@ -487,8 +454,7 @@ class SplitWidth1024x32_readEnable_Lib extends MacroCompilerSpec with HasSRAMGen outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0")) """ - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x32_readEnable_Mem extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -515,8 +481,7 @@ class SplitWidth1024x32_readEnable_Mem extends MacroCompilerSpec with HasSRAMGen // No need to override body here due to the lack of a readEnable in the lib. - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } class SplitWidth1024x32_readEnable_LibMem extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { @@ -590,6 +555,5 @@ class SplitWidth1024x32_readEnable_LibMem extends MacroCompilerSpec with HasSRAM outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0")) """ - compile(mem, lib, v, false) - execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } From 3730f76fa32f6a73507b8853965b7c6e2e983c54 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 13:12:02 -0700 Subject: [PATCH 064/273] Fix unit tests to include address registers --- macros/src/test/scala/SimpleSplitDepth.scala | 40 +++++++++++++------- 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index 2a5c312ce..0965715d2 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -15,13 +15,17 @@ trait HasSimpleDepthTestGenerator extends HasSimpleTestGenerator { // Generate a depth-splitting body. 
override def generateBody(): String = { - var output = "" + val output = new StringBuilder if (selectBits > 0) { - output += - s""" + output.append ( +s""" node outer_addr_sel = bits(outer_addr, ${mem_addr_width - 1}, $lib_addr_width) - """ + reg outer_addr_sel_reg : UInt<${selectBits}>, outer_clk with : + reset => (UInt<1>("h0"), outer_addr_sel_reg) + outer_addr_sel_reg <= mux(UInt<1>("h1"), outer_addr_sel, outer_addr_sel_reg) +""" + ) } for (i <- 0 to depthInstances - 1) { @@ -46,7 +50,7 @@ trait HasSimpleDepthTestGenerator extends HasSimpleTestGenerator { } else "" // No mask val enableIdentifier = if (selectBits > 0) s"""eq(outer_addr_sel, UInt<${selectBits}>("h${i.toHexString}"))""" else "UInt<1>(\"h1\")" - output += + output.append( s""" inst mem_${i}_0 of awesome_lib_mem mem_${i}_0.lib_clk <= outer_clk @@ -57,24 +61,25 @@ trait HasSimpleDepthTestGenerator extends HasSimpleTestGenerator { mem_${i}_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), ${enableIdentifier}) node outer_dout_${i} = outer_dout_${i}_0 """ + ) } def generate_outer_dout_tree(i:Int, depthInstances: Int): String = { if (i > depthInstances - 1) { "UInt<1>(\"h0\")" } else { - "mux(eq(outer_addr_sel, UInt<%d>(\"h%s\")), outer_dout_%d, %s)".format( + "mux(eq(outer_addr_sel_reg, UInt<%d>(\"h%s\")), outer_dout_%d, %s)".format( selectBits, i.toHexString, i, generate_outer_dout_tree(i + 1, depthInstances) ) } } - output += " outer_dout <= " + output append " outer_dout <= " if (selectBits > 0) { - output += generate_outer_dout_tree(0, depthInstances) + output append generate_outer_dout_tree(0, depthInstances) } else { - output += """mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0"))""" + output append """mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0"))""" } - return output + output.toString } } @@ -277,6 +282,9 @@ circuit target_memory : input outer_write_en : UInt<1> node outer_addr_sel = bits(outer_addr, 10, 10) + reg outer_addr_sel_reg : UInt<1>, outer_clk with : + reset => (UInt<1>("h0"), outer_addr_sel_reg) + outer_addr_sel_reg <= mux(UInt<1>("h1"), outer_addr_sel, outer_addr_sel_reg) inst mem_0_0 of awesome_lib_mem mem_0_0.extra_port <= UInt<8>("hff") @@ -297,7 +305,7 @@ circuit target_memory : mem_1_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), eq(outer_addr_sel, UInt<1>("h1"))) node outer_dout_1 = outer_dout_1_0 - outer_dout <= mux(eq(outer_addr_sel, UInt<1>("h0")), outer_dout_0, mux(eq(outer_addr_sel, UInt<1>("h1")), outer_dout_1, UInt<1>("h0"))) + outer_dout <= mux(eq(outer_addr_sel_reg, UInt<1>("h0")), outer_dout_0, mux(eq(outer_addr_sel_reg, UInt<1>("h1")), outer_dout_1, UInt<1>("h0"))) extmodule awesome_lib_mem : input lib_clk : Clock input lib_addr : UInt<10> @@ -368,6 +376,9 @@ circuit target_memory : input outerA_write_en : UInt<1> node outerB_addr_sel = bits(outerB_addr, 10, 10) + reg outerB_addr_sel_reg : UInt<1>, outerB_clk with : + reset => (UInt<1>("h0"), outerB_addr_sel_reg) + outerB_addr_sel_reg <= mux(UInt<1>("h1"), outerB_addr_sel, outerB_addr_sel_reg) node outerA_addr_sel = bits(outerA_addr, 10, 10) inst mem_0_0 of awesome_lib_mem mem_0_0.innerB_clk <= outerA_clk @@ -387,7 +398,7 @@ circuit target_memory : mem_1_0.innerA_addr <= outerB_addr node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) node outerB_dout_1 = outerB_dout_1_0 - outerB_dout <= mux(eq(outerB_addr_sel, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) + outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, 
UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) extmodule awesome_lib_mem : input innerA_clk : Clock @@ -531,6 +542,9 @@ circuit target_memory : input outerA_mask : UInt<1> node outerB_addr_sel = bits(outerB_addr, 10, 10) + reg outerB_addr_sel_reg : UInt<1>, outerB_clk with : + reset => (UInt<1>("h0"), outerB_addr_sel_reg) + outerB_addr_sel_reg <= mux(UInt<1>("h1"), outerB_addr_sel, outerB_addr_sel_reg) node outerA_addr_sel = bits(outerA_addr, 10, 10) inst mem_0_0 of awesome_lib_mem mem_0_0.innerB_clk <= outerA_clk @@ -552,7 +566,7 @@ circuit target_memory : mem_1_0.innerA_addr <= outerB_addr node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) node outerB_dout_1 = outerB_dout_1_0 - outerB_dout <= mux(eq(outerB_addr_sel, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) + outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) extmodule awesome_lib_mem : input innerA_clk : Clock From 93331cd26d0c2724ad79d538c3c9a2a7112f7f55 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 15:36:43 -0700 Subject: [PATCH 065/273] More refactor --- macros/src/test/scala/MacroCompilerSpec.scala | 45 +++++++++++-------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 65b6f2133..fb70c5617 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -53,11 +53,14 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate } // Convenience function for running both compile, execute, and test at once. - def compileExecuteAndTest(mem: String, lib: String, v: String, output: String, synflops: Boolean = false): Unit = { + def compileExecuteAndTest(mem: String, lib: Option[String], v: String, output: String, synflops: Boolean): Unit = { compile(mem, lib, v, synflops) val result = execute(mem, lib, synflops) test(result, output) } + def compileExecuteAndTest(mem: String, lib: String, v: String, output: String, synflops: Boolean = false): Unit = { + compileExecuteAndTest(mem, Some(lib), v, output, synflops) + } // Compare FIRRTL outputs after reparsing output with ScalaTest ("should be"). def test(result: Circuit, output: String): Unit = { @@ -219,10 +222,14 @@ trait HasSimpleTestGenerator { val lib_name = "awesome_lib_mem" val lib_addr_width = ceilLog2(libDepth) + // Override these to change the port prefixes if needed. + def libPortPrefix: String = "lib" + def memPortPrefix: String = "outer" + // These generate "simple" SRAMs (1 masked read-write port) by default, // but can be overridden if need be. 
- def generateLibSRAM() = generateSRAM(lib_name, "lib", libWidth, libDepth, libMaskGran, extraPorts) - def generateMemSRAM() = generateSRAM(mem_name, "outer", memWidth, memDepth, memMaskGran) + def generateLibSRAM() = generateSRAM(lib_name, libPortPrefix, libWidth, libDepth, libMaskGran, extraPorts) + def generateMemSRAM() = generateSRAM(mem_name, memPortPrefix, memWidth, memDepth, memMaskGran) val libSRAM = generateLibSRAM val memSRAM = generateMemSRAM @@ -245,17 +252,17 @@ trait HasSimpleTestGenerator { def generateHeader(): String = { require (memSRAM.ports.size == 1, "Header generator only supports single port mem") - val readEnable = if (memSRAM.ports(0).readEnable.isDefined) s"input outer_read_en : UInt<1>" else "" - val headerMask = if (memHasMask) s"input outer_mask : UInt<${memMaskBits}>" else "" + val readEnable = if (memSRAM.ports(0).readEnable.isDefined) s"input ${memPortPrefix}_read_en : UInt<1>" else "" + val headerMask = if (memHasMask) s"input ${memPortPrefix}_mask : UInt<${memMaskBits}>" else "" s""" circuit $mem_name : module $mem_name : - input outer_clk : Clock - input outer_addr : UInt<$mem_addr_width> - input outer_din : UInt<$memWidth> - output outer_dout : UInt<$memWidth> + input ${memPortPrefix}_clk : Clock + input ${memPortPrefix}_addr : UInt<$mem_addr_width> + input ${memPortPrefix}_din : UInt<$memWidth> + output ${memPortPrefix}_dout : UInt<$memWidth> ${readEnable} - input outer_write_en : UInt<1> + input ${memPortPrefix}_write_en : UInt<1> ${headerMask} """ } @@ -264,15 +271,15 @@ circuit $mem_name : def generateFooterPorts(): String = { require (libSRAM.ports.size == 1, "Footer generator only supports single port lib") - val readEnable = if (libSRAM.ports(0).readEnable.isDefined) s"input lib_read_en : UInt<1>" else "" - val footerMask = if (libHasMask) s"input lib_mask : UInt<${libMaskBits}>" else "" + val readEnable = if (libSRAM.ports(0).readEnable.isDefined) s"input ${libPortPrefix}_read_en : UInt<1>" else "" + val footerMask = if (libHasMask) s"input ${libPortPrefix}_mask : UInt<${libMaskBits}>" else "" s""" - input lib_clk : Clock - input lib_addr : UInt<$lib_addr_width> - input lib_din : UInt<$libWidth> - output lib_dout : UInt<$libWidth> + input ${libPortPrefix}_clk : Clock + input ${libPortPrefix}_addr : UInt<$lib_addr_width> + input ${libPortPrefix}_din : UInt<$libWidth> + output ${libPortPrefix}_dout : UInt<$libWidth> ${readEnable} - input lib_write_en : UInt<1> + input ${libPortPrefix}_write_en : UInt<1> ${footerMask} """ } @@ -281,8 +288,8 @@ circuit $mem_name : def generateFooter(): String = { require (libSRAM.ports.size == 1, "Footer generator only supports single port lib") - val readEnable = if (libSRAM.ports(0).readEnable.isDefined) s"input lib_read_en : UInt<1>" else "" - val footerMask = if (libHasMask) s"input lib_mask : UInt<${libMaskBits}>" else "" + val readEnable = if (libSRAM.ports(0).readEnable.isDefined) s"input ${libPortPrefix}_read_en : UInt<1>" else "" + val footerMask = if (libHasMask) s"input ${libPortPrefix}_mask : UInt<${libMaskBits}>" else "" s""" extmodule $lib_name : ${generateFooterPorts} From de66405fe823bb4190705b3060ad87696922a383 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 15:36:03 -0700 Subject: [PATCH 066/273] Write flop tests using generator --- macros/src/test/scala/SynFlops.scala | 667 ++++++++++++++------------- 1 file changed, 334 insertions(+), 333 deletions(-) diff --git a/macros/src/test/scala/SynFlops.scala b/macros/src/test/scala/SynFlops.scala index 2b3ff531b..0394dc40c 100644 --- 
a/macros/src/test/scala/SynFlops.scala +++ b/macros/src/test/scala/SynFlops.scala @@ -1,333 +1,334 @@ -//~ package barstools.tapeout.transforms.macros - -//~ import java.io.File - -//~ class Synflops2048x16_mrw extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-2048x16-mrw.json") - //~ val v = new File(testDir, "syn_flops_2048x16_mrw.v") - //~ val output = -//~ """ -//~ circuit name_of_sram_module : - //~ module name_of_sram_module : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<16> - //~ output RW0O : UInt<16> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<2> - - //~ mem ram : - //~ data-type => UInt<8>[2] - //~ depth => 2048 - //~ read-latency => 0 - //~ write-latency => 1 - //~ reader => R_0 - //~ writer => W_0 - //~ read-under-write => undefined - //~ reg R_0_addr_reg : UInt<11>, clock with : - //~ reset => (UInt<1>("h0"), R_0_addr_reg) - //~ ram.R_0.clk <= clock - //~ ram.R_0.addr <= R_0_addr_reg - //~ ram.R_0.en <= RW0E - //~ RW0O <= cat(ram.R_0.data[1], ram.R_0.data[0]) - //~ R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) - //~ ram.W_0.clk <= clock - //~ ram.W_0.addr <= RW0A - //~ ram.W_0.en <= and(RW0E, RW0W) - //~ ram.W_0.data[0] <= bits(RW0I, 7, 0) - //~ ram.W_0.data[1] <= bits(RW0I, 15, 8) - //~ ram.W_0.mask[0] <= bits(RW0M, 0, 0) - //~ ram.W_0.mask[1] <= bits(RW0M, 1, 1) -//~ """ - //~ compile(mem, None, v, true) - //~ execute(Some(mem), None, true, output) -//~ } - -//~ class Synflops2048x8_r_mw extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "mem-2048x8-r-mw.json") - //~ val v = new File(testDir, "syn_flops_2048x8_r_mw.v") - //~ val output = -//~ """ -//~ circuit name_of_sram_module : - //~ module name_of_sram_module : - //~ input clock : Clock - //~ input W0A : UInt<11> - //~ input W0I : UInt<8> - //~ input W0E : UInt<1> - //~ input W0M : UInt<1> - //~ input clock : Clock - //~ input R0A : UInt<11> - //~ output R0O : UInt<8> - - //~ mem ram : - //~ data-type => UInt<8>[1] - //~ depth => 2048 - //~ read-latency => 0 - //~ write-latency => 1 - //~ reader => R_0 - //~ writer => W_0 - //~ read-under-write => undefined - //~ reg R_0_addr_reg : UInt<11>, clock with : - //~ reset => (UInt<1>("h0"), R_0_addr_reg) - //~ ram.R_0.clk <= clock - //~ ram.R_0.addr <= R_0_addr_reg - //~ ram.R_0.en <= UInt<1>("h1") - //~ R0O <= ram.R_0.data[0] - //~ R_0_addr_reg <= mux(UInt<1>("h1"), R0A, R_0_addr_reg) - //~ ram.W_0.clk <= clock - //~ ram.W_0.addr <= W0A - //~ ram.W_0.en <= W0E - //~ ram.W_0.data[0] <= bits(W0I, 7, 0) - //~ ram.W_0.mask[0] <= bits(W0M, 0, 0) -//~ """ - //~ compile(mem, None, v, true) - //~ execute(Some(mem), None, true, output) -//~ } - -//~ class Synflops2048x10_rw extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "lib-2048x10-rw.json") - //~ val v = new File(testDir, "syn_flops_2048x10_rw.v") - //~ val output = -//~ """ -//~ circuit vendor_sram : - //~ module vendor_sram : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<10> - //~ output RW0O : UInt<10> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - - //~ mem ram : - //~ data-type => UInt<10> - //~ depth => 2048 - //~ read-latency => 0 - //~ write-latency => 1 - //~ reader => R_0 - //~ writer => W_0 - //~ read-under-write => undefined - //~ reg R_0_addr_reg : UInt<11>, clock with : - //~ reset => (UInt<1>("h0"), R_0_addr_reg) - //~ ram.R_0.clk <= clock - //~ ram.R_0.addr <= R_0_addr_reg - //~ ram.R_0.en <= RW0E - //~ RW0O <= ram.R_0.data - //~ R_0_addr_reg <= mux(RW0E, RW0A, 
R_0_addr_reg) - //~ ram.W_0.clk <= clock - //~ ram.W_0.addr <= RW0A - //~ ram.W_0.en <= and(RW0E, RW0W) - //~ ram.W_0.data <= RW0I - //~ ram.W_0.mask <= UInt<1>("h1") -//~ """ - //~ compile(mem, None, v, true) - //~ execute(Some(mem), None, true, output) -//~ } - -//~ class Synflops2048x8_mrw_re extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "lib-2048x8-mrw-re.json") - //~ val v = new File(testDir, "syn_flops_2048x8_mrw_re.v") - //~ val output = -//~ """ -//~ circuit vendor_sram : - //~ module vendor_sram : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<8> - //~ output RW0O : UInt<8> - //~ input RW0E : UInt<1> - //~ input RW0R : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<1> - - //~ mem ram : - //~ data-type => UInt<8>[1] - //~ depth => 2048 - //~ read-latency => 0 - //~ write-latency => 1 - //~ reader => R_0 - //~ writer => W_0 - //~ read-under-write => undefined - //~ reg R_0_addr_reg : UInt<11>, clock with : - //~ reset => (UInt<1>("h0"), R_0_addr_reg) - //~ ram.R_0.clk <= clock - //~ ram.R_0.addr <= R_0_addr_reg - //~ ram.R_0.en <= and(RW0E, not(RW0R)) - //~ RW0O <= ram.R_0.data[0] - //~ R_0_addr_reg <= mux(and(RW0E, not(RW0R)), RW0A, R_0_addr_reg) - //~ ram.W_0.clk <= clock - //~ ram.W_0.addr <= RW0A - //~ ram.W_0.en <= and(RW0E, RW0W) - //~ ram.W_0.data[0] <= bits(RW0I, 7, 0) - //~ ram.W_0.mask[0] <= bits(RW0M, 0, 0) -//~ """ - //~ compile(mem, None, v, true) - //~ execute(Some(mem), None, true, output) -//~ } - -//~ class Synflops2048x16_n28 extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "lib-2048x16-n28.json") - //~ val v = new File(testDir, "syn_flops_2048x16_n28.v") - //~ val output = -//~ """ -//~ circuit vendor_sram_4 : - //~ module vendor_sram_16 : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<16> - //~ output RW0O : UInt<16> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<16> - - //~ mem ram : - //~ data-type => UInt<1>[16] - //~ depth => 2048 - //~ read-latency => 0 - //~ write-latency => 1 - //~ reader => R_0 - //~ writer => W_0 - //~ read-under-write => undefined - //~ reg R_0_addr_reg : UInt<11>, clock with : - //~ reset => (UInt<1>("h0"), R_0_addr_reg) - //~ ram.R_0.clk <= clock - //~ ram.R_0.addr <= R_0_addr_reg - //~ ram.R_0.en <= RW0E - //~ RW0O <= cat(ram.R_0.data[15], cat(ram.R_0.data[14], cat(ram.R_0.data[13], cat(ram.R_0.data[12], cat(ram.R_0.data[11], cat(ram.R_0.data[10], cat(ram.R_0.data[9], cat(ram.R_0.data[8], cat(ram.R_0.data[7], cat(ram.R_0.data[6], cat(ram.R_0.data[5], cat(ram.R_0.data[4], cat(ram.R_0.data[3], cat(ram.R_0.data[2], cat(ram.R_0.data[1], ram.R_0.data[0]))))))))))))))) - //~ R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) - //~ ram.W_0.clk <= clock - //~ ram.W_0.addr <= RW0A - //~ ram.W_0.en <= and(RW0E, RW0W) - //~ ram.W_0.data[0] <= bits(RW0I, 0, 0) - //~ ram.W_0.data[1] <= bits(RW0I, 1, 1) - //~ ram.W_0.data[2] <= bits(RW0I, 2, 2) - //~ ram.W_0.data[3] <= bits(RW0I, 3, 3) - //~ ram.W_0.data[4] <= bits(RW0I, 4, 4) - //~ ram.W_0.data[5] <= bits(RW0I, 5, 5) - //~ ram.W_0.data[6] <= bits(RW0I, 6, 6) - //~ ram.W_0.data[7] <= bits(RW0I, 7, 7) - //~ ram.W_0.data[8] <= bits(RW0I, 8, 8) - //~ ram.W_0.data[9] <= bits(RW0I, 9, 9) - //~ ram.W_0.data[10] <= bits(RW0I, 10, 10) - //~ ram.W_0.data[11] <= bits(RW0I, 11, 11) - //~ ram.W_0.data[12] <= bits(RW0I, 12, 12) - //~ ram.W_0.data[13] <= bits(RW0I, 13, 13) - //~ ram.W_0.data[14] <= bits(RW0I, 14, 14) - //~ ram.W_0.data[15] <= bits(RW0I, 15, 15) - //~ ram.W_0.mask[0] 
<= bits(RW0M, 0, 0) - //~ ram.W_0.mask[1] <= bits(RW0M, 1, 1) - //~ ram.W_0.mask[2] <= bits(RW0M, 2, 2) - //~ ram.W_0.mask[3] <= bits(RW0M, 3, 3) - //~ ram.W_0.mask[4] <= bits(RW0M, 4, 4) - //~ ram.W_0.mask[5] <= bits(RW0M, 5, 5) - //~ ram.W_0.mask[6] <= bits(RW0M, 6, 6) - //~ ram.W_0.mask[7] <= bits(RW0M, 7, 7) - //~ ram.W_0.mask[8] <= bits(RW0M, 8, 8) - //~ ram.W_0.mask[9] <= bits(RW0M, 9, 9) - //~ ram.W_0.mask[10] <= bits(RW0M, 10, 10) - //~ ram.W_0.mask[11] <= bits(RW0M, 11, 11) - //~ ram.W_0.mask[12] <= bits(RW0M, 12, 12) - //~ ram.W_0.mask[13] <= bits(RW0M, 13, 13) - //~ ram.W_0.mask[14] <= bits(RW0M, 14, 14) - //~ ram.W_0.mask[15] <= bits(RW0M, 15, 15) - - //~ module vendor_sram_4 : - //~ input clock : Clock - //~ input RW0A : UInt<11> - //~ input RW0I : UInt<4> - //~ output RW0O : UInt<4> - //~ input RW0E : UInt<1> - //~ input RW0W : UInt<1> - //~ input RW0M : UInt<4> - - //~ mem ram : - //~ data-type => UInt<1>[4] - //~ depth => 2048 - //~ read-latency => 0 - //~ write-latency => 1 - //~ reader => R_0 - //~ writer => W_0 - //~ read-under-write => undefined - //~ reg R_0_addr_reg : UInt<11>, clock with : - //~ reset => (UInt<1>("h0"), R_0_addr_reg) - //~ ram.R_0.clk <= clock - //~ ram.R_0.addr <= R_0_addr_reg - //~ ram.R_0.en <= RW0E - //~ RW0O <= cat(ram.R_0.data[3], cat(ram.R_0.data[2], cat(ram.R_0.data[1], ram.R_0.data[0]))) - //~ R_0_addr_reg <= mux(RW0E, RW0A, R_0_addr_reg) - //~ ram.W_0.clk <= clock - //~ ram.W_0.addr <= RW0A - //~ ram.W_0.en <= and(RW0E, RW0W) - //~ ram.W_0.data[0] <= bits(RW0I, 0, 0) - //~ ram.W_0.data[1] <= bits(RW0I, 1, 1) - //~ ram.W_0.data[2] <= bits(RW0I, 2, 2) - //~ ram.W_0.data[3] <= bits(RW0I, 3, 3) - //~ ram.W_0.mask[0] <= bits(RW0M, 0, 0) - //~ ram.W_0.mask[1] <= bits(RW0M, 1, 1) - //~ ram.W_0.mask[2] <= bits(RW0M, 2, 2) - //~ ram.W_0.mask[3] <= bits(RW0M, 3, 3) -//~ """ - //~ compile(mem, None, v, true) - //~ execute(Some(mem), None, true, output) -//~ } - -//~ class Synflops32x32_2rw extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "lib-32x32-2rw.json") - //~ val v = new File(testDir, "syn_flops_32x32_2rw.v") - //~ val output = -//~ """ -//~ circuit SRAM2RW32x32 : - //~ module SRAM2RW32x32 : - //~ input CE1 : Clock - //~ input A1 : UInt<5> - //~ input I1 : UInt<32> - //~ output O1 : UInt<32> - //~ input CSB1 : UInt<1> - //~ input OEB1 : UInt<1> - //~ input WEB1 : UInt<1> - //~ input CE2 : Clock - //~ input A2 : UInt<5> - //~ input I2 : UInt<32> - //~ output O2 : UInt<32> - //~ input CSB2 : UInt<1> - //~ input OEB2 : UInt<1> - //~ input WEB2 : UInt<1> - - //~ mem ram : - //~ data-type => UInt<32> - //~ depth => 32 - //~ read-latency => 0 - //~ write-latency => 1 - //~ reader => R_0 - //~ reader => R_1 - //~ writer => W_0 - //~ writer => W_1 - //~ read-under-write => undefined - //~ reg R_0_addr_reg : UInt<5>, CE1 with : - //~ reset => (UInt<1>("h0"), R_0_addr_reg) - //~ ram.R_0.clk <= CE1 - //~ ram.R_0.addr <= R_0_addr_reg - //~ ram.R_0.en <= and(not(CSB1), not(OEB1)) - //~ O1 <= ram.R_0.data - //~ R_0_addr_reg <= mux(and(not(CSB1), not(OEB1)), A1, R_0_addr_reg) - //~ reg R_1_addr_reg : UInt<5>, CE2 with : - //~ reset => (UInt<1>("h0"), R_1_addr_reg) - //~ ram.R_1.clk <= CE2 - //~ ram.R_1.addr <= R_1_addr_reg - //~ ram.R_1.en <= and(not(CSB2), not(OEB2)) - //~ O2 <= ram.R_1.data - //~ R_1_addr_reg <= mux(and(not(CSB2), not(OEB2)), A2, R_1_addr_reg) - //~ ram.W_0.clk <= CE1 - //~ ram.W_0.addr <= A1 - //~ ram.W_0.en <= and(not(CSB1), not(WEB1)) - //~ ram.W_0.data <= I1 - //~ ram.W_0.mask <= UInt<1>("h1") - //~ ram.W_1.clk <= CE2 - //~ 
ram.W_1.addr <= A2 - //~ ram.W_1.en <= and(not(CSB2), not(WEB2)) - //~ ram.W_1.data <= I2 - //~ ram.W_1.mask <= UInt<1>("h1") -//~ """ - //~ compile(mem, None, v, true) - //~ execute(Some(mem), None, true, output) -//~ } +package barstools.macros + +// Use this trait for tests that invoke the memory compiler without lib. +trait HasNoLibTestGenerator extends HasSimpleTestGenerator { + this: MacroCompilerSpec with HasSRAMGenerator => + + // If there isn't a lib, then the "lib" will become a FIRRTL "mem", which + // in turn becomes synthesized flops. + // Therefore, make "lib" width/depth equal to the mem. + override lazy val libDepth = memDepth + override lazy val libWidth = memWidth + // Do the same for port names. + override lazy val libPortPrefix = memPortPrefix + + // If there is no lib, don't generate a body. + override def generateBody = "" +} + +// Test flop synthesis of the memory compiler. + +trait HasSynFlopsTestGenerator extends HasSimpleTestGenerator { + this: MacroCompilerSpec with HasSRAMGenerator => + def generateFlops: String = { +s""" + mem ram : + data-type => UInt<${libWidth}> + depth => ${libDepth} + read-latency => 1 + write-latency => 1 + readwriter => RW_0 + read-under-write => undefined + ram.RW_0.clk <= ${libPortPrefix}_clk + ram.RW_0.addr <= ${libPortPrefix}_addr + ram.RW_0.en <= UInt<1>("h1") + ram.RW_0.wmode <= ${libPortPrefix}_write_en + ${libPortPrefix}_dout <= ram.RW_0.rdata + ram.RW_0.wdata <= ${libPortPrefix}_din + ram.RW_0.wmask <= UInt<1>("h1") +""" + } + + // If there is no lib, put the flops definition into the body. + abstract override def generateBody = { + if (this.isInstanceOf[HasNoLibTestGenerator]) generateFlops else super.generateBody + } + + // If there is no lib, don't generate a footer, since the flops definition + // will be in the body. 
+ override def generateFooter = { + if (this.isInstanceOf[HasNoLibTestGenerator]) "" else +s""" + module ${lib_name} : +${generateFooterPorts} + +${generateFlops} +""" + } + +} + +class Synflops2048x8_noLib extends MacroCompilerSpec with HasSRAMGenerator with HasNoLibTestGenerator with HasSynFlopsTestGenerator { + override lazy val memDepth = 2048 + override lazy val memWidth = 8 + + compileExecuteAndTest(mem, None, v, output, true) +} + +class Synflops2048x16_noLib extends MacroCompilerSpec with HasSRAMGenerator with HasNoLibTestGenerator with HasSynFlopsTestGenerator { + override lazy val memDepth = 2048 + override lazy val memWidth = 16 + + compileExecuteAndTest(mem, None, v, output, true) +} + +class Synflops8192x16_noLib extends MacroCompilerSpec with HasSRAMGenerator with HasNoLibTestGenerator with HasSynFlopsTestGenerator { + override lazy val memDepth = 8192 + override lazy val memWidth = 16 + + compileExecuteAndTest(mem, None, v, output, true) +} + +class Synflops2048x16_depth_Lib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with HasSynFlopsTestGenerator { + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val width = 16 + + compileExecuteAndTest(mem, lib, v, output, true) +} + +class Synflops2048x64_width_Lib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator with HasSynFlopsTestGenerator { + override lazy val memWidth = 64 + override lazy val libWidth = 8 + override lazy val depth = 1024 + + compileExecuteAndTest(mem, lib, v, output, true) +} + +class Synflops_SplitPorts_Read_Write extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with HasSynFlopsTestGenerator { + import mdf.macrolib._ + + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val width = 8 + + override def generateLibSRAM = SRAMMacro( + macroType=SRAM, + name=lib_name, + width=width, + depth=libDepth, + family="1r1w", + ports=Seq( + generateReadPort("innerA", width, libDepth), + generateWritePort("innerB", width, libDepth) + ) + ) + + override def generateMemSRAM = SRAMMacro( + macroType=SRAM, + name=mem_name, + width=width, + depth=memDepth, + family="1r1w", + ports=Seq( + generateReadPort("outerB", width, memDepth), + generateWritePort("outerA", width, memDepth) + ) + ) + + override def generateHeader = +""" +circuit target_memory : + module target_memory : + input outerB_clk : Clock + input outerB_addr : UInt<11> + output outerB_dout : UInt<8> + input outerA_clk : Clock + input outerA_addr : UInt<11> + input outerA_din : UInt<8> + input outerA_write_en : UInt<1> +""" + + override def generateBody = +""" + node outerB_addr_sel = bits(outerB_addr, 10, 10) + reg outerB_addr_sel_reg : UInt<1>, outerB_clk with : + reset => (UInt<1>("h0"), outerB_addr_sel_reg) + outerB_addr_sel_reg <= mux(UInt<1>("h1"), outerB_addr_sel, outerB_addr_sel_reg) + node outerA_addr_sel = bits(outerA_addr, 10, 10) + inst mem_0_0 of awesome_lib_mem + mem_0_0.innerB_clk <= outerA_clk + mem_0_0.innerB_addr <= outerA_addr + mem_0_0.innerB_din <= bits(outerA_din, 7, 0) + mem_0_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) + mem_0_0.innerA_clk <= outerB_clk + mem_0_0.innerA_addr <= outerB_addr + node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) + node outerB_dout_0 = outerB_dout_0_0 + inst mem_1_0 of awesome_lib_mem + mem_1_0.innerB_clk <= outerA_clk + mem_1_0.innerB_addr <= outerA_addr + mem_1_0.innerB_din <= 
bits(outerA_din, 7, 0) + mem_1_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) + mem_1_0.innerA_clk <= outerB_clk + mem_1_0.innerA_addr <= outerB_addr + node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) + node outerB_dout_1 = outerB_dout_1_0 + outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) +""" + + override def generateFooterPorts = +""" + input innerA_clk : Clock + input innerA_addr : UInt<10> + output innerA_dout : UInt<8> + input innerB_clk : Clock + input innerB_addr : UInt<10> + input innerB_din : UInt<8> + input innerB_write_en : UInt<1> +""" + + override def generateFlops = +""" + mem ram : + data-type => UInt<8> + depth => 1024 + read-latency => 1 + write-latency => 1 + reader => R_0 + writer => W_0 + read-under-write => undefined + ram.R_0.clk <= innerA_clk + ram.R_0.addr <= innerA_addr + ram.R_0.en <= UInt<1>("h1") + innerA_dout <= ram.R_0.data + ram.W_0.clk <= innerB_clk + ram.W_0.addr <= innerB_addr + ram.W_0.en <= innerB_write_en + ram.W_0.data <= innerB_din + ram.W_0.mask <= UInt<1>("h1") +""" + + "Non-masked split lib; split mem" should "syn flops fine" in { + compileExecuteAndTest(mem, lib, v, output, true) + } +} + +class Synflops_SplitPorts_MaskedMem_Read_MaskedWrite extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with HasSynFlopsTestGenerator { + import mdf.macrolib._ + + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + override lazy val width = 8 + override lazy val memMaskGran = Some(8) + override lazy val libMaskGran = Some(1) + + override def generateLibSRAM = SRAMMacro( + macroType=SRAM, + name=lib_name, + width=width, + depth=libDepth, + family="1r1w", + ports=Seq( + generateReadPort("innerA", width, libDepth), + generateWritePort("innerB", width, libDepth, libMaskGran) + ) + ) + + override def generateMemSRAM = SRAMMacro( + macroType=SRAM, + name=mem_name, + width=width, + depth=memDepth, + family="1r1w", + ports=Seq( + generateReadPort("outerB", width, memDepth), + generateWritePort("outerA", width, memDepth, memMaskGran) + ) + ) + + override def generateHeader = +""" +circuit target_memory : + module target_memory : + input outerB_clk : Clock + input outerB_addr : UInt<11> + output outerB_dout : UInt<8> + input outerA_clk : Clock + input outerA_addr : UInt<11> + input outerA_din : UInt<8> + input outerA_write_en : UInt<1> + input outerA_mask : UInt<1> +""" + + override def generateBody = +""" + node outerB_addr_sel = bits(outerB_addr, 10, 10) + reg outerB_addr_sel_reg : UInt<1>, outerB_clk with : + reset => (UInt<1>("h0"), outerB_addr_sel_reg) + outerB_addr_sel_reg <= mux(UInt<1>("h1"), outerB_addr_sel, outerB_addr_sel_reg) + node outerA_addr_sel = bits(outerA_addr, 10, 10) + inst mem_0_0 of awesome_lib_mem + mem_0_0.innerB_clk <= outerA_clk + mem_0_0.innerB_addr <= outerA_addr + mem_0_0.innerB_din <= bits(outerA_din, 7, 0) + mem_0_0.innerB_mask <= cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), bits(outerA_mask, 0, 0)))))))) + mem_0_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) + mem_0_0.innerA_clk <= outerB_clk + mem_0_0.innerA_addr <= outerB_addr + node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) + node outerB_dout_0 = outerB_dout_0_0 + inst 
mem_1_0 of awesome_lib_mem + mem_1_0.innerB_clk <= outerA_clk + mem_1_0.innerB_addr <= outerA_addr + mem_1_0.innerB_din <= bits(outerA_din, 7, 0) + mem_1_0.innerB_mask <= cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), bits(outerA_mask, 0, 0)))))))) + mem_1_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) + mem_1_0.innerA_clk <= outerB_clk + mem_1_0.innerA_addr <= outerB_addr + node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) + node outerB_dout_1 = outerB_dout_1_0 + outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) +""" + + override def generateFooterPorts = +""" + input innerA_clk : Clock + input innerA_addr : UInt<10> + output innerA_dout : UInt<8> + input innerB_clk : Clock + input innerB_addr : UInt<10> + input innerB_din : UInt<8> + input innerB_write_en : UInt<1> + input innerB_mask : UInt<8> +""" + + override def generateFlops = +""" + mem ram : + data-type => UInt<1>[8] + depth => 1024 + read-latency => 1 + write-latency => 1 + reader => R_0 + writer => W_0 + read-under-write => undefined + ram.R_0.clk <= innerA_clk + ram.R_0.addr <= innerA_addr + ram.R_0.en <= UInt<1>("h1") + innerA_dout <= cat(ram.R_0.data[7], cat(ram.R_0.data[6], cat(ram.R_0.data[5], cat(ram.R_0.data[4], cat(ram.R_0.data[3], cat(ram.R_0.data[2], cat(ram.R_0.data[1], ram.R_0.data[0]))))))) + ram.W_0.clk <= innerB_clk + ram.W_0.addr <= innerB_addr + ram.W_0.en <= innerB_write_en + ram.W_0.data[0] <= bits(innerB_din, 0, 0) + ram.W_0.data[1] <= bits(innerB_din, 1, 1) + ram.W_0.data[2] <= bits(innerB_din, 2, 2) + ram.W_0.data[3] <= bits(innerB_din, 3, 3) + ram.W_0.data[4] <= bits(innerB_din, 4, 4) + ram.W_0.data[5] <= bits(innerB_din, 5, 5) + ram.W_0.data[6] <= bits(innerB_din, 6, 6) + ram.W_0.data[7] <= bits(innerB_din, 7, 7) + ram.W_0.mask[0] <= bits(innerB_mask, 0, 0) + ram.W_0.mask[1] <= bits(innerB_mask, 1, 1) + ram.W_0.mask[2] <= bits(innerB_mask, 2, 2) + ram.W_0.mask[3] <= bits(innerB_mask, 3, 3) + ram.W_0.mask[4] <= bits(innerB_mask, 4, 4) + ram.W_0.mask[5] <= bits(innerB_mask, 5, 5) + ram.W_0.mask[6] <= bits(innerB_mask, 6, 6) + ram.W_0.mask[7] <= bits(innerB_mask, 7, 7) +""" + + "masked split lib; masked split mem" should "syn flops fine" in { + compileExecuteAndTest(mem, lib, v, output, true) + } +} From 94b13e96fb45349b7ee9a7e3ecbf6104354845f1 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 15:36:54 -0700 Subject: [PATCH 067/273] Add functional tests To be enabled when a new firrtl-interpreter is published --- build.sbt | 5 ++ macros/src/test/scala/Functional.scala | 119 +++++++++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 macros/src/test/scala/Functional.scala diff --git a/build.sbt b/build.sbt index c133667e8..37b179532 100644 --- a/build.sbt +++ b/build.sbt @@ -26,6 +26,11 @@ lazy val mdf = (project in file("mdf/scalalib")) lazy val macros = (project in file("macros")) .dependsOn(mdf) .settings(commonSettings) + .settings(Seq( + libraryDependencies ++= Seq( + "edu.berkeley.cs" %% "firrtl-interpreter" % "0.1-SNAPSHOT" % Test + ) + )) lazy val tapeout = (project in file("tapeout")) .settings(commonSettings) diff --git a/macros/src/test/scala/Functional.scala b/macros/src/test/scala/Functional.scala new file mode 100644 index 000000000..cb2b180f7 --- 
/dev/null +++ b/macros/src/test/scala/Functional.scala @@ -0,0 +1,119 @@ +package barstools.macros + +import firrtl_interpreter.InterpretiveTester + +// Functional tests on memory compiler outputs. + +// Synchronous write and read back. +class SynchronousReadAndWrite extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 12 + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + + compile(mem, lib, v, true) + val result = execute(mem, lib, true) + + it should "run with InterpretedTester" in { + pending // Enable this when https://github.com/freechipsproject/firrtl-interpreter/pull/88 is snapshot-published + + val addr1 = 0 + val addr1val = 0xff + val addr2 = 42 + val addr2val = 0xf0 + val addr3 = 1 << 10 + val addr3val = 1 << 10 + + val tester = new InterpretiveTester(result.serialize) + //~ tester.setVerbose() + + tester.poke("outer_write_en", 0) + tester.step() + + // Write addresses and read them. + tester.poke("outer_addr", addr1) + tester.poke("outer_din", addr1val) + tester.poke("outer_write_en", 1) + tester.step() + tester.poke("outer_write_en", 0) + tester.step() + tester.poke("outer_addr", addr2) + tester.poke("outer_din", addr2val) + tester.poke("outer_write_en", 1) + tester.step() + tester.poke("outer_write_en", 0) + tester.step() + tester.poke("outer_addr", addr3) + tester.poke("outer_din", addr3val) + tester.poke("outer_write_en", 1) + tester.step() + tester.poke("outer_write_en", 0) + tester.step() + + tester.poke("outer_addr", addr1) + tester.step() + tester.expect("outer_dout", addr1val) + + tester.poke("outer_addr", addr2) + tester.step() + tester.expect("outer_dout", addr2val) + + tester.poke("outer_addr", addr3) + tester.step() + tester.expect("outer_dout", addr3val) + } +} + +// Test to verify that the circuit doesn't read combinationally based on addr +// between two submemories. +class DontReadCombinationally extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 8 + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + + compile(mem, lib, v, true) + val result = execute(mem, lib, true) + + it should "run with InterpretedTester" in { + pending // Enable this when https://github.com/freechipsproject/firrtl-interpreter/pull/88 is snapshot-published + + val addr1 = 0 + val addr1a = 1 + val addr2 = 1 << 10 + + val tester = new InterpretiveTester(result.serialize) + //~ tester.setVerbose() + + tester.poke("outer_write_en", 0) + tester.step() + + // Write two addresses, one in the lower submemory and the other in the + // higher submemory. + tester.poke("outer_addr", addr1) + tester.poke("outer_din", 0x11) + tester.poke("outer_write_en", 1) + tester.step() + tester.poke("outer_addr", addr1a) + tester.poke("outer_din", 0x1a) + tester.poke("outer_write_en", 1) + tester.step() + tester.poke("outer_addr", addr2) + tester.poke("outer_din", 0xaa) + tester.poke("outer_write_en", 1) + tester.step() + tester.poke("outer_write_en", 0) + tester.poke("outer_addr", addr1) + tester.step() + + // Test that there is no combinational read. + tester.poke("outer_addr", addr1) + tester.expect("outer_dout", 0x11) + tester.poke("outer_addr", addr1a) + tester.expect("outer_dout", 0x11) + tester.poke("outer_addr", addr2) + tester.expect("outer_dout", 0x11) + + // And upon step it should work again. 
+ tester.step() + tester.expect("outer_addr", 0xaa) + } +} From e47cf92139a409902e754e43b9782277f56839b8 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 15:41:59 -0700 Subject: [PATCH 068/273] Move HasNoLibTestGenerator out of SynFlops --- macros/src/test/scala/MacroCompilerSpec.scala | 16 ++++++++++++++++ macros/src/test/scala/SynFlops.scala | 16 ---------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index fb70c5617..8a8f542f4 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -313,6 +313,22 @@ ${generateFooter} val output = generateOutput() } +// Use this trait for tests that invoke the memory compiler without lib. +trait HasNoLibTestGenerator extends HasSimpleTestGenerator { + this: MacroCompilerSpec with HasSRAMGenerator => + + // If there isn't a lib, then the "lib" will become a FIRRTL "mem", which + // in turn becomes synthesized flops. + // Therefore, make "lib" width/depth equal to the mem. + override lazy val libDepth = memDepth + override lazy val libWidth = memWidth + // Do the same for port names. + override lazy val libPortPrefix = memPortPrefix + + // If there is no lib, don't generate a body. + override def generateBody = "" +} + //~ class RocketChipTest extends MacroCompilerSpec { //~ val mem = new File(macroDir, "rocketchip.json") //~ val lib = new File(macroDir, "mylib.json") diff --git a/macros/src/test/scala/SynFlops.scala b/macros/src/test/scala/SynFlops.scala index 0394dc40c..f273c29e4 100644 --- a/macros/src/test/scala/SynFlops.scala +++ b/macros/src/test/scala/SynFlops.scala @@ -1,21 +1,5 @@ package barstools.macros -// Use this trait for tests that invoke the memory compiler without lib. -trait HasNoLibTestGenerator extends HasSimpleTestGenerator { - this: MacroCompilerSpec with HasSRAMGenerator => - - // If there isn't a lib, then the "lib" will become a FIRRTL "mem", which - // in turn becomes synthesized flops. - // Therefore, make "lib" width/depth equal to the mem. - override lazy val libDepth = memDepth - override lazy val libWidth = memWidth - // Do the same for port names. - override lazy val libPortPrefix = memPortPrefix - - // If there is no lib, don't generate a body. - override def generateBody = "" -} - // Test flop synthesis of the memory compiler. trait HasSynFlopsTestGenerator extends HasSimpleTestGenerator { From 519ffef50ac90937b7a7a4cec30855608dd4d78c Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 15:43:59 -0700 Subject: [PATCH 069/273] Tests aren't that brittle since firrtl reparses the output --- macros/src/test/scala/MacroCompilerSpec.scala | 2 -- 1 file changed, 2 deletions(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 8a8f542f4..892e63d03 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -6,8 +6,6 @@ import firrtl.Parser.parse import firrtl.Utils.ceilLog2 import java.io.{File, StringWriter} -// TODO: we should think of a less brittle way to run these tests. 
- abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalatest.Matchers { /** * Terminology note: From 85d18b736edae53ebcbedc3bd1f41cca2ef4bde3 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 15:53:46 -0700 Subject: [PATCH 070/273] Document --- macros/src/main/scala/MacroCompiler.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 129d86a17..a96043f25 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -448,6 +448,8 @@ object MacroCompiler extends App { val verilogWriter = new FileWriter(new File(params.get(Verilog).get)) if (macros.nonEmpty) { + // Note: the last macro in the input list is (seemingly arbitrarily) + // determined as the firrtl "top-level module". val circuit = Circuit(NoInfo, macros, macros.last.name) val annotations = AnnotationMap(Seq(MacroCompilerAnnotation( circuit.main, params.get(Macros).get, params.get(Library), synflops))) From a0e817b6fb8cb8f82ce0799a737cb252ea44ad26 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 16:47:24 -0700 Subject: [PATCH 071/273] Bump mdf --- mdf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mdf b/mdf index 89c15682a..1267d8813 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit 89c15682aa85d0c9175c23706939533d7611e25d +Subproject commit 1267d8813ae7af004ffe8803a8a758750dfd3987 From 513da4eb37011c381f4e979bb9d97978e87df90e Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 17:03:21 -0700 Subject: [PATCH 072/273] Support non-prefixed ports --- macros/src/test/scala/MacroCompilerSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 892e63d03..95547a20e 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -119,7 +119,7 @@ trait HasSRAMGenerator { write: Boolean, writeEnable: Boolean = false ): MacroPort = { - val realPrefix = prefix + "_" + val realPrefix = if (prefix == "") "" else prefix + "_" MacroPort( address=PolarizedPort(name=realPrefix + "addr", polarity=ActiveHigh), From a177c895e81fcb9bb472d2ec4110118f83b4bbea Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 17:02:48 -0700 Subject: [PATCH 073/273] Finish rewriting in new format --- macros/src/test/scala/MacroCompilerSpec.scala | 265 ----------- macros/src/test/scala/SpecificExamples.scala | 428 ++++++++++++++++++ 2 files changed, 428 insertions(+), 265 deletions(-) create mode 100644 macros/src/test/scala/SpecificExamples.scala diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 95547a20e..b752f5f22 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -326,268 +326,3 @@ trait HasNoLibTestGenerator extends HasSimpleTestGenerator { // If there is no lib, don't generate a body. override def generateBody = "" } - -//~ class RocketChipTest extends MacroCompilerSpec { - //~ val mem = new File(macroDir, "rocketchip.json") - //~ val lib = new File(macroDir, "mylib.json") - //~ val v = new File(testDir, "rocketchip.macro.v") - //~ val output = // TODO: check correctness... 
-//~ """ -//~ circuit T_2172_ext : - //~ module tag_array_ext : - //~ input RW0_clk : Clock - //~ input RW0_addr : UInt<6> - //~ input RW0_wdata : UInt<80> - //~ output RW0_rdata : UInt<80> - //~ input RW0_en : UInt<1> - //~ input RW0_wmode : UInt<1> - //~ input RW0_wmask : UInt<4> - - //~ inst mem_0_0 of SRAM1RW64x32 - //~ inst mem_0_1 of SRAM1RW64x32 - //~ inst mem_0_2 of SRAM1RW64x32 - //~ inst mem_0_3 of SRAM1RW64x32 - //~ mem_0_0.CE <= RW0_clk - //~ mem_0_0.A <= RW0_addr - //~ node RW0_rdata_0_0 = bits(mem_0_0.O, 19, 0) - //~ mem_0_0.I <= bits(RW0_wdata, 19, 0) - //~ mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) - //~ mem_0_0.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ mem_0_1.CE <= RW0_clk - //~ mem_0_1.A <= RW0_addr - //~ node RW0_rdata_0_1 = bits(mem_0_1.O, 19, 0) - //~ mem_0_1.I <= bits(RW0_wdata, 39, 20) - //~ mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) - //~ mem_0_1.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ mem_0_2.CE <= RW0_clk - //~ mem_0_2.A <= RW0_addr - //~ node RW0_rdata_0_2 = bits(mem_0_2.O, 19, 0) - //~ mem_0_2.I <= bits(RW0_wdata, 59, 40) - //~ mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) - //~ mem_0_2.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ mem_0_3.CE <= RW0_clk - //~ mem_0_3.A <= RW0_addr - //~ node RW0_rdata_0_3 = bits(mem_0_3.O, 19, 0) - //~ mem_0_3.I <= bits(RW0_wdata, 79, 60) - //~ mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) - //~ mem_0_3.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ node RW0_rdata_0 = cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))) - //~ RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) - - //~ extmodule SRAM1RW64x32 : - //~ input CE : Clock - //~ input A : UInt<6> - //~ input I : UInt<32> - //~ output O : UInt<32> - //~ input CEB : UInt<1> - //~ input OEB : UInt<1> - //~ input WEB : UInt<1> - - //~ defname = SRAM1RW64x32 - - - //~ module T_1090_ext : - //~ input RW0_clk : Clock - //~ input RW0_addr : UInt<9> - //~ input RW0_wdata : UInt<64> - //~ output RW0_rdata : UInt<64> - //~ input RW0_en : UInt<1> - //~ input RW0_wmode : UInt<1> - - //~ inst mem_0_0 of SRAM1RW512x32 - //~ inst mem_0_1 of SRAM1RW512x32 - //~ mem_0_0.CE <= RW0_clk - //~ mem_0_0.A <= RW0_addr - //~ node RW0_rdata_0_0 = bits(mem_0_0.O, 31, 0) - //~ mem_0_0.I <= bits(RW0_wdata, 31, 0) - //~ mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_0.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_0.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ mem_0_1.CE <= RW0_clk - //~ mem_0_1.A <= RW0_addr - //~ node RW0_rdata_0_1 = bits(mem_0_1.O, 31, 0) - //~ mem_0_1.I <= bits(RW0_wdata, 63, 32) - //~ mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_1.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_1.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) - //~ RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) - - //~ module T_406_ext : - //~ input RW0_clk : Clock - //~ input RW0_addr : UInt<9> - //~ input RW0_wdata : UInt<64> - //~ output RW0_rdata : UInt<64> - //~ input RW0_en : UInt<1> - //~ input RW0_wmode : UInt<1> - //~ input RW0_wmask : 
UInt<8> - - //~ inst mem_0_0 of SRAM1RW512x32 - //~ inst mem_0_1 of SRAM1RW512x32 - //~ inst mem_0_2 of SRAM1RW512x32 - //~ inst mem_0_3 of SRAM1RW512x32 - //~ inst mem_0_4 of SRAM1RW512x32 - //~ inst mem_0_5 of SRAM1RW512x32 - //~ inst mem_0_6 of SRAM1RW512x32 - //~ inst mem_0_7 of SRAM1RW512x32 - //~ mem_0_0.CE <= RW0_clk - //~ mem_0_0.A <= RW0_addr - //~ node RW0_rdata_0_0 = bits(mem_0_0.O, 7, 0) - //~ mem_0_0.I <= bits(RW0_wdata, 7, 0) - //~ mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) - //~ mem_0_0.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ mem_0_1.CE <= RW0_clk - //~ mem_0_1.A <= RW0_addr - //~ node RW0_rdata_0_1 = bits(mem_0_1.O, 7, 0) - //~ mem_0_1.I <= bits(RW0_wdata, 15, 8) - //~ mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) - //~ mem_0_1.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ mem_0_2.CE <= RW0_clk - //~ mem_0_2.A <= RW0_addr - //~ node RW0_rdata_0_2 = bits(mem_0_2.O, 7, 0) - //~ mem_0_2.I <= bits(RW0_wdata, 23, 16) - //~ mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) - //~ mem_0_2.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ mem_0_3.CE <= RW0_clk - //~ mem_0_3.A <= RW0_addr - //~ node RW0_rdata_0_3 = bits(mem_0_3.O, 7, 0) - //~ mem_0_3.I <= bits(RW0_wdata, 31, 24) - //~ mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) - //~ mem_0_3.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ mem_0_4.CE <= RW0_clk - //~ mem_0_4.A <= RW0_addr - //~ node RW0_rdata_0_4 = bits(mem_0_4.O, 7, 0) - //~ mem_0_4.I <= bits(RW0_wdata, 39, 32) - //~ mem_0_4.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_4.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 4, 4)), UInt<1>("h1"))) - //~ mem_0_4.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ mem_0_5.CE <= RW0_clk - //~ mem_0_5.A <= RW0_addr - //~ node RW0_rdata_0_5 = bits(mem_0_5.O, 7, 0) - //~ mem_0_5.I <= bits(RW0_wdata, 47, 40) - //~ mem_0_5.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_5.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 5, 5)), UInt<1>("h1"))) - //~ mem_0_5.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ mem_0_6.CE <= RW0_clk - //~ mem_0_6.A <= RW0_addr - //~ node RW0_rdata_0_6 = bits(mem_0_6.O, 7, 0) - //~ mem_0_6.I <= bits(RW0_wdata, 55, 48) - //~ mem_0_6.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_6.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 6, 6)), UInt<1>("h1"))) - //~ mem_0_6.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ mem_0_7.CE <= RW0_clk - //~ mem_0_7.A <= RW0_addr - //~ node RW0_rdata_0_7 = bits(mem_0_7.O, 7, 0) - //~ mem_0_7.I <= bits(RW0_wdata, 63, 56) - //~ mem_0_7.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) - //~ mem_0_7.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1"))) - //~ mem_0_7.CEB <= not(and(RW0_en, UInt<1>("h1"))) - //~ node RW0_rdata_0 = cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))) - //~ RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) - - //~ extmodule SRAM1RW512x32 : - //~ input CE : Clock - //~ input A : UInt<9> - //~ input I : UInt<32> - //~ output O : UInt<32> - //~ input CEB : UInt<1> - //~ input OEB : UInt<1> - //~ input WEB : UInt<1> - - //~ defname = SRAM1RW512x32 - - 
- //~ module T_2172_ext : - //~ input W0_clk : Clock - //~ input W0_addr : UInt<6> - //~ input W0_data : UInt<88> - //~ input W0_en : UInt<1> - //~ input W0_mask : UInt<4> - //~ input R0_clk : Clock - //~ input R0_addr : UInt<6> - //~ output R0_data : UInt<88> - //~ input R0_en : UInt<1> - - //~ inst mem_0_0 of SRAM2RW64x32 - //~ inst mem_0_1 of SRAM2RW64x32 - //~ inst mem_0_2 of SRAM2RW64x32 - //~ inst mem_0_3 of SRAM2RW64x32 - //~ mem_0_0.CE1 <= W0_clk - //~ mem_0_0.A1 <= W0_addr - //~ mem_0_0.I1 <= bits(W0_data, 21, 0) - //~ mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), UInt<1>("h1"))) - //~ mem_0_0.CEB1 <= not(and(W0_en, UInt<1>("h1"))) - //~ mem_0_1.CE1 <= W0_clk - //~ mem_0_1.A1 <= W0_addr - //~ mem_0_1.I1 <= bits(W0_data, 43, 22) - //~ mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 1, 1)), UInt<1>("h1"))) - //~ mem_0_1.CEB1 <= not(and(W0_en, UInt<1>("h1"))) - //~ mem_0_2.CE1 <= W0_clk - //~ mem_0_2.A1 <= W0_addr - //~ mem_0_2.I1 <= bits(W0_data, 65, 44) - //~ mem_0_2.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_2.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 2, 2)), UInt<1>("h1"))) - //~ mem_0_2.CEB1 <= not(and(W0_en, UInt<1>("h1"))) - //~ mem_0_3.CE1 <= W0_clk - //~ mem_0_3.A1 <= W0_addr - //~ mem_0_3.I1 <= bits(W0_data, 87, 66) - //~ mem_0_3.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_3.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 3, 3)), UInt<1>("h1"))) - //~ mem_0_3.CEB1 <= not(and(W0_en, UInt<1>("h1"))) - //~ mem_0_0.CE2 <= R0_clk - //~ mem_0_0.A2 <= R0_addr - //~ node R0_data_0_0 = bits(mem_0_0.O2, 21, 0) - //~ mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - //~ mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_0.CEB2 <= not(and(R0_en, UInt<1>("h1"))) - //~ mem_0_1.CE2 <= R0_clk - //~ mem_0_1.A2 <= R0_addr - //~ node R0_data_0_1 = bits(mem_0_1.O2, 21, 0) - //~ mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - //~ mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_1.CEB2 <= not(and(R0_en, UInt<1>("h1"))) - //~ mem_0_2.CE2 <= R0_clk - //~ mem_0_2.A2 <= R0_addr - //~ node R0_data_0_2 = bits(mem_0_2.O2, 21, 0) - //~ mem_0_2.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - //~ mem_0_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_2.CEB2 <= not(and(R0_en, UInt<1>("h1"))) - //~ mem_0_3.CE2 <= R0_clk - //~ mem_0_3.A2 <= R0_addr - //~ node R0_data_0_3 = bits(mem_0_3.O2, 21, 0) - //~ mem_0_3.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - //~ mem_0_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - //~ mem_0_3.CEB2 <= not(and(R0_en, UInt<1>("h1"))) - //~ node R0_data_0 = cat(R0_data_0_3, cat(R0_data_0_2, cat(R0_data_0_1, R0_data_0_0))) - //~ R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) - - //~ extmodule SRAM2RW64x32 : - //~ input CE1 : Clock - //~ input A1 : UInt<6> - //~ input I1 : UInt<32> - //~ output O1 : UInt<32> - //~ input CEB1 : UInt<1> - //~ input OEB1 : UInt<1> - //~ input WEB1 : UInt<1> - //~ input CE2 : Clock - //~ input A2 : UInt<6> - //~ input I2 : UInt<32> - //~ output O2 : UInt<32> - //~ input CEB2 : UInt<1> - //~ input OEB2 : UInt<1> - //~ input WEB2 : UInt<1> - - //~ defname = SRAM2RW64x32 -//~ """ - //~ compile(mem, Some(lib), v, false) -//~ } diff --git 
a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala new file mode 100644 index 000000000..648c57bd3 --- /dev/null +++ b/macros/src/test/scala/SpecificExamples.scala @@ -0,0 +1,428 @@ +package barstools.macros + +import mdf.macrolib._ + +// Specific one-off tests to run, not created by a generator. + +class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { + val mem = s"mem-RocketChipTest.json" + val lib = s"lib-RocketChipTest.json" + val v = s"RocketChipTest.v" + + val libSRAMs = Seq( + SRAMMacro( + macroType=SRAM, + name="SRAM1RW1024x8", + depth=1024, + width=8, + family="1rw", + ports=Seq( + generateReadWritePort("", 8, 1024) + ) + ), + SRAMMacro( + macroType=SRAM, + name="SRAM1RW512x32", + depth=512, + width=32, + family="1rw", + ports=Seq( + generateReadWritePort("", 32, 512) + ) + ), + SRAMMacro( + macroType=SRAM, + name="SRAM1RW64x128", + depth=64, + width=128, + family="1rw", + ports=Seq( + generateReadWritePort("", 128, 64) + ) + ), + SRAMMacro( + macroType=SRAM, + name="SRAM1RW64x32", + depth=64, + width=32, + family="1rw", + ports=Seq( + generateReadWritePort("", 32, 64) + ) + ), + SRAMMacro( + macroType=SRAM, + name="SRAM1RW64x8", + depth=64, + width=8, + family="1rw", + ports=Seq( + generateReadWritePort("", 8, 64) + ) + ), + SRAMMacro( + macroType=SRAM, + name="SRAM1RW512x8", + depth=512, + width=8, + family="1rw", + ports=Seq( + generateReadWritePort("", 8, 512) + ) + ), + SRAMMacro( + macroType=SRAM, + name="SRAM2RW64x32", + depth=64, + width=32, + family="1r1w", + ports=Seq( + generateReadPort("portA", 32, 64), + generateWritePort("portB", 32, 64) + ) + ) + ) + + val memSRAMs = mdf.macrolib.Utils.readMDFFromString( +""" +[ + { + "type": "sram", + "name": "tag_array_ext", + "depth": 64, + "width": 80, + "ports": [ + { + "clock port name": "RW0_clk", + "mask granularity": 20, + "output port name": "RW0_rdata", + "input port name": "RW0_wdata", + "address port name": "RW0_addr", + "mask port name": "RW0_wmask", + "chip enable port name": "RW0_en", + "write enable port name": "RW0_wmode" + } + ] + }, + { + "type": "sram", + "name": "T_1090_ext", + "depth": 512, + "width": 64, + "ports": [ + { + "clock port name": "RW0_clk", + "output port name": "RW0_rdata", + "input port name": "RW0_wdata", + "address port name": "RW0_addr", + "chip enable port name": "RW0_en", + "write enable port name": "RW0_wmode" + } + ] + }, + { + "type": "sram", + "name": "T_406_ext", + "depth": 512, + "width": 64, + "ports": [ + { + "clock port name": "RW0_clk", + "mask granularity": 8, + "output port name": "RW0_rdata", + "input port name": "RW0_wdata", + "address port name": "RW0_addr", + "mask port name": "RW0_wmask", + "chip enable port name": "RW0_en", + "write enable port name": "RW0_wmode" + } + ] + }, + { + "type": "sram", + "name": "T_2172_ext", + "depth": 64, + "width": 88, + "ports": [ + { + "clock port name": "W0_clk", + "mask granularity": 22, + "input port name": "W0_data", + "address port name": "W0_addr", + "chip enable port name": "W0_en", + "mask port name": "W0_mask" + }, + { + "clock port name": "R0_clk", + "output port name": "R0_data", + "address port name": "R0_addr", + "chip enable port name": "R0_en" + } + ] + } +] +""").getOrElse(List()) + + writeToLib(lib, libSRAMs) + writeToMem(mem, memSRAMs) + + val output = // TODO: check correctness... 
+""" +circuit T_2172_ext : + module tag_array_ext : + input RW0_clk : Clock + input RW0_addr : UInt<6> + input RW0_wdata : UInt<80> + output RW0_rdata : UInt<80> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + input RW0_wmask : UInt<4> + + inst mem_0_0 of SRAM1RW64x8 + inst mem_0_1 of SRAM1RW64x8 + inst mem_0_2 of SRAM1RW64x8 + inst mem_0_3 of SRAM1RW64x8 + inst mem_0_4 of SRAM1RW64x8 + inst mem_0_5 of SRAM1RW64x8 + inst mem_0_6 of SRAM1RW64x8 + inst mem_0_7 of SRAM1RW64x8 + inst mem_0_8 of SRAM1RW64x8 + inst mem_0_9 of SRAM1RW64x8 + inst mem_0_10 of SRAM1RW64x8 + inst mem_0_11 of SRAM1RW64x8 + mem_0_0.clk <= RW0_clk + mem_0_0.addr <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.dout, 7, 0) + mem_0_0.din <= bits(RW0_wdata, 7, 0) + mem_0_0.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1")) + mem_0_1.clk <= RW0_clk + mem_0_1.addr <= RW0_addr + node RW0_rdata_0_1 = bits(mem_0_1.dout, 7, 0) + mem_0_1.din <= bits(RW0_wdata, 15, 8) + mem_0_1.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1")) + mem_0_2.clk <= RW0_clk + mem_0_2.addr <= RW0_addr + node RW0_rdata_0_2 = bits(mem_0_2.dout, 3, 0) + mem_0_2.din <= bits(RW0_wdata, 19, 16) + mem_0_2.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1")) + mem_0_3.clk <= RW0_clk + mem_0_3.addr <= RW0_addr + node RW0_rdata_0_3 = bits(mem_0_3.dout, 7, 0) + mem_0_3.din <= bits(RW0_wdata, 27, 20) + mem_0_3.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1")) + mem_0_4.clk <= RW0_clk + mem_0_4.addr <= RW0_addr + node RW0_rdata_0_4 = bits(mem_0_4.dout, 7, 0) + mem_0_4.din <= bits(RW0_wdata, 35, 28) + mem_0_4.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1")) + mem_0_5.clk <= RW0_clk + mem_0_5.addr <= RW0_addr + node RW0_rdata_0_5 = bits(mem_0_5.dout, 3, 0) + mem_0_5.din <= bits(RW0_wdata, 39, 36) + mem_0_5.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1")) + mem_0_6.clk <= RW0_clk + mem_0_6.addr <= RW0_addr + node RW0_rdata_0_6 = bits(mem_0_6.dout, 7, 0) + mem_0_6.din <= bits(RW0_wdata, 47, 40) + mem_0_6.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1")) + mem_0_7.clk <= RW0_clk + mem_0_7.addr <= RW0_addr + node RW0_rdata_0_7 = bits(mem_0_7.dout, 7, 0) + mem_0_7.din <= bits(RW0_wdata, 55, 48) + mem_0_7.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1")) + mem_0_8.clk <= RW0_clk + mem_0_8.addr <= RW0_addr + node RW0_rdata_0_8 = bits(mem_0_8.dout, 3, 0) + mem_0_8.din <= bits(RW0_wdata, 59, 56) + mem_0_8.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1")) + mem_0_9.clk <= RW0_clk + mem_0_9.addr <= RW0_addr + node RW0_rdata_0_9 = bits(mem_0_9.dout, 7, 0) + mem_0_9.din <= bits(RW0_wdata, 67, 60) + mem_0_9.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1")) + mem_0_10.clk <= RW0_clk + mem_0_10.addr <= RW0_addr + node RW0_rdata_0_10 = bits(mem_0_10.dout, 7, 0) + mem_0_10.din <= bits(RW0_wdata, 75, 68) + mem_0_10.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1")) + mem_0_11.clk <= RW0_clk + mem_0_11.addr <= RW0_addr + node RW0_rdata_0_11 = bits(mem_0_11.dout, 3, 0) + mem_0_11.din <= bits(RW0_wdata, 79, 76) + mem_0_11.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1")) + node RW0_rdata_0 = cat(RW0_rdata_0_11, cat(RW0_rdata_0_10, cat(RW0_rdata_0_9, cat(RW0_rdata_0_8, cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))))))) + RW0_rdata <= 
mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + extmodule SRAM1RW64x8 : + input clk : Clock + input addr : UInt<6> + input din : UInt<8> + output dout : UInt<8> + input write_en : UInt<1> + + defname = SRAM1RW64x8 + + + module T_1090_ext : + input RW0_clk : Clock + input RW0_addr : UInt<9> + input RW0_wdata : UInt<64> + output RW0_rdata : UInt<64> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + + inst mem_0_0 of SRAM1RW512x32 + inst mem_0_1 of SRAM1RW512x32 + mem_0_0.clk <= RW0_clk + mem_0_0.addr <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.dout, 31, 0) + mem_0_0.din <= bits(RW0_wdata, 31, 0) + mem_0_0.write_en <= and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1")) + mem_0_1.clk <= RW0_clk + mem_0_1.addr <= RW0_addr + node RW0_rdata_0_1 = bits(mem_0_1.dout, 31, 0) + mem_0_1.din <= bits(RW0_wdata, 63, 32) + mem_0_1.write_en <= and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1")) + node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + extmodule SRAM1RW512x32 : + input clk : Clock + input addr : UInt<9> + input din : UInt<32> + output dout : UInt<32> + input write_en : UInt<1> + + defname = SRAM1RW512x32 + + + module T_406_ext : + input RW0_clk : Clock + input RW0_addr : UInt<9> + input RW0_wdata : UInt<64> + output RW0_rdata : UInt<64> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + input RW0_wmask : UInt<8> + + inst mem_0_0 of SRAM1RW512x8 + inst mem_0_1 of SRAM1RW512x8 + inst mem_0_2 of SRAM1RW512x8 + inst mem_0_3 of SRAM1RW512x8 + inst mem_0_4 of SRAM1RW512x8 + inst mem_0_5 of SRAM1RW512x8 + inst mem_0_6 of SRAM1RW512x8 + inst mem_0_7 of SRAM1RW512x8 + mem_0_0.clk <= RW0_clk + mem_0_0.addr <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.dout, 7, 0) + mem_0_0.din <= bits(RW0_wdata, 7, 0) + mem_0_0.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1")) + mem_0_1.clk <= RW0_clk + mem_0_1.addr <= RW0_addr + node RW0_rdata_0_1 = bits(mem_0_1.dout, 7, 0) + mem_0_1.din <= bits(RW0_wdata, 15, 8) + mem_0_1.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1")) + mem_0_2.clk <= RW0_clk + mem_0_2.addr <= RW0_addr + node RW0_rdata_0_2 = bits(mem_0_2.dout, 7, 0) + mem_0_2.din <= bits(RW0_wdata, 23, 16) + mem_0_2.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1")) + mem_0_3.clk <= RW0_clk + mem_0_3.addr <= RW0_addr + node RW0_rdata_0_3 = bits(mem_0_3.dout, 7, 0) + mem_0_3.din <= bits(RW0_wdata, 31, 24) + mem_0_3.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1")) + mem_0_4.clk <= RW0_clk + mem_0_4.addr <= RW0_addr + node RW0_rdata_0_4 = bits(mem_0_4.dout, 7, 0) + mem_0_4.din <= bits(RW0_wdata, 39, 32) + mem_0_4.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 4, 4)), UInt<1>("h1")) + mem_0_5.clk <= RW0_clk + mem_0_5.addr <= RW0_addr + node RW0_rdata_0_5 = bits(mem_0_5.dout, 7, 0) + mem_0_5.din <= bits(RW0_wdata, 47, 40) + mem_0_5.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 5, 5)), UInt<1>("h1")) + mem_0_6.clk <= RW0_clk + mem_0_6.addr <= RW0_addr + node RW0_rdata_0_6 = bits(mem_0_6.dout, 7, 0) + mem_0_6.din <= bits(RW0_wdata, 55, 48) + mem_0_6.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 6, 6)), UInt<1>("h1")) + mem_0_7.clk <= RW0_clk + mem_0_7.addr <= RW0_addr + node RW0_rdata_0_7 = bits(mem_0_7.dout, 7, 0) + mem_0_7.din <= bits(RW0_wdata, 63, 56) + mem_0_7.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1")) + node RW0_rdata_0 = cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, 
cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + extmodule SRAM1RW512x8 : + input clk : Clock + input addr : UInt<9> + input din : UInt<8> + output dout : UInt<8> + input write_en : UInt<1> + + defname = SRAM1RW512x8 + + + module T_2172_ext : + input W0_clk : Clock + input W0_addr : UInt<6> + input W0_data : UInt<88> + input W0_en : UInt<1> + input W0_mask : UInt<4> + input R0_clk : Clock + input R0_addr : UInt<6> + output R0_data : UInt<88> + input R0_en : UInt<1> + + inst mem_0_0 of SRAM2RW64x32 + inst mem_0_1 of SRAM2RW64x32 + inst mem_0_2 of SRAM2RW64x32 + inst mem_0_3 of SRAM2RW64x32 + mem_0_0.portB_clk <= W0_clk + mem_0_0.portB_addr <= W0_addr + mem_0_0.portB_din <= bits(W0_data, 21, 0) + mem_0_0.portB_write_en <= and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), UInt<1>("h1")) + mem_0_1.portB_clk <= W0_clk + mem_0_1.portB_addr <= W0_addr + mem_0_1.portB_din <= bits(W0_data, 43, 22) + mem_0_1.portB_write_en <= and(and(UInt<1>("h1"), bits(W0_mask, 1, 1)), UInt<1>("h1")) + mem_0_2.portB_clk <= W0_clk + mem_0_2.portB_addr <= W0_addr + mem_0_2.portB_din <= bits(W0_data, 65, 44) + mem_0_2.portB_write_en <= and(and(UInt<1>("h1"), bits(W0_mask, 2, 2)), UInt<1>("h1")) + mem_0_3.portB_clk <= W0_clk + mem_0_3.portB_addr <= W0_addr + mem_0_3.portB_din <= bits(W0_data, 87, 66) + mem_0_3.portB_write_en <= and(and(UInt<1>("h1"), bits(W0_mask, 3, 3)), UInt<1>("h1")) + mem_0_0.portA_clk <= R0_clk + mem_0_0.portA_addr <= R0_addr + node R0_data_0_0 = bits(mem_0_0.portA_dout, 21, 0) + mem_0_1.portA_clk <= R0_clk + mem_0_1.portA_addr <= R0_addr + node R0_data_0_1 = bits(mem_0_1.portA_dout, 21, 0) + mem_0_2.portA_clk <= R0_clk + mem_0_2.portA_addr <= R0_addr + node R0_data_0_2 = bits(mem_0_2.portA_dout, 21, 0) + mem_0_3.portA_clk <= R0_clk + mem_0_3.portA_addr <= R0_addr + node R0_data_0_3 = bits(mem_0_3.portA_dout, 21, 0) + node R0_data_0 = cat(R0_data_0_3, cat(R0_data_0_2, cat(R0_data_0_1, R0_data_0_0))) + R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) + + extmodule SRAM2RW64x32 : + input portA_clk : Clock + input portA_addr : UInt<6> + output portA_dout : UInt<32> + input portB_clk : Clock + input portB_addr : UInt<6> + input portB_din : UInt<32> + input portB_write_en : UInt<1> + + defname = SRAM2RW64x32 +""" + + compileExecuteAndTest(mem, lib, v, output) +} From f854c6c9f0a7816febb1bb7b08f4f5c6355e809c Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 17:04:32 -0700 Subject: [PATCH 074/273] Nuke hardcoded JSON tests from orbit --- macros/src/test/resources/lib-1024x8-mrw.json | 36 ---- macros/src/test/resources/lib-1024x8-n28.json | 27 --- .../src/test/resources/lib-1024x8-r-mw.json | 34 ---- .../src/test/resources/lib-1024x8-sleep.json | 35 ---- macros/src/test/resources/lib-2048x10-rw.json | 24 --- .../src/test/resources/lib-2048x16-n28.json | 52 ----- .../src/test/resources/lib-2048x8-mrw-re.json | 29 --- macros/src/test/resources/lib-2048x8-mrw.json | 27 --- macros/src/test/resources/lib-32x32-2rw.json | 43 ---- macros/src/test/resources/lib-32x80-mrw.json | 27 --- macros/src/test/resources/mem-2000x8-mrw.json | 27 --- .../src/test/resources/mem-2048x16-mrw-2.json | 27 --- .../src/test/resources/mem-2048x16-mrw.json | 27 --- .../src/test/resources/mem-2048x20-mrw.json | 28 --- macros/src/test/resources/mem-2048x8-mrw.json | 28 --- .../src/test/resources/mem-2048x8-r-mw.json | 31 --- macros/src/test/resources/mem-24x52-r-w.json | 22 --- macros/src/test/resources/mem-32x160-mrw.json | 27 --- 
macros/src/test/resources/mylib.json | 186 ------------------ macros/src/test/resources/rocketchip.json | 76 ------- macros/src/test/scala/MacroCompilerSpec.scala | 8 +- 21 files changed, 2 insertions(+), 819 deletions(-) delete mode 100644 macros/src/test/resources/lib-1024x8-mrw.json delete mode 100644 macros/src/test/resources/lib-1024x8-n28.json delete mode 100644 macros/src/test/resources/lib-1024x8-r-mw.json delete mode 100644 macros/src/test/resources/lib-1024x8-sleep.json delete mode 100644 macros/src/test/resources/lib-2048x10-rw.json delete mode 100644 macros/src/test/resources/lib-2048x16-n28.json delete mode 100644 macros/src/test/resources/lib-2048x8-mrw-re.json delete mode 100644 macros/src/test/resources/lib-2048x8-mrw.json delete mode 100644 macros/src/test/resources/lib-32x32-2rw.json delete mode 100644 macros/src/test/resources/lib-32x80-mrw.json delete mode 100644 macros/src/test/resources/mem-2000x8-mrw.json delete mode 100644 macros/src/test/resources/mem-2048x16-mrw-2.json delete mode 100644 macros/src/test/resources/mem-2048x16-mrw.json delete mode 100644 macros/src/test/resources/mem-2048x20-mrw.json delete mode 100644 macros/src/test/resources/mem-2048x8-mrw.json delete mode 100644 macros/src/test/resources/mem-2048x8-r-mw.json delete mode 100644 macros/src/test/resources/mem-24x52-r-w.json delete mode 100644 macros/src/test/resources/mem-32x160-mrw.json delete mode 100644 macros/src/test/resources/mylib.json delete mode 100644 macros/src/test/resources/rocketchip.json diff --git a/macros/src/test/resources/lib-1024x8-mrw.json b/macros/src/test/resources/lib-1024x8-mrw.json deleted file mode 100644 index e5cfa0c29..000000000 --- a/macros/src/test/resources/lib-1024x8-mrw.json +++ /dev/null @@ -1,36 +0,0 @@ -[ - { - "type": "sram", - "name": "vendor_sram", - "depth": 1024, - "width": 8, - "family": "1rw", - "ports": [ - { - "clock port name": "clock", - "mask granularity": 8, - "output port name": "RW0O", - "input port name": "RW0I", - "address port name": "RW0A", - "mask port name": "RW0M", - "chip enable port name": "RW0E", - "write enable port name": "RW0W", - "clock port polarity": "positive edge", - "output port polarity": "active high", - "input port polarity": "active high", - "address port polarity": "active high", - "mask port polarity": "active high", - "chip enable port polarity": "active high", - "write enable port polarity": "active high" - } - ] - }, - { - "type": "metal filler cell", - "name": "vender_dcap" - }, - { - "type": "filler cell", - "name": "vender_fill" - } -] diff --git a/macros/src/test/resources/lib-1024x8-n28.json b/macros/src/test/resources/lib-1024x8-n28.json deleted file mode 100644 index 7db92ecf9..000000000 --- a/macros/src/test/resources/lib-1024x8-n28.json +++ /dev/null @@ -1,27 +0,0 @@ -[ - { - "type": "sram", - "name": "vendor_sram", - "depth": 1024, - "width": 8, - "ports": [ - { - "clock port name": "clock", - "mask granularity": 1, - "output port name": "RW0O", - "input port name": "RW0I", - "address port name": "RW0A", - "mask port name": "RW0M", - "chip enable port name": "RW0E", - "write enable port name": "RW0W", - "clock port polarity": "positive edge", - "output port polarity": "active high", - "input port polarity": "active high", - "address port polarity": "active high", - "mask port polarity": "active high", - "chip enable port polarity": "active high", - "write enable port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/lib-1024x8-r-mw.json 
b/macros/src/test/resources/lib-1024x8-r-mw.json deleted file mode 100644 index 869468a47..000000000 --- a/macros/src/test/resources/lib-1024x8-r-mw.json +++ /dev/null @@ -1,34 +0,0 @@ -[ - { - "type": "sram", - "name": "vendor_sram", - "depth": 1024, - "width": 8, - "ports": [ - { - "clock port name": "clock", - "mask granularity": 8, - "output port name": "R0O", - "address port name": "R0A", - "clock port polarity": "positive edge", - "output port polarity": "active high", - "address port polarity": "active high" - }, - { - "clock port name": "clock", - "mask granularity": 8, - "input port name": "W0I", - "address port name": "W0A", - "mask port name": "W0M", - "chip enable port name": "W0E", - "write enable port name": "W0W", - "clock port polarity": "positive edge", - "input port polarity": "active high", - "address port polarity": "active high", - "mask port polarity": "active high", - "chip enable port polarity": "active high", - "write enable port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/lib-1024x8-sleep.json b/macros/src/test/resources/lib-1024x8-sleep.json deleted file mode 100644 index 7736590d8..000000000 --- a/macros/src/test/resources/lib-1024x8-sleep.json +++ /dev/null @@ -1,35 +0,0 @@ -[ - { - "type": "sram", - "name": "vendor_sram", - "depth": 1024, - "width": 8, - "ports": [ - { - "clock port name": "clock", - "mask granularity": 8, - "output port name": "RW0O", - "input port name": "RW0I", - "address port name": "RW0A", - "mask port name": "RW0M", - "chip enable port name": "RW0E", - "write enable port name": "RW0W", - "clock port polarity": "positive edge", - "output port polarity": "active high", - "input port polarity": "active high", - "address port polarity": "active high", - "mask port polarity": "active high", - "chip enable port polarity": "active high", - "write enable port polarity": "active high" - } - ], - "extra ports": [ - { - "name": "sleep", - "type": "constant", - "width": 1, - "value": 0 - } - ] - } -] diff --git a/macros/src/test/resources/lib-2048x10-rw.json b/macros/src/test/resources/lib-2048x10-rw.json deleted file mode 100644 index 75640ae56..000000000 --- a/macros/src/test/resources/lib-2048x10-rw.json +++ /dev/null @@ -1,24 +0,0 @@ -[ - { - "type": "sram", - "name": "vendor_sram", - "depth": 2048, - "width": 10, - "ports": [ - { - "clock port name": "clock", - "output port name": "RW0O", - "input port name": "RW0I", - "address port name": "RW0A", - "chip enable port name": "RW0E", - "write enable port name": "RW0W", - "clock port polarity": "positive edge", - "output port polarity": "active high", - "input port polarity": "active high", - "address port polarity": "active high", - "chip enable port polarity": "active high", - "write enable port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/lib-2048x16-n28.json b/macros/src/test/resources/lib-2048x16-n28.json deleted file mode 100644 index 2f549a27f..000000000 --- a/macros/src/test/resources/lib-2048x16-n28.json +++ /dev/null @@ -1,52 +0,0 @@ -[ - { - "type": "sram", - "name": "vendor_sram_16", - "depth": 2048, - "width": 16, - "ports": [ - { - "clock port name": "clock", - "mask granularity": 1, - "output port name": "RW0O", - "input port name": "RW0I", - "address port name": "RW0A", - "mask port name": "RW0M", - "chip enable port name": "RW0E", - "write enable port name": "RW0W", - "clock port polarity": "positive edge", - "output port polarity": "active high", - "input port polarity": "active high", - "address port polarity": 
"active high", - "mask port polarity": "active high", - "chip enable port polarity": "active high", - "write enable port polarity": "active high" - } - ] - }, - { - "type": "sram", - "name": "vendor_sram_4", - "depth": 2048, - "width": 4, - "ports": [ - { - "clock port name": "clock", - "mask granularity": 1, - "output port name": "RW0O", - "input port name": "RW0I", - "address port name": "RW0A", - "mask port name": "RW0M", - "chip enable port name": "RW0E", - "write enable port name": "RW0W", - "clock port polarity": "positive edge", - "output port polarity": "active high", - "input port polarity": "active high", - "address port polarity": "active high", - "mask port polarity": "active high", - "chip enable port polarity": "active high", - "write enable port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/lib-2048x8-mrw-re.json b/macros/src/test/resources/lib-2048x8-mrw-re.json deleted file mode 100644 index 5766aa78d..000000000 --- a/macros/src/test/resources/lib-2048x8-mrw-re.json +++ /dev/null @@ -1,29 +0,0 @@ -[ - { - "type": "sram", - "name": "vendor_sram", - "depth": 2048, - "width": 8, - "ports": [ - { - "clock port name": "clock", - "mask granularity": 8, - "output port name": "RW0O", - "input port name": "RW0I", - "address port name": "RW0A", - "mask port name": "RW0M", - "chip enable port name": "RW0E", - "write enable port name": "RW0W", - "read enable port name": "RW0R", - "clock port polarity": "positive edge", - "output port polarity": "active high", - "input port polarity": "active high", - "address port polarity": "active high", - "mask port polarity": "active high", - "chip enable port polarity": "active high", - "write enable port polarity": "active high", - "read enable port polarity": "active low" - } - ] - } -] diff --git a/macros/src/test/resources/lib-2048x8-mrw.json b/macros/src/test/resources/lib-2048x8-mrw.json deleted file mode 100644 index 1d4ee508a..000000000 --- a/macros/src/test/resources/lib-2048x8-mrw.json +++ /dev/null @@ -1,27 +0,0 @@ -[ - { - "type": "sram", - "name": "vendor_sram", - "depth": 2048, - "width": 8, - "ports": [ - { - "clock port name": "clock", - "mask granularity": 8, - "output port name": "RW0O", - "input port name": "RW0I", - "address port name": "RW0A", - "mask port name": "RW0M", - "chip enable port name": "RW0E", - "write enable port name": "RW0W", - "clock port polarity": "positive edge", - "output port polarity": "active high", - "input port polarity": "active high", - "address port polarity": "active high", - "mask port polarity": "active high", - "chip enable port polarity": "active high", - "write enable port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/lib-32x32-2rw.json b/macros/src/test/resources/lib-32x32-2rw.json deleted file mode 100644 index f90848b2f..000000000 --- a/macros/src/test/resources/lib-32x32-2rw.json +++ /dev/null @@ -1,43 +0,0 @@ -[ - { - "name": "SRAM2RW32x32", - "type": "sram", - "family": "2rw", - "depth": 32, - "width": 32, - "ports": [ - { - "clock port name": "CE1", - "clock port polarity": "positive edge", - "address port name": "A1", - "address port polarity": "active high", - "input port name": "I1", - "input port polarity": "active high", - "output port name": "O1", - "output port polarity": "active high", - "read enable port name": "OEB1", - "read enable port polarity": "active low", - "write enable port name": "WEB1", - "write enable port polarity": "active low", - "chip enable port name": "CSB1", - "chip enable port polarity": 
"active low" - }, - { - "clock port name": "CE2", - "clock port polarity": "positive edge", - "address port name": "A2", - "address port polarity": "active high", - "input port name": "I2", - "input port polarity": "active high", - "output port name": "O2", - "output port polarity": "active high", - "read enable port name": "OEB2", - "read enable port polarity": "active low", - "write enable port name": "WEB2", - "write enable port polarity": "active low", - "chip enable port name": "CSB2", - "chip enable port polarity": "active low" - } - ] - } -] diff --git a/macros/src/test/resources/lib-32x80-mrw.json b/macros/src/test/resources/lib-32x80-mrw.json deleted file mode 100644 index bdf0581bb..000000000 --- a/macros/src/test/resources/lib-32x80-mrw.json +++ /dev/null @@ -1,27 +0,0 @@ -[ - { - "type": "sram", - "name": "vendor_sram", - "depth": 32, - "width": 80, - "ports": [ - { - "clock port name": "clock", - "mask granularity": 1, - "output port name": "RW0O", - "input port name": "RW0I", - "address port name": "RW0A", - "mask port name": "RW0M", - "chip enable port name": "RW0E", - "write enable port name": "RW0W", - "clock port polarity": "positive edge", - "output port polarity": "active high", - "input port polarity": "active high", - "address port polarity": "active high", - "mask port polarity": "active high", - "chip enable port polarity": "active high", - "write enable port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/mem-2000x8-mrw.json b/macros/src/test/resources/mem-2000x8-mrw.json deleted file mode 100644 index cbb5887a8..000000000 --- a/macros/src/test/resources/mem-2000x8-mrw.json +++ /dev/null @@ -1,27 +0,0 @@ -[ - { - "type": "sram", - "name": "name_of_sram_module", - "depth": 2000, - "width": 8, - "ports": [ - { - "clock port name": "clock", - "clock port polarity": "positive edge", - "mask granularity": 8, - "output port name": "RW0O", - "output port polarity": "active high", - "input port name": "RW0I", - "input port polarity": "active high", - "address port name": "RW0A", - "address port polarity": "active high", - "mask port name": "RW0M", - "mask port polarity": "active high", - "chip enable port name": "RW0E", - "chip enable port polarity": "active high", - "write enable port name": "RW0W", - "write enable port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/mem-2048x16-mrw-2.json b/macros/src/test/resources/mem-2048x16-mrw-2.json deleted file mode 100644 index dcd4aa536..000000000 --- a/macros/src/test/resources/mem-2048x16-mrw-2.json +++ /dev/null @@ -1,27 +0,0 @@ -[ - { - "type": "sram", - "name": "name_of_sram_module", - "depth": 2048, - "width": 16, - "ports": [ - { - "clock port name": "clock", - "clock port polarity": "positive edge", - "mask granularity": 2, - "output port name": "RW0O", - "output port polarity": "active high", - "input port name": "RW0I", - "input port polarity": "active high", - "address port name": "RW0A", - "address port polarity": "active high", - "mask port name": "RW0M", - "mask port polarity": "active high", - "chip enable port name": "RW0E", - "chip enable port polarity": "active high", - "write enable port name": "RW0W", - "write enable port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/mem-2048x16-mrw.json b/macros/src/test/resources/mem-2048x16-mrw.json deleted file mode 100644 index 2bf003fe6..000000000 --- a/macros/src/test/resources/mem-2048x16-mrw.json +++ /dev/null @@ -1,27 +0,0 @@ -[ - { - "type": "sram", - "name": 
"name_of_sram_module", - "depth": 2048, - "width": 16, - "ports": [ - { - "clock port name": "clock", - "clock port polarity": "positive edge", - "mask granularity": 8, - "output port name": "RW0O", - "output port polarity": "active high", - "input port name": "RW0I", - "input port polarity": "active high", - "address port name": "RW0A", - "address port polarity": "active high", - "mask port name": "RW0M", - "mask port polarity": "active high", - "chip enable port name": "RW0E", - "chip enable port polarity": "active high", - "write enable port name": "RW0W", - "write enable port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/mem-2048x20-mrw.json b/macros/src/test/resources/mem-2048x20-mrw.json deleted file mode 100644 index 740325066..000000000 --- a/macros/src/test/resources/mem-2048x20-mrw.json +++ /dev/null @@ -1,28 +0,0 @@ -[ - { - "type": "sram", - "name": "name_of_sram_module", - "depth": 2048, - "width": 20, - "ports": [ - { - "clock port name": "clock", - "clock port polarity": "positive edge", - "mask granularity": 10, - "output port name": "RW0O", - "output port polarity": "active high", - "input port name": "RW0I", - "input port polarity": "active high", - "address port name": "RW0A", - "address port polarity": "active high", - "mask port name": "RW0M", - "mask port polarity": "active high", - "chip enable port name": "RW0E", - "chip enable port polarity": "active high", - "write enable port name": "RW0W", - "write enable port polarity": "active high" - } - ] - } -] - diff --git a/macros/src/test/resources/mem-2048x8-mrw.json b/macros/src/test/resources/mem-2048x8-mrw.json deleted file mode 100644 index 64f6bfd70..000000000 --- a/macros/src/test/resources/mem-2048x8-mrw.json +++ /dev/null @@ -1,28 +0,0 @@ -[ - { - "type": "sram", - "name": "name_of_sram_module", - "depth": 2048, - "width": 8, - "family": "1rw", - "ports": [ - { - "clock port name": "clock", - "clock port polarity": "positive edge", - "mask granularity": 8, - "output port name": "RW0O", - "output port polarity": "active high", - "input port name": "RW0I", - "input port polarity": "active high", - "address port name": "RW0A", - "address port polarity": "active high", - "mask port name": "RW0M", - "mask port polarity": "active high", - "chip enable port name": "RW0E", - "chip enable port polarity": "active high", - "write enable port name": "RW0W", - "write enable port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/mem-2048x8-r-mw.json b/macros/src/test/resources/mem-2048x8-r-mw.json deleted file mode 100644 index e5fd13d11..000000000 --- a/macros/src/test/resources/mem-2048x8-r-mw.json +++ /dev/null @@ -1,31 +0,0 @@ -[ - { - "type": "sram", - "name": "name_of_sram_module", - "depth": 2048, - "width": 8, - "ports": [ - { - "clock port name": "clock", - "clock port polarity": "positive edge", - "mask granularity": 8, - "input port name": "W0I", - "input port polarity": "active high", - "address port name": "W0A", - "address port polarity": "active high", - "mask port name": "W0M", - "mask port polarity": "active high", - "chip enable port name": "W0E", - "chip enable port polarity": "active high" - }, - { - "clock port name": "clock", - "clock port polarity": "positive edge", - "output port name": "R0O", - "output port polarity": "active high", - "address port name": "R0A", - "address port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/mem-24x52-r-w.json b/macros/src/test/resources/mem-24x52-r-w.json deleted file mode 
100644 index e4bf66302..000000000 --- a/macros/src/test/resources/mem-24x52-r-w.json +++ /dev/null @@ -1,22 +0,0 @@ -[ - { - "type": "sram", - "name": "entries_info_ext", - "depth": 24, - "width": 52, - "ports": [ - { - "clock port name": "R0_clk", - "output port name": "R0_data", - "address port name": "R0_addr", - "chip enable port name": "R0_en" - }, - { - "clock port name": "W0_clk", - "input port name": "W0_data", - "address port name": "W0_addr", - "chip enable port name": "W0_en" - } - ] - } -] diff --git a/macros/src/test/resources/mem-32x160-mrw.json b/macros/src/test/resources/mem-32x160-mrw.json deleted file mode 100644 index a01a6d6c1..000000000 --- a/macros/src/test/resources/mem-32x160-mrw.json +++ /dev/null @@ -1,27 +0,0 @@ -[ - { - "type": "sram", - "name": "name_of_sram_module", - "depth": 32, - "width": 160, - "ports": [ - { - "clock port name": "clock", - "clock port polarity": "positive edge", - "mask granularity": 20, - "output port name": "RW0O", - "output port polarity": "active high", - "input port name": "RW0I", - "input port polarity": "active high", - "address port name": "RW0A", - "address port polarity": "active high", - "mask port name": "RW0M", - "mask port polarity": "active high", - "chip enable port name": "RW0E", - "chip enable port polarity": "active high", - "write enable port name": "RW0W", - "write enable port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/mylib.json b/macros/src/test/resources/mylib.json deleted file mode 100644 index de71d89b7..000000000 --- a/macros/src/test/resources/mylib.json +++ /dev/null @@ -1,186 +0,0 @@ -[ - { - "type": "sram", - "name": "SRAM1RW1024x8", - "width": 8, - "depth": 1024, - "ports": [ - { - "address port name": "A", - "address port polarity": "active high", - "clock port name": "CE", - "clock port polarity": "positive edge", - "write enable port name": "WEB", - "write enable port polarity": "active low", - "read enable port name": "OEB", - "read enable port polarity": "active low", - "chip enable port name": "CEB", - "chip enable port polarity": "active low", - "output port name": "O", - "output port polarity": "active high", - "input port name": "I", - "input port polarity": "active high" - } - ] - }, - { - "type": "sram", - "name": "SRAM1RW512x32", - "width": 32, - "depth": 512, - "ports": [ - { - "address port name": "A", - "address port polarity": "active high", - "clock port name": "CE", - "clock port polarity": "positive edge", - "write enable port name": "WEB", - "write enable port polarity": "active low", - "read enable port name": "OEB", - "read enable port polarity": "active low", - "chip enable port name": "CEB", - "chip enable port polarity": "active low", - "output port name": "O", - "output port polarity": "active high", - "input port name": "I", - "input port polarity": "active high" - } - ] - }, - { - "type": "sram", - "name": "SRAM1RW64x128", - "width": 128, - "depth": 64, - "ports": [ - { - "address port name": "A", - "address port polarity": "active high", - "clock port name": "CE", - "clock port polarity": "positive edge", - "write enable port name": "WEB", - "write enable port polarity": "active low", - "read enable port name": "OEB", - "read enable port polarity": "active low", - "chip enable port name": "CEB", - "chip enable port polarity": "active low", - "output port name": "O", - "output port polarity": "active high", - "input port name": "I", - "input port polarity": "active high" - } - ] - }, - { - "type": "sram", - "name": "SRAM1RW64x32", - "width": 
32, - "depth": 64, - "ports": [ - { - "address port name": "A", - "address port polarity": "active high", - "clock port name": "CE", - "clock port polarity": "positive edge", - "write enable port name": "WEB", - "write enable port polarity": "active low", - "read enable port name": "OEB", - "read enable port polarity": "active low", - "chip enable port name": "CEB", - "chip enable port polarity": "active low", - "output port name": "O", - "output port polarity": "active high", - "input port name": "I", - "input port polarity": "active high" - } - ] - }, - { - "type": "sram", - "name": "SRAM1RW64x8", - "width": 8, - "depth": 64, - "ports": [ - { - "address port name": "A", - "address port polarity": "active high", - "clock port name": "CE", - "clock port polarity": "positive edge", - "write enable port name": "WEB", - "write enable port polarity": "active low", - "read enable port name": "OEB", - "read enable port polarity": "active low", - "chip enable port name": "CEB", - "chip enable port polarity": "active low", - "output port name": "O", - "output port polarity": "active high", - "input port name": "I", - "input port polarity": "active high" - } - ] - }, - { - "type": "sram", - "name": "SRAM1RW512x8", - "width": 8, - "depth": 512, - "ports": [ - { - "address port name": "A", - "address port polarity": "active high", - "clock port name": "CE", - "clock port polarity": "positive edge", - "write enable port name": "WEB", - "write enable port polarity": "active low", - "read enable port name": "OEB", - "read enable port polarity": "active low", - "chip enable port name": "CEB", - "chip enable port polarity": "active low", - "output port name": "O", - "output port polarity": "active high", - "input port name": "I", - "input port polarity": "active high" - } - ] - }, - { - "type": "sram", - "name": "SRAM2RW64x32", - "width": 32, - "depth": 64, - "ports": [ - { - "address port name": "A1", - "address port polarity": "active high", - "clock port name": "CE1", - "clock port polarity": "positive edge", - "write enable port name": "WEB1", - "write enable port polarity": "active low", - "read enable port name": "OEB1", - "read enable port polarity": "active low", - "chip enable port name": "CEB1", - "chip enable port polarity": "active low", - "output port name": "O1", - "output port polarity": "active high", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "address port name": "A2", - "address port polarity": "active high", - "clock port name": "CE2", - "clock port polarity": "positive edge", - "write enable port name": "WEB2", - "write enable port polarity": "active low", - "read enable port name": "OEB2", - "read enable port polarity": "active low", - "chip enable port name": "CEB2", - "chip enable port polarity": "active low", - "output port name": "O2", - "output port polarity": "active high", - "input port name": "I2", - "input port polarity": "active high" - } - ] - } -] diff --git a/macros/src/test/resources/rocketchip.json b/macros/src/test/resources/rocketchip.json deleted file mode 100644 index 9fe0d2c42..000000000 --- a/macros/src/test/resources/rocketchip.json +++ /dev/null @@ -1,76 +0,0 @@ -[ - { - "type": "sram", - "name": "tag_array_ext", - "depth": 64, - "width": 80, - "ports": [ - { - "clock port name": "RW0_clk", - "mask granularity": 20, - "output port name": "RW0_rdata", - "input port name": "RW0_wdata", - "address port name": "RW0_addr", - "mask port name": "RW0_wmask", - "chip enable port name": "RW0_en", - "write enable port name": "RW0_wmode" - 
} - ] - }, - { - "type": "sram", - "name": "T_1090_ext", - "depth": 512, - "width": 64, - "ports": [ - { - "clock port name": "RW0_clk", - "output port name": "RW0_rdata", - "input port name": "RW0_wdata", - "address port name": "RW0_addr", - "chip enable port name": "RW0_en", - "write enable port name": "RW0_wmode" - } - ] - }, - { - "type": "sram", - "name": "T_406_ext", - "depth": 512, - "width": 64, - "ports": [ - { - "clock port name": "RW0_clk", - "mask granularity": 8, - "output port name": "RW0_rdata", - "input port name": "RW0_wdata", - "address port name": "RW0_addr", - "mask port name": "RW0_wmask", - "chip enable port name": "RW0_en", - "write enable port name": "RW0_wmode" - } - ] - }, - { - "type": "sram", - "name": "T_2172_ext", - "depth": 64, - "width": 88, - "ports": [ - { - "clock port name": "W0_clk", - "mask granularity": 22, - "input port name": "W0_data", - "address port name": "W0_addr", - "chip enable port name": "W0_en", - "mask port name": "W0_mask" - }, - { - "clock port name": "R0_clk", - "output port name": "R0_data", - "address port name": "R0_addr", - "chip enable port name": "R0_en" - } - ] - } -] diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index b752f5f22..5253623c8 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -13,13 +13,12 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate * lib - technology SRAM(s) to use to compile mem */ - val macroDir: String = "tapeout/src/test/resources/macros" val testDir: String = "test_run_dir/macros" new File(testDir).mkdirs // Make sure the testDir exists // Override these to change the prefixing of macroDir and testDir - val memPrefix: String = macroDir - val libPrefix: String = macroDir + val memPrefix: String = testDir + val libPrefix: String = testDir val vPrefix: String = testDir private def args(mem: String, lib: Option[String], v: String, synflops: Boolean) = @@ -198,9 +197,6 @@ trait HasSimpleTestGenerator { require (memDepth >= libDepth) - override val memPrefix = testDir - override val libPrefix = testDir - // Convenience variables to check if a mask exists. val memHasMask = memMaskGran != None val libHasMask = libMaskGran != None From 0f2d00e0082c2ebf30f4e1230ac7feb13e92db93 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 18:46:43 -0700 Subject: [PATCH 075/273] Add some documentation --- macros/src/main/scala/MacroCompiler.scala | 29 ++++++++++++++++------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index a96043f25..3dc8a06b3 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -320,14 +320,21 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], def run(c: Circuit): Circuit = { val modules = (mems, libs) match { - case (Some(mems), Some(libs)) => (mems foldLeft c.modules){ (modules, mem) => + case (Some(mems), Some(libs)) => + // Try to compile each of the memories in mems. + // The 'state' is c.modules, which is a list of all the firrtl modules + // in the 'circuit'. + (mems foldLeft c.modules){ (modules, mem) => + + // Try to compile mem against each lib in libs, keeping track of the + // best compiled version, external lib used, and cost. 
val (best, cost) = (libs foldLeft (None: Option[(Module, ExtModule)], BigInt(Long.MaxValue))){ - case ((best, area), lib) if mem.src.ports.size != lib.src.ports.size => + case ((best, cost), lib) if mem.src.ports.size != lib.src.ports.size => /* Palmer: FIXME: This just assumes the Chisel and vendor ports are in the same * order, but I'm starting with what actually gets generated. */ System.err println s"INFO: unable to compile ${mem.src.name} using ${lib.src.name} port count must match" - (best, area) - case ((best, area), lib) => + (best, cost) + case ((best, cost), lib) => /* Palmer: A quick cost function (that must be kept in sync with * memory_cost()) that attempts to avoid compiling unncessary * memories. This is a lower bound on the cost of compiling a @@ -341,16 +348,20 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], case (Some(1), Some(1)) | (None, _) => mem.src.width case (Some(p), _) => p // assume that the memory consists of smaller chunks } - val cost = (((mem.src.depth - 1) / lib.src.depth) + 1) * + val newCost = (((mem.src.depth - 1) / lib.src.depth) + 1) * (((memWidth - 1) / lib.src.width) + 1) * (lib.src.depth * lib.src.width + 1) // weights on # cells - System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${cost}") - if (cost > area) (best, area) + System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${newCost}") + if (newCost > cost) (best, cost) else compile(mem, lib) match { - case None => (best, area) - case Some(p) => (Some(p), cost) + case None => (best, cost) + case Some(p) => (Some(p), newCost) } } + + // If we were able to compile anything, then replace the original module + // in the modules list with a compiled version, as well as the extmodule + // stub for the lib. best match { case None => modules case Some((mod, bb)) => From 5d14f1995a6f91c79656fe440e011bdd62ed6575 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Mon, 31 Jul 2017 19:17:54 -0700 Subject: [PATCH 076/273] Start cost function refactor --- macros/src/main/scala/MacroCompiler.scala | 86 +++++++++++++++++------ 1 file changed, 65 insertions(+), 21 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 3dc8a06b3..d26f5912f 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -13,6 +13,60 @@ import scala.collection.mutable.{ArrayBuffer, HashMap} import java.io.{File, FileWriter} import Utils._ +/** + * Trait which can calculate the cost of compiling a memory against a certain + * library memory macro using a cost function. + */ +// TODO: eventually explore compiling a single target memory using multiple +// different kinds of target memory. +trait CostMetric { + /** + * Cost function that returns the cost of compiling a memory using a certain + * macro. + * + * @param mem Memory macro to compile (target memory) + * @param lib Library memory macro to use (library memory) + * @return The cost of this compile, defined by this cost metric, or None if + * it cannot be compiled. + */ + def cost(mem: Macro, lib: Macro): Option[BigInt] +} + +/** Some default cost functions. */ +object PalmerMetric extends CostMetric { + override def cost(mem: Macro, lib: Macro): Option[BigInt] = { + /* Palmer: A quick cost function (that must be kept in sync with + * memory_cost()) that attempts to avoid compiling unncessary + * memories. This is a lower bound on the cost of compiling a + * memory: it assumes 100% bit-cell utilization when mapping. 
*/ + // val cost = 100 * (mem.depth * mem.width) / (lib.depth * lib.width) + + // (mem.depth * mem.width) + ??? + } +} + +// The current default metric in barstools, re-defined by Donggyu. +object NewDefaultMetric extends CostMetric { + override def cost(mem: Macro, lib: Macro): Option[BigInt] = { + val memMask = mem.src.ports map (_.maskGran) find (_.isDefined) map (_.get) + val libMask = lib.src.ports map (_.maskGran) find (_.isDefined) map (_.get) + val memWidth = (memMask, libMask) match { + case (Some(1), Some(1)) | (None, _) => mem.src.width + case (Some(p), _) => p // assume that the memory consists of smaller chunks + } + return Some( + (((mem.src.depth - 1) / lib.src.depth) + 1) * + (((memWidth - 1) / lib.src.width) + 1) * + (lib.src.depth * lib.src.width + 1) // weights on # cells + ) + } +} + +object CostMetric { + /** Define some default metric. */ + val default: CostMetric = NewDefaultMetric +} + object MacroCompilerAnnotation { def apply(c: String, mem: File, lib: Option[File], synflops: Boolean): Annotation = apply(c, mem.toString, lib map (_.toString), synflops) @@ -335,27 +389,17 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], System.err println s"INFO: unable to compile ${mem.src.name} using ${lib.src.name} port count must match" (best, cost) case ((best, cost), lib) => - /* Palmer: A quick cost function (that must be kept in sync with - * memory_cost()) that attempts to avoid compiling unncessary - * memories. This is a lower bound on the cost of compiling a - * memory: it assumes 100% bit-cell utilization when mapping. */ - // val cost = 100 * (mem.depth * mem.width) / (lib.depth * lib.width) + - // (mem.depth * mem.width) - // Donggyu: I re-define cost - val memMask = mem.src.ports map (_.maskGran) find (_.isDefined) map (_.get) - val libMask = lib.src.ports map (_.maskGran) find (_.isDefined) map (_.get) - val memWidth = (memMask, libMask) match { - case (Some(1), Some(1)) | (None, _) => mem.src.width - case (Some(p), _) => p // assume that the memory consists of smaller chunks - } - val newCost = (((mem.src.depth - 1) / lib.src.depth) + 1) * - (((memWidth - 1) / lib.src.width) + 1) * - (lib.src.depth * lib.src.width + 1) // weights on # cells - System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${newCost}") - if (newCost > cost) (best, cost) - else compile(mem, lib) match { - case None => (best, cost) - case Some(p) => (Some(p), newCost) + // Run the cost function to evaluate this potential compile. + CostMetric.default.cost(mem, lib) match { + case Some(newCost) => { + System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${newCost}") + if (newCost > cost) (best, cost) + else compile(mem, lib) match { + case None => (best, cost) + case Some(p) => (Some(p), newCost) + } + } + case _ => (best, cost) // Cost function rejected this combination. 
} } From 122e433e94e2b2ec8d57f290311bbd623fa77ab2 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 1 Aug 2017 11:18:38 -0700 Subject: [PATCH 077/273] Bump mdf again --- mdf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mdf b/mdf index 1267d8813..893ca7476 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit 1267d8813ae7af004ffe8803a8a758750dfd3987 +Subproject commit 893ca7476a6af689921a5ca99afe6df9c88fd3fd From 0e474dac5e83ff07a7d0ff221e64d20ea544cddc Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 1 Aug 2017 11:18:53 -0700 Subject: [PATCH 078/273] Add external metric --- macros/src/main/scala/MacroCompiler.scala | 42 +++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index d26f5912f..642b85ef9 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -32,7 +32,9 @@ trait CostMetric { def cost(mem: Macro, lib: Macro): Option[BigInt] } -/** Some default cost functions. */ +// Some default cost functions. + +/** Palmer's old metric. */ object PalmerMetric extends CostMetric { override def cost(mem: Macro, lib: Macro): Option[BigInt] = { /* Palmer: A quick cost function (that must be kept in sync with @@ -45,7 +47,43 @@ object PalmerMetric extends CostMetric { } } -// The current default metric in barstools, re-defined by Donggyu. +/** + * An external cost function. + * Calls the specified path with paths to the JSON MDF representation of the mem + * and lib macros. The external executable should return a BigInt. + * None will be returned if the external executable does not return a valid + * BigInt. + */ +class ExternalMetric(path: String) extends CostMetric { + import mdf.macrolib.Utils.writeMacroToPath + import java.io._ + import scala.language.postfixOps // for !! postfix op + import sys.process._ + + override def cost(mem: Macro, lib: Macro): Option[BigInt] = { + // Create temporary files. + val memFile = File.createTempFile("_macrocompiler_mem_", ".json") + val libFile = File.createTempFile("_macrocompiler_lib_", ".json") + + writeMacroToPath(Some(memFile.getAbsolutePath), mem.src) + writeMacroToPath(Some(libFile.getAbsolutePath), lib.src) + + // !! executes the given command + val result: String = (s"${path} ${memFile.getAbsolutePath} ${libFile.getAbsolutePath}" !!).trim + + // Remove temporary files. + memFile.delete() + libFile.delete() + + try { + Some(BigInt(result)) + } catch { + case e: NumberFormatException => None + } + } +} + +/** The current default metric in barstools, re-defined by Donggyu. */ object NewDefaultMetric extends CostMetric { override def cost(mem: Macro, lib: Macro): Option[BigInt] = { val memMask = mem.src.ports map (_.maskGran) find (_.isDefined) map (_.get) From ffd7893d631ab1ad81374c9bd41e2685a6e62721 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 1 Aug 2017 12:24:52 -0700 Subject: [PATCH 079/273] Implement cost selection from command line --- macros/src/main/scala/MacroCompiler.scala | 35 +++++++++++++++++------ 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 642b85ef9..d960a5db2 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -103,6 +103,15 @@ object NewDefaultMetric extends CostMetric { object CostMetric { /** Define some default metric. 
*/ val default: CostMetric = NewDefaultMetric + + /** Select a cost function from string. */ + def getCostMetric(m: String, params: Map[String, String]): CostMetric = m match { + case "default" => default + case "PalmerMetric" => PalmerMetric + case "ExternalMetric" => new ExternalMetric(params.get("path").get) + case "NewDefaultMetric" => NewDefaultMetric + case _ => throw new IllegalArgumentException("Invalid cost metric " + m) + } } object MacroCompilerAnnotation { @@ -508,32 +517,41 @@ object MacroCompiler extends App { case object Macros extends MacroParam case object Library extends MacroParam case object Verilog extends MacroParam + case object CostFunc extends MacroParam type MacroParamMap = Map[MacroParam, String] + type CostParamMap = Map[String, String] val usage = Seq( "Options:", " -m, --macro-list: The set of macros to compile", " -l, --library: The set of macros that have blackbox instances", " -v, --verilog: Verilog output", + " -c, --cost-func: Cost function to use. Optional (default: \"default\")", + " -cp, --cost-param: Cost function parameter. (Optional depending on the cost function.). e.g. -c ExternalMetric -cp path /path/to/my/cost/script", " --syn-flop: Produces synthesizable flop-based memories (for all memories and library memory macros); likely useful for simulation purposes") mkString "\n" - def parseArgs(map: MacroParamMap, synflops: Boolean, args: List[String]): (MacroParamMap, Boolean) = + def parseArgs(map: MacroParamMap, costMap: CostParamMap, synflops: Boolean, args: List[String]): (MacroParamMap, Boolean) = args match { case Nil => (map, synflops) case ("-m" | "--macro-list") :: value :: tail => - parseArgs(map + (Macros -> value), synflops, tail) + parseArgs(map + (Macros -> value), costMap, synflops, tail) case ("-l" | "--library") :: value :: tail => - parseArgs(map + (Library -> value), synflops, tail) + parseArgs(map + (Library -> value), costMap, synflops, tail) case ("-v" | "--verilog") :: value :: tail => - parseArgs(map + (Verilog -> value), synflops, tail) + parseArgs(map + (Verilog -> value), costMap, synflops, tail) + case ("-c" | "--cost-func") :: value :: tail => + parseArgs(map + (CostFunc -> value), costMap, synflops, tail) + case ("-cp" | "--cost-param") :: value1 :: value2 :: tail => + parseArgs(map, costMap + (value1 -> value2), synflops, tail) case "--syn-flops" :: tail => - parseArgs(map, true, tail) + parseArgs(map, costMap, true, tail) case arg :: tail => println(s"Unknown field $arg\n") - throw new Exception(usage) + println(usage) + sys.exit(1) } def run(args: List[String]) { - val (params, synflops) = parseArgs(Map[MacroParam, String](), false, args) + val (params, synflops) = parseArgs(Map[MacroParam, String](), Map[String, String](), false, args) try { val macros = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) @@ -560,7 +578,8 @@ object MacroCompiler extends App { } catch { case e: java.util.NoSuchElementException => - throw new Exception(usage) + println(usage) + sys.exit(1) case e: Throwable => throw e } From 00c99f56b1839201311cb07bc2a8a280ed45fd38 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 1 Aug 2017 12:25:42 -0700 Subject: [PATCH 080/273] Add sbt-assembly for making jar --- build.sbt | 6 +++++- macros/build.sbt | 1 + project/Dependencies.scala | 6 ++++-- project/assembly.sbt | 2 ++ 4 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 macros/build.sbt create mode 100644 project/assembly.sbt diff --git a/build.sbt b/build.sbt 
index 37b179532..f18110c7f 100644 --- a/build.sbt +++ b/build.sbt @@ -22,6 +22,8 @@ lazy val commonSettings = Seq( ) ) +disablePlugins(sbtassembly.AssemblyPlugin) + lazy val mdf = (project in file("mdf/scalalib")) lazy val macros = (project in file("macros")) .dependsOn(mdf) @@ -29,8 +31,10 @@ lazy val macros = (project in file("macros")) .settings(Seq( libraryDependencies ++= Seq( "edu.berkeley.cs" %% "firrtl-interpreter" % "0.1-SNAPSHOT" % Test - ) + ), + mainClass := Some("barstools.macros.MacroCompiler") )) + .enablePlugins(sbtassembly.AssemblyPlugin) lazy val tapeout = (project in file("tapeout")) .settings(commonSettings) diff --git a/macros/build.sbt b/macros/build.sbt new file mode 100644 index 000000000..65e9704a1 --- /dev/null +++ b/macros/build.sbt @@ -0,0 +1 @@ +enablePlugins(sbtassembly.AssemblyPlugin) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 52c074b1a..5c327ec4a 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -9,7 +9,9 @@ object Dependencies { // Templating! val handlebarsVersion = "2.1.1" - val handlebars = "com.gilt" %% "handlebars-scala" % handlebarsVersion + val handlebars = "com.gilt" %% "handlebars-scala" % handlebarsVersion exclude("org.slf4j", "slf4j-simple") + // org.slf4j.slf4j-simple's StaticLoggerBinder (from handlebars) conflicts with + // ch.qos.logback.logback-classic's StaticLoggerBinder (from firrtl). val commonDependencies: Seq[ModuleID] = Seq( scalatest, @@ -17,4 +19,4 @@ object Dependencies { handlebars ) -} \ No newline at end of file +} diff --git a/project/assembly.sbt b/project/assembly.sbt new file mode 100644 index 000000000..8956d3325 --- /dev/null +++ b/project/assembly.sbt @@ -0,0 +1,2 @@ +addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.5") +addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.8.2") From 923a08dfa18da4ff59ac089eaf61e284021beea1 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 1 Aug 2017 12:49:40 -0700 Subject: [PATCH 081/273] Fix typo --- macros/src/main/scala/MacroCompiler.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index d960a5db2..32a8b64a8 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -527,7 +527,7 @@ object MacroCompiler extends App { " -v, --verilog: Verilog output", " -c, --cost-func: Cost function to use. Optional (default: \"default\")", " -cp, --cost-param: Cost function parameter. (Optional depending on the cost function.). e.g. 
-c ExternalMetric -cp path /path/to/my/cost/script", - " --syn-flop: Produces synthesizable flop-based memories (for all memories and library memory macros); likely useful for simulation purposes") mkString "\n" + " --syn-flops: Produces synthesizable flop-based memories (for all memories and library memory macros); likely useful for simulation purposes") mkString "\n" def parseArgs(map: MacroParamMap, costMap: CostParamMap, synflops: Boolean, args: List[String]): (MacroParamMap, Boolean) = args match { From a25c84f72c9fdae71a8b43dd5e8719f10cce58e5 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 1 Aug 2017 13:55:32 -0700 Subject: [PATCH 082/273] Specify cost function from command line --- macros/src/main/scala/MacroCompiler.scala | 95 +++++++++++++++++------ 1 file changed, 73 insertions(+), 22 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 32a8b64a8..64cee44eb 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -19,7 +19,7 @@ import Utils._ */ // TODO: eventually explore compiling a single target memory using multiple // different kinds of target memory. -trait CostMetric { +trait CostMetric extends Serializable { /** * Cost function that returns the cost of compiling a memory using a certain * macro. @@ -100,6 +100,32 @@ object NewDefaultMetric extends CostMetric { } } +object MacroCompilerUtil { + import java.io._ + import java.util.Base64 + + // Adapted from https://stackoverflow.com/a/134918 + + /** Serialize an arbitrary object to String. + * Used to pass structured values through as an annotation. */ + def objToString(o: Serializable): String = { + val baos: ByteArrayOutputStream = new ByteArrayOutputStream + val oos: ObjectOutputStream = new ObjectOutputStream(baos) + oos.writeObject(o) + oos.close() + return Base64.getEncoder.encodeToString(baos.toByteArray) + } + + /** Deserialize an arbitrary object from String. */ + def objFromString(s: String): AnyRef = { + val data = Base64.getDecoder.decode(s) + val ois: ObjectInputStream = new ObjectInputStream(new ByteArrayInputStream(data)) + val o = ois.readObject + ois.close() + return o + } +} + object CostMetric { /** Define some default metric. */ val default: CostMetric = NewDefaultMetric @@ -108,30 +134,48 @@ object CostMetric { def getCostMetric(m: String, params: Map[String, String]): CostMetric = m match { case "default" => default case "PalmerMetric" => PalmerMetric - case "ExternalMetric" => new ExternalMetric(params.get("path").get) + case "ExternalMetric" => { + try { + new ExternalMetric(params.get("path").get) + } catch { + case e: NoSuchElementException => throw new IllegalArgumentException("Missing parameter 'path'") + } + } case "NewDefaultMetric" => NewDefaultMetric case _ => throw new IllegalArgumentException("Invalid cost metric " + m) } } object MacroCompilerAnnotation { - def apply(c: String, mem: File, lib: Option[File], synflops: Boolean): Annotation = - apply(c, mem.toString, lib map (_.toString), synflops) + /** + * Parameters associated to this MacroCompilerAnnotation. + * @param mem Path to memory lib + * @param lib Path to library lib or None if no libraries + * @param costMetric Cost metric to use + * @param synflops True to syn flops + */ + case class Params(mem: String, lib: Option[String], costMetric: CostMetric, synflops: Boolean) + + /** + * Create a MacroCompilerAnnotation. + * @param c Name of the module(?) for this annotation. + * @param p Parameters (see above). 
+ */ + def apply(c: String, p: Params): Annotation = + Annotation(CircuitName(c), classOf[MacroCompilerTransform], MacroCompilerUtil.objToString(p)) - def apply(c: String, mem: String, lib: Option[String], synflops: Boolean): Annotation = { - Annotation(CircuitName(c), classOf[MacroCompilerTransform], - s"${mem} %s ${synflops}".format(lib getOrElse "")) - } - private val matcher = "([^ ]+) ([^ ]*) (true|false)".r def unapply(a: Annotation) = a match { - case Annotation(CircuitName(c), t, matcher(mem, lib, synflops)) if t == classOf[MacroCompilerTransform] => - Some((c, Some(mem), if (lib.isEmpty) None else Some(lib), synflops.toBoolean)) + case Annotation(CircuitName(c), t, serialized) if t == classOf[MacroCompilerTransform] => { + val p: Params = MacroCompilerUtil.objFromString(serialized).asInstanceOf[Params] + Some(c, p) + } case _ => None } } class MacroCompilerPass(mems: Option[Seq[Macro]], - libs: Option[Seq[Macro]]) extends firrtl.passes.Pass { + libs: Option[Seq[Macro]], + costMetric: CostMetric = CostMetric.default) extends firrtl.passes.Pass { def compile(mem: Macro, lib: Macro): Option[(Module, ExtModule)] = { val pairedPorts = mem.sortedPorts zip lib.sortedPorts @@ -437,7 +481,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], (best, cost) case ((best, cost), lib) => // Run the cost function to evaluate this potential compile. - CostMetric.default.cost(mem, lib) match { + costMetric.cost(mem, lib) match { case Some(newCost) => { System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${newCost}") if (newCost > cost) (best, cost) @@ -469,10 +513,9 @@ class MacroCompilerTransform extends Transform { def inputForm = MidForm def outputForm = MidForm def execute(state: CircuitState) = getMyAnnotations(state) match { - case Seq(MacroCompilerAnnotation(state.circuit.main, memFile, libFile, synflops)) => - require(memFile.isDefined) + case Seq(MacroCompilerAnnotation(state.circuit.main, MacroCompilerAnnotation.Params(memFile, libFile, costMetric, synflops))) => // Read, eliminate None, get only SRAM, make firrtl macro - val mems: Option[Seq[Macro]] = mdf.macrolib.Utils.readMDFFromPath(memFile) match { + val mems: Option[Seq[Macro]] = mdf.macrolib.Utils.readMDFFromPath(Some(memFile)) match { case Some(x:Seq[mdf.macrolib.Macro]) => Some(Utils.filterForSRAM(Some(x)) getOrElse(List()) map {new Macro(_)}) case _ => None @@ -483,7 +526,7 @@ class MacroCompilerTransform extends Transform { case _ => None } val transforms = Seq( - new MacroCompilerPass(mems, libs), + new MacroCompilerPass(mems, libs, costMetric), new SynFlopsPass(synflops, libs getOrElse mems.get)) (transforms foldLeft state)((s, xform) => xform runTransform s).copy(form=outputForm) case _ => state @@ -529,9 +572,9 @@ object MacroCompiler extends App { " -cp, --cost-param: Cost function parameter. (Optional depending on the cost function.). e.g. 
-c ExternalMetric -cp path /path/to/my/cost/script", " --syn-flops: Produces synthesizable flop-based memories (for all memories and library memory macros); likely useful for simulation purposes") mkString "\n" - def parseArgs(map: MacroParamMap, costMap: CostParamMap, synflops: Boolean, args: List[String]): (MacroParamMap, Boolean) = + def parseArgs(map: MacroParamMap, costMap: CostParamMap, synflops: Boolean, args: List[String]): (MacroParamMap, CostParamMap, Boolean) = args match { - case Nil => (map, synflops) + case Nil => (map, costMap, synflops) case ("-m" | "--macro-list") :: value :: tail => parseArgs(map + (Macros -> value), costMap, synflops, tail) case ("-l" | "--library") :: value :: tail => @@ -551,7 +594,7 @@ object MacroCompiler extends App { } def run(args: List[String]) { - val (params, synflops) = parseArgs(Map[MacroParam, String](), Map[String, String](), false, args) + val (params, costParams, synflops) = parseArgs(Map[MacroParam, String](), Map[String, String](), false, args) try { val macros = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) @@ -562,8 +605,16 @@ object MacroCompiler extends App { // Note: the last macro in the input list is (seemingly arbitrarily) // determined as the firrtl "top-level module". val circuit = Circuit(NoInfo, macros, macros.last.name) - val annotations = AnnotationMap(Seq(MacroCompilerAnnotation( - circuit.main, params.get(Macros).get, params.get(Library), synflops))) + val annotations = AnnotationMap( + Seq(MacroCompilerAnnotation( + circuit.main, + MacroCompilerAnnotation.Params( + params.get(Macros).get, params.get(Library), + CostMetric.getCostMetric(params.getOrElse(CostFunc, "default"), costParams), + synflops + ) + )) + ) val state = CircuitState(circuit, HighForm, Some(annotations)) // Run the compiler. From 0203aa9e7cb23a205fcebcc4b96c1f65e383ab22 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 1 Aug 2017 14:03:09 -0700 Subject: [PATCH 083/273] Move notes to main file since they apply there as well --- macros/src/main/scala/MacroCompiler.scala | 6 ++++++ macros/src/test/scala/MacroCompilerSpec.scala | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 64cee44eb..966ea7bb1 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -1,5 +1,11 @@ // See LICENSE for license details. +/** + * Terminology note: + * mem - target memory to compile, in design (e.g. Mem() in rocket) + * lib - technology SRAM(s) to use to compile mem + */ + package barstools.macros import firrtl._ diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 5253623c8..907797456 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -7,12 +7,6 @@ import firrtl.Utils.ceilLog2 import java.io.{File, StringWriter} abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalatest.Matchers { - /** - * Terminology note: - * mem - target memory to compile, in design (e.g. 
Mem() in rocket) - * lib - technology SRAM(s) to use to compile mem - */ - val testDir: String = "test_run_dir/macros" new File(testDir).mkdirs // Make sure the testDir exists From 0f4683700f726e046ffe3b5ce4d0f0edbc600dc2 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 1 Aug 2017 14:19:10 -0700 Subject: [PATCH 084/273] Add cost function selection test --- macros/src/main/scala/MacroCompiler.scala | 1 + macros/src/test/scala/CostFunction.scala | 111 ++++++++++++++++++ macros/src/test/scala/MacroCompilerSpec.scala | 5 +- 3 files changed, 116 insertions(+), 1 deletion(-) create mode 100644 macros/src/test/scala/CostFunction.scala diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 966ea7bb1..18b5ad0a1 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -90,6 +90,7 @@ class ExternalMetric(path: String) extends CostMetric { } /** The current default metric in barstools, re-defined by Donggyu. */ +// TODO: write tests for this function to make sure it selects the right things object NewDefaultMetric extends CostMetric { override def cost(mem: Macro, lib: Macro): Option[BigInt] = { val memMask = mem.src.ports map (_.maskGran) find (_.isDefined) map (_.get) diff --git a/macros/src/test/scala/CostFunction.scala b/macros/src/test/scala/CostFunction.scala new file mode 100644 index 000000000..71d1270dd --- /dev/null +++ b/macros/src/test/scala/CostFunction.scala @@ -0,0 +1,111 @@ +package barstools.macros + +import mdf.macrolib._ + +/** Tests to check that the cost function mechanism is working properly. */ + +/** + * A test metric that simply favours memories with smaller widths, to test that + * the metric is chosen properly. + */ +object TestMinWidthMetric extends CostMetric { + // Smaller width = lower cost = favoured + override def cost(mem: Macro, lib: Macro): Option[BigInt] = Some(lib.src.width) +} + +/** Test that cost metric selection is working. */ +class SelectCostMetric extends MacroCompilerSpec with HasSRAMGenerator { + val mem = s"mem-SelectCostMetric.json" + val lib = s"lib-SelectCostMetric.json" + val v = s"SelectCostMetric.v" + + override val costMetric = TestMinWidthMetric + + val libSRAMs = Seq( + SRAMMacro( + macroType=SRAM, + name="SRAM_WIDTH_128", + depth=1024, + width=128, + family="1rw", + ports=Seq( + generateReadWritePort("", 128, 1024) + ) + ), + SRAMMacro( + macroType=SRAM, + name="SRAM_WIDTH_64", + depth=1024, + width=64, + family="1rw", + ports=Seq( + generateReadWritePort("", 64, 1024) + ) + ), + SRAMMacro( + macroType=SRAM, + name="SRAM_WIDTH_32", + depth=1024, + width=32, + family="1rw", + ports=Seq( + generateReadWritePort("", 32, 1024) + ) + ) + ) + + val memSRAMs = Seq(generateSRAM("target_memory", "", 128, 1024)) + + writeToLib(lib, libSRAMs) + writeToMem(mem, memSRAMs) + + // Check that the min width SRAM was chosen, even though it is less efficient. 
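  // (Under TestMinWidthMetric the reported cost is simply each library's width -- 128, 64 and
  //  32 for the macros above -- so SRAM_WIDTH_32 wins and the 128-bit target word has to be
  //  assembled from four 32-bit instances, as the expected FIRRTL below spells out.)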
+ val output = +""" +circuit target_memory : + module target_memory : + input clk : Clock + input addr : UInt<10> + input din : UInt<128> + output dout : UInt<128> + input write_en : UInt<1> + + inst mem_0_0 of SRAM_WIDTH_32 + inst mem_0_1 of SRAM_WIDTH_32 + inst mem_0_2 of SRAM_WIDTH_32 + inst mem_0_3 of SRAM_WIDTH_32 + mem_0_0.clk <= clk + mem_0_0.addr <= addr + node dout_0_0 = bits(mem_0_0.dout, 31, 0) + mem_0_0.din <= bits(din, 31, 0) + mem_0_0.write_en <= and(and(write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_1.clk <= clk + mem_0_1.addr <= addr + node dout_0_1 = bits(mem_0_1.dout, 31, 0) + mem_0_1.din <= bits(din, 63, 32) + mem_0_1.write_en <= and(and(write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_2.clk <= clk + mem_0_2.addr <= addr + node dout_0_2 = bits(mem_0_2.dout, 31, 0) + mem_0_2.din <= bits(din, 95, 64) + mem_0_2.write_en <= and(and(write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_3.clk <= clk + mem_0_3.addr <= addr + node dout_0_3 = bits(mem_0_3.dout, 31, 0) + mem_0_3.din <= bits(din, 127, 96) + mem_0_3.write_en <= and(and(write_en, UInt<1>("h1")), UInt<1>("h1")) + node dout_0 = cat(dout_0_3, cat(dout_0_2, cat(dout_0_1, dout_0_0))) + dout <= mux(UInt<1>("h1"), dout_0, UInt<1>("h0")) + + extmodule SRAM_WIDTH_32 : + input clk : Clock + input addr : UInt<10> + input din : UInt<32> + output dout : UInt<32> + input write_en : UInt<1> + + defname = SRAM_WIDTH_32 +""" + + compileExecuteAndTest(mem, lib, v, output) +} diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 907797456..6dcc0efbf 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -15,6 +15,9 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate val libPrefix: String = testDir val vPrefix: String = testDir + // Override this to use a different cost metric. + val costMetric: CostMetric = CostMetric.default + private def args(mem: String, lib: Option[String], v: String, synflops: Boolean) = List("-m", mem.toString, "-v", v) ++ (lib match { case None => Nil case Some(l) => List("-l", l.toString) }) ++ @@ -80,7 +83,7 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate val macros = mems map (_.blackbox) val circuit = Circuit(NoInfo, macros, macros.last.name) val passes = Seq( - new MacroCompilerPass(Some(mems), libs), + new MacroCompilerPass(Some(mems), libs, costMetric), new SynFlopsPass(synflops, libs getOrElse mems), RemoveEmpty) val result: Circuit = (passes foldLeft circuit)((c, pass) => pass run c) From 4013b1924f3f3116d29d359869e9fa48a3ab96f0 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 1 Aug 2017 19:02:56 -0700 Subject: [PATCH 085/273] Implement command line cost metric selection --- macros/src/main/scala/MacroCompiler.scala | 80 +++++++++++++++---- macros/src/test/scala/CostFunction.scala | 11 ++- macros/src/test/scala/MacroCompilerSpec.scala | 19 ++++- 3 files changed, 92 insertions(+), 18 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 18b5ad0a1..08f4ee345 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -36,12 +36,28 @@ trait CostMetric extends Serializable { * it cannot be compiled. */ def cost(mem: Macro, lib: Macro): Option[BigInt] + + /** + * Helper function to return the map of argments (or an empty map if there are none). 
+ */ + def commandLineParams(): Map[String, String] + + // We also want this to show up for the class itself. + def name(): String +} + +// Is there a better way to do this? (static method associated to CostMetric) +trait CostMetricCompanion { + def name(): String + + /** Construct this cost metric from a command line mapping. */ + def construct(m: Map[String, String]): CostMetric } // Some default cost functions. /** Palmer's old metric. */ -object PalmerMetric extends CostMetric { +object PalmerMetric extends CostMetric with CostMetricCompanion { override def cost(mem: Macro, lib: Macro): Option[BigInt] = { /* Palmer: A quick cost function (that must be kept in sync with * memory_cost()) that attempts to avoid compiling unncessary @@ -51,6 +67,10 @@ object PalmerMetric extends CostMetric { // (mem.depth * mem.width) ??? } + + override def commandLineParams = Map() + override def name = "PalmerMetric" + override def construct(m: Map[String, String]) = PalmerMetric } /** @@ -87,11 +107,27 @@ class ExternalMetric(path: String) extends CostMetric { case e: NumberFormatException => None } } + + override def commandLineParams = Map("path" -> path) + override def name = ExternalMetric.name +} + +object ExternalMetric extends CostMetricCompanion { + override def name = "ExternalMetric" + + /** Construct this cost metric from a command line mapping. */ + override def construct(m: Map[String, String]) = { + val pathOption = m.get("path") + pathOption match { + case Some(path:String) => new ExternalMetric(path) + case _ => throw new IllegalArgumentException("ExternalMetric missing option 'path'") + } + } } /** The current default metric in barstools, re-defined by Donggyu. */ // TODO: write tests for this function to make sure it selects the right things -object NewDefaultMetric extends CostMetric { +object NewDefaultMetric extends CostMetric with CostMetricCompanion { override def cost(mem: Macro, lib: Macro): Option[BigInt] = { val memMask = mem.src.ports map (_.maskGran) find (_.isDefined) map (_.get) val libMask = lib.src.ports map (_.maskGran) find (_.isDefined) map (_.get) @@ -105,6 +141,10 @@ object NewDefaultMetric extends CostMetric { (lib.src.depth * lib.src.width + 1) // weights on # cells ) } + + override def commandLineParams = Map() + override def name = "NewDefaultMetric" + override def construct(m: Map[String, String]) = NewDefaultMetric } object MacroCompilerUtil { @@ -137,19 +177,31 @@ object CostMetric { /** Define some default metric. */ val default: CostMetric = NewDefaultMetric - /** Select a cost function from string. */ - def getCostMetric(m: String, params: Map[String, String]): CostMetric = m match { - case "default" => default - case "PalmerMetric" => PalmerMetric - case "ExternalMetric" => { - try { - new ExternalMetric(params.get("path").get) - } catch { - case e: NoSuchElementException => throw new IllegalArgumentException("Missing parameter 'path'") - } + val costMetricCreators: scala.collection.mutable.Map[String, CostMetricCompanion] = scala.collection.mutable.Map() + + // Register some default metrics + registerCostMetric(PalmerMetric) + registerCostMetric(ExternalMetric) + registerCostMetric(NewDefaultMetric) + + /** + * Register a cost metric. + * @param createFuncHelper Companion object to fetch the name and construct + * the metric. + */ + def registerCostMetric(createFuncHelper: CostMetricCompanion): Unit = { + costMetricCreators.update(createFuncHelper.name, createFuncHelper) + } + + /** Select a cost metric from string. 
*/ + def getCostMetric(m: String, params: Map[String, String]): CostMetric = { + if (m == "default") { + CostMetric.default + } else if (!costMetricCreators.contains(m)) { + throw new IllegalArgumentException("Invalid cost metric " + m) + } else { + costMetricCreators.get(m).get.construct(params) } - case "NewDefaultMetric" => NewDefaultMetric - case _ => throw new IllegalArgumentException("Invalid cost metric " + m) } } diff --git a/macros/src/test/scala/CostFunction.scala b/macros/src/test/scala/CostFunction.scala index 71d1270dd..44d25b683 100644 --- a/macros/src/test/scala/CostFunction.scala +++ b/macros/src/test/scala/CostFunction.scala @@ -8,9 +8,13 @@ import mdf.macrolib._ * A test metric that simply favours memories with smaller widths, to test that * the metric is chosen properly. */ -object TestMinWidthMetric extends CostMetric { +object TestMinWidthMetric extends CostMetric with CostMetricCompanion { // Smaller width = lower cost = favoured override def cost(mem: Macro, lib: Macro): Option[BigInt] = Some(lib.src.width) + + override def commandLineParams = Map() + override def name = "TestMinWidthMetric" + override def construct(m: Map[String, String]) = TestMinWidthMetric } /** Test that cost metric selection is working. */ @@ -19,7 +23,10 @@ class SelectCostMetric extends MacroCompilerSpec with HasSRAMGenerator { val lib = s"lib-SelectCostMetric.json" val v = s"SelectCostMetric.v" - override val costMetric = TestMinWidthMetric + // Cost metrics must be registered for them to work with the command line. + CostMetric.registerCostMetric(TestMinWidthMetric) + + override val costMetric = Some(TestMinWidthMetric) val libSRAMs = Seq( SRAMMacro( diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 6dcc0efbf..6fd95eb56 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -16,11 +16,26 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate val vPrefix: String = testDir // Override this to use a different cost metric. - val costMetric: CostMetric = CostMetric.default + // If this is None, the compile() call will not have any -c/-cp arguments, and + // execute() will use CostMetric.default. + val costMetric: Option[CostMetric] = None + private def getCostMetric: CostMetric = costMetric.getOrElse(CostMetric.default) + + private def costMetricCmdLine = { + costMetric match { + case None => Nil + case Some(m) => { + val name = m.name + val params = m.commandLineParams + List("-c", name) ++ params.flatMap{ case (key, value) => List("-cp", key, value) } + } + } + } private def args(mem: String, lib: Option[String], v: String, synflops: Boolean) = List("-m", mem.toString, "-v", v) ++ (lib match { case None => Nil case Some(l) => List("-l", l.toString) }) ++ + costMetricCmdLine ++ (if (synflops) List("--syn-flops") else Nil) // Run the full compiler as if from the command line interface. 
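For reference, the flags assembled by costMetricCmdLine above correspond to the MacroCompiler usage text added earlier in this series. A hypothetical invocation selecting the external metric (the sbt runMain wrapper and the file names here are assumptions, not taken from these patches) would look roughly like:

    sbt 'runMain barstools.macros.MacroCompiler -m mem.json -l lib.json -v macros.v -c ExternalMetric -cp path /path/to/my/cost/script'

where the script at the given path is invoked with the paths of two temporary JSON files (the mem macro and the candidate lib macro) and is expected to print a single integer cost on standard output.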
@@ -83,7 +98,7 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate val macros = mems map (_.blackbox) val circuit = Circuit(NoInfo, macros, macros.last.name) val passes = Seq( - new MacroCompilerPass(Some(mems), libs, costMetric), + new MacroCompilerPass(Some(mems), libs, getCostMetric), new SynFlopsPass(synflops, libs getOrElse mems), RemoveEmpty) val result: Circuit = (passes foldLeft circuit)((c, pass) => pass run c) From df8b5815c6852c11d1ceae7e8b0e1649c1e62164 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 1 Aug 2017 19:50:17 -0700 Subject: [PATCH 086/273] Trim redundant MDF field --- macros/src/main/scala/Utils.scala | 2 +- macros/src/test/scala/CostFunction.scala | 3 --- macros/src/test/scala/MacroCompilerSpec.scala | 1 - macros/src/test/scala/SimpleSplitDepth.scala | 8 -------- macros/src/test/scala/SimpleSplitWidth.scala | 4 ---- macros/src/test/scala/SpecificExamples.scala | 7 ------- macros/src/test/scala/SynFlops.scala | 4 ---- mdf | 2 +- 8 files changed, 2 insertions(+), 29 deletions(-) diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index 36a8ce7c4..78c5007b6 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -68,7 +68,7 @@ class Macro(srcMacro: SRAMMacro) { object Utils { def filterForSRAM(s: Option[Seq[mdf.macrolib.Macro]]): Option[Seq[mdf.macrolib.SRAMMacro]] = { s match { - case Some(l:Seq[mdf.macrolib.Macro]) => Some(l filter { _.macroType == mdf.macrolib.SRAM } map { m => m.asInstanceOf[mdf.macrolib.SRAMMacro] }) + case Some(l:Seq[mdf.macrolib.Macro]) => Some(l filter { _.isInstanceOf[mdf.macrolib.SRAMMacro] } map { m => m.asInstanceOf[mdf.macrolib.SRAMMacro] }) case _ => None } } diff --git a/macros/src/test/scala/CostFunction.scala b/macros/src/test/scala/CostFunction.scala index 44d25b683..b0fca093d 100644 --- a/macros/src/test/scala/CostFunction.scala +++ b/macros/src/test/scala/CostFunction.scala @@ -30,7 +30,6 @@ class SelectCostMetric extends MacroCompilerSpec with HasSRAMGenerator { val libSRAMs = Seq( SRAMMacro( - macroType=SRAM, name="SRAM_WIDTH_128", depth=1024, width=128, @@ -40,7 +39,6 @@ class SelectCostMetric extends MacroCompilerSpec with HasSRAMGenerator { ) ), SRAMMacro( - macroType=SRAM, name="SRAM_WIDTH_64", depth=1024, width=64, @@ -50,7 +48,6 @@ class SelectCostMetric extends MacroCompilerSpec with HasSRAMGenerator { ) ), SRAMMacro( - macroType=SRAM, name="SRAM_WIDTH_32", depth=1024, width=32, diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 6fd95eb56..afdfe4f8c 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -174,7 +174,6 @@ trait HasSRAMGenerator { // Generate a "simple" SRAM (active high/positive edge, 1 read-write port). 
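  // e.g. generateSRAM("target_memory", "", 128, 1024), as used in the cost-metric test above,
  // builds a 1024-entry, 128-bit SRAMMacro with a single read-write port and no write mask.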
def generateSRAM(name: String, prefix: String, width: Int, depth: Int, maskGran: Option[Int] = None, extraPorts: Seq[MacroExtraPort] = List()): SRAMMacro = { SRAMMacro( - macroType=SRAM, name=name, width=width, depth=depth, diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index 0965715d2..d8be8fe53 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -337,7 +337,6 @@ class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGener val v = "split_depth-r-w-split-lib-split-mem.v" val libMacro = SRAMMacro( - macroType=SRAM, name="awesome_lib_mem", width=width, depth=libDepth, @@ -349,7 +348,6 @@ class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGener ) val memMacro = SRAMMacro( - macroType=SRAM, name="target_memory", width=width, depth=memDepth, @@ -426,7 +424,6 @@ circuit target_memory : val v = "split_depth-r-w-regular-lib-split-mem.v" val memMacro = SRAMMacro( - macroType=SRAM, name="target_memory", width=width, depth=memDepth, @@ -460,7 +457,6 @@ TODO val v = "split_depth-r-w-split-lib-regular-mem.v" val libMacro = SRAMMacro( - macroType=SRAM, name="awesome_lib_mem", width=width, depth=libDepth, @@ -502,7 +498,6 @@ class SplitDepth_SplitPortsMasked extends MacroCompilerSpec with HasSRAMGenerato val v = "split_depth-r-mw-split-lib-split-mem.v" val libMacro = SRAMMacro( - macroType=SRAM, name="awesome_lib_mem", width=width, depth=libDepth, @@ -514,7 +509,6 @@ class SplitDepth_SplitPortsMasked extends MacroCompilerSpec with HasSRAMGenerato ) val memMacro = SRAMMacro( - macroType=SRAM, name="target_memory", width=width, depth=memDepth, @@ -595,7 +589,6 @@ circuit target_memory : val v = "split_depth-r-mw-regular-lib-split-mem.v" val memMacro = SRAMMacro( - macroType=SRAM, name="target_memory", width=width, depth=memDepth, @@ -629,7 +622,6 @@ TODO val v = "split_depth-r-mw-split-lib-regular-mem.v" val libMacro = SRAMMacro( - macroType=SRAM, name="awesome_lib_mem", width=width, depth=libDepth, diff --git a/macros/src/test/scala/SimpleSplitWidth.scala b/macros/src/test/scala/SimpleSplitWidth.scala index b75b9fe9b..b25c7d1bc 100644 --- a/macros/src/test/scala/SimpleSplitWidth.scala +++ b/macros/src/test/scala/SimpleSplitWidth.scala @@ -407,7 +407,6 @@ class SplitWidth1024x32_readEnable_Lib extends MacroCompilerSpec with HasSRAMGen override def generateLibSRAM() = { SRAMMacro( - macroType=SRAM, name=lib_name, width=libWidth, depth=libDepth, @@ -466,7 +465,6 @@ class SplitWidth1024x32_readEnable_Mem extends MacroCompilerSpec with HasSRAMGen override def generateMemSRAM() = { SRAMMacro( - macroType=SRAM, name=mem_name, width=memWidth, depth=memDepth, @@ -493,7 +491,6 @@ class SplitWidth1024x32_readEnable_LibMem extends MacroCompilerSpec with HasSRAM override def generateLibSRAM() = { SRAMMacro( - macroType=SRAM, name=lib_name, width=libWidth, depth=libDepth, @@ -508,7 +505,6 @@ class SplitWidth1024x32_readEnable_LibMem extends MacroCompilerSpec with HasSRAM override def generateMemSRAM() = { SRAMMacro( - macroType=SRAM, name=mem_name, width=memWidth, depth=memDepth, diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala index 648c57bd3..628c39645 100644 --- a/macros/src/test/scala/SpecificExamples.scala +++ b/macros/src/test/scala/SpecificExamples.scala @@ -11,7 +11,6 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { val libSRAMs = Seq( SRAMMacro( - macroType=SRAM, 
name="SRAM1RW1024x8", depth=1024, width=8, @@ -21,7 +20,6 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { ) ), SRAMMacro( - macroType=SRAM, name="SRAM1RW512x32", depth=512, width=32, @@ -31,7 +29,6 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { ) ), SRAMMacro( - macroType=SRAM, name="SRAM1RW64x128", depth=64, width=128, @@ -41,7 +38,6 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { ) ), SRAMMacro( - macroType=SRAM, name="SRAM1RW64x32", depth=64, width=32, @@ -51,7 +47,6 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { ) ), SRAMMacro( - macroType=SRAM, name="SRAM1RW64x8", depth=64, width=8, @@ -61,7 +56,6 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { ) ), SRAMMacro( - macroType=SRAM, name="SRAM1RW512x8", depth=512, width=8, @@ -71,7 +65,6 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { ) ), SRAMMacro( - macroType=SRAM, name="SRAM2RW64x32", depth=64, width=32, diff --git a/macros/src/test/scala/SynFlops.scala b/macros/src/test/scala/SynFlops.scala index f273c29e4..eeac4c8ca 100644 --- a/macros/src/test/scala/SynFlops.scala +++ b/macros/src/test/scala/SynFlops.scala @@ -87,7 +87,6 @@ class Synflops_SplitPorts_Read_Write extends MacroCompilerSpec with HasSRAMGener override lazy val width = 8 override def generateLibSRAM = SRAMMacro( - macroType=SRAM, name=lib_name, width=width, depth=libDepth, @@ -99,7 +98,6 @@ class Synflops_SplitPorts_Read_Write extends MacroCompilerSpec with HasSRAMGener ) override def generateMemSRAM = SRAMMacro( - macroType=SRAM, name=mem_name, width=width, depth=memDepth, @@ -198,7 +196,6 @@ class Synflops_SplitPorts_MaskedMem_Read_MaskedWrite extends MacroCompilerSpec w override lazy val libMaskGran = Some(1) override def generateLibSRAM = SRAMMacro( - macroType=SRAM, name=lib_name, width=width, depth=libDepth, @@ -210,7 +207,6 @@ class Synflops_SplitPorts_MaskedMem_Read_MaskedWrite extends MacroCompilerSpec w ) override def generateMemSRAM = SRAMMacro( - macroType=SRAM, name=mem_name, width=width, depth=memDepth, diff --git a/mdf b/mdf index 893ca7476..9cb783025 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit 893ca7476a6af689921a5ca99afe6df9c88fd3fd +Subproject commit 9cb783025873d29e2253287f9f00a9ef3f3e9c14 From f9edbfea270b3eea116a0538a6e226d4106fb0af Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 1 Aug 2017 20:02:13 -0700 Subject: [PATCH 087/273] Move cost metric to its own file --- macros/src/main/scala/CostMetric.scala | 189 ++++++++++++++++++++++ macros/src/main/scala/MacroCompiler.scala | 186 --------------------- 2 files changed, 189 insertions(+), 186 deletions(-) create mode 100644 macros/src/main/scala/CostMetric.scala diff --git a/macros/src/main/scala/CostMetric.scala b/macros/src/main/scala/CostMetric.scala new file mode 100644 index 000000000..6e790f8ce --- /dev/null +++ b/macros/src/main/scala/CostMetric.scala @@ -0,0 +1,189 @@ +// See LICENSE for license details. + +package barstools.macros + +/** + * Trait which can calculate the cost of compiling a memory against a certain + * library memory macro using a cost function. + */ +// TODO: eventually explore compiling a single target memory using multiple +// different kinds of target memory. +trait CostMetric extends Serializable { + /** + * Cost function that returns the cost of compiling a memory using a certain + * macro. 
+ * + * @param mem Memory macro to compile (target memory) + * @param lib Library memory macro to use (library memory) + * @return The cost of this compile, defined by this cost metric, or None if + * it cannot be compiled. + */ + def cost(mem: Macro, lib: Macro): Option[BigInt] + + /** + * Helper function to return the map of argments (or an empty map if there are none). + */ + def commandLineParams(): Map[String, String] + + // We also want this to show up for the class itself. + def name(): String +} + +// Is there a better way to do this? (static method associated to CostMetric) +trait CostMetricCompanion { + def name(): String + + /** Construct this cost metric from a command line mapping. */ + def construct(m: Map[String, String]): CostMetric +} + +// Some default cost functions. + +/** Palmer's old metric. */ +object PalmerMetric extends CostMetric with CostMetricCompanion { + override def cost(mem: Macro, lib: Macro): Option[BigInt] = { + /* Palmer: A quick cost function (that must be kept in sync with + * memory_cost()) that attempts to avoid compiling unncessary + * memories. This is a lower bound on the cost of compiling a + * memory: it assumes 100% bit-cell utilization when mapping. */ + // val cost = 100 * (mem.depth * mem.width) / (lib.depth * lib.width) + + // (mem.depth * mem.width) + ??? + } + + override def commandLineParams = Map() + override def name = "PalmerMetric" + override def construct(m: Map[String, String]) = PalmerMetric +} + +/** + * An external cost function. + * Calls the specified path with paths to the JSON MDF representation of the mem + * and lib macros. The external executable should return a BigInt. + * None will be returned if the external executable does not return a valid + * BigInt. + */ +class ExternalMetric(path: String) extends CostMetric { + import mdf.macrolib.Utils.writeMacroToPath + import java.io._ + import scala.language.postfixOps // for !! postfix op + import sys.process._ + + override def cost(mem: Macro, lib: Macro): Option[BigInt] = { + // Create temporary files. + val memFile = File.createTempFile("_macrocompiler_mem_", ".json") + val libFile = File.createTempFile("_macrocompiler_lib_", ".json") + + writeMacroToPath(Some(memFile.getAbsolutePath), mem.src) + writeMacroToPath(Some(libFile.getAbsolutePath), lib.src) + + // !! executes the given command + val result: String = (s"${path} ${memFile.getAbsolutePath} ${libFile.getAbsolutePath}" !!).trim + + // Remove temporary files. + memFile.delete() + libFile.delete() + + try { + Some(BigInt(result)) + } catch { + case e: NumberFormatException => None + } + } + + override def commandLineParams = Map("path" -> path) + override def name = ExternalMetric.name +} + +object ExternalMetric extends CostMetricCompanion { + override def name = "ExternalMetric" + + /** Construct this cost metric from a command line mapping. */ + override def construct(m: Map[String, String]) = { + val pathOption = m.get("path") + pathOption match { + case Some(path:String) => new ExternalMetric(path) + case _ => throw new IllegalArgumentException("ExternalMetric missing option 'path'") + } + } +} + +/** The current default metric in barstools, re-defined by Donggyu. 
*/ +// TODO: write tests for this function to make sure it selects the right things +object NewDefaultMetric extends CostMetric with CostMetricCompanion { + override def cost(mem: Macro, lib: Macro): Option[BigInt] = { + val memMask = mem.src.ports map (_.maskGran) find (_.isDefined) map (_.get) + val libMask = lib.src.ports map (_.maskGran) find (_.isDefined) map (_.get) + val memWidth = (memMask, libMask) match { + case (Some(1), Some(1)) | (None, _) => mem.src.width + case (Some(p), _) => p // assume that the memory consists of smaller chunks + } + return Some( + (((mem.src.depth - 1) / lib.src.depth) + 1) * + (((memWidth - 1) / lib.src.width) + 1) * + (lib.src.depth * lib.src.width + 1) // weights on # cells + ) + } + + override def commandLineParams = Map() + override def name = "NewDefaultMetric" + override def construct(m: Map[String, String]) = NewDefaultMetric +} + +object MacroCompilerUtil { + import java.io._ + import java.util.Base64 + + // Adapted from https://stackoverflow.com/a/134918 + + /** Serialize an arbitrary object to String. + * Used to pass structured values through as an annotation. */ + def objToString(o: Serializable): String = { + val baos: ByteArrayOutputStream = new ByteArrayOutputStream + val oos: ObjectOutputStream = new ObjectOutputStream(baos) + oos.writeObject(o) + oos.close() + return Base64.getEncoder.encodeToString(baos.toByteArray) + } + + /** Deserialize an arbitrary object from String. */ + def objFromString(s: String): AnyRef = { + val data = Base64.getDecoder.decode(s) + val ois: ObjectInputStream = new ObjectInputStream(new ByteArrayInputStream(data)) + val o = ois.readObject + ois.close() + return o + } +} + +object CostMetric { + /** Define some default metric. */ + val default: CostMetric = NewDefaultMetric + + val costMetricCreators: scala.collection.mutable.Map[String, CostMetricCompanion] = scala.collection.mutable.Map() + + // Register some default metrics + registerCostMetric(PalmerMetric) + registerCostMetric(ExternalMetric) + registerCostMetric(NewDefaultMetric) + + /** + * Register a cost metric. + * @param createFuncHelper Companion object to fetch the name and construct + * the metric. + */ + def registerCostMetric(createFuncHelper: CostMetricCompanion): Unit = { + costMetricCreators.update(createFuncHelper.name, createFuncHelper) + } + + /** Select a cost metric from string. */ + def getCostMetric(m: String, params: Map[String, String]): CostMetric = { + if (m == "default") { + CostMetric.default + } else if (!costMetricCreators.contains(m)) { + throw new IllegalArgumentException("Invalid cost metric " + m) + } else { + costMetricCreators.get(m).get.construct(params) + } + } +} diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 08f4ee345..4f1607da6 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -19,192 +19,6 @@ import scala.collection.mutable.{ArrayBuffer, HashMap} import java.io.{File, FileWriter} import Utils._ -/** - * Trait which can calculate the cost of compiling a memory against a certain - * library memory macro using a cost function. - */ -// TODO: eventually explore compiling a single target memory using multiple -// different kinds of target memory. -trait CostMetric extends Serializable { - /** - * Cost function that returns the cost of compiling a memory using a certain - * macro. 
- * - * @param mem Memory macro to compile (target memory) - * @param lib Library memory macro to use (library memory) - * @return The cost of this compile, defined by this cost metric, or None if - * it cannot be compiled. - */ - def cost(mem: Macro, lib: Macro): Option[BigInt] - - /** - * Helper function to return the map of argments (or an empty map if there are none). - */ - def commandLineParams(): Map[String, String] - - // We also want this to show up for the class itself. - def name(): String -} - -// Is there a better way to do this? (static method associated to CostMetric) -trait CostMetricCompanion { - def name(): String - - /** Construct this cost metric from a command line mapping. */ - def construct(m: Map[String, String]): CostMetric -} - -// Some default cost functions. - -/** Palmer's old metric. */ -object PalmerMetric extends CostMetric with CostMetricCompanion { - override def cost(mem: Macro, lib: Macro): Option[BigInt] = { - /* Palmer: A quick cost function (that must be kept in sync with - * memory_cost()) that attempts to avoid compiling unncessary - * memories. This is a lower bound on the cost of compiling a - * memory: it assumes 100% bit-cell utilization when mapping. */ - // val cost = 100 * (mem.depth * mem.width) / (lib.depth * lib.width) + - // (mem.depth * mem.width) - ??? - } - - override def commandLineParams = Map() - override def name = "PalmerMetric" - override def construct(m: Map[String, String]) = PalmerMetric -} - -/** - * An external cost function. - * Calls the specified path with paths to the JSON MDF representation of the mem - * and lib macros. The external executable should return a BigInt. - * None will be returned if the external executable does not return a valid - * BigInt. - */ -class ExternalMetric(path: String) extends CostMetric { - import mdf.macrolib.Utils.writeMacroToPath - import java.io._ - import scala.language.postfixOps // for !! postfix op - import sys.process._ - - override def cost(mem: Macro, lib: Macro): Option[BigInt] = { - // Create temporary files. - val memFile = File.createTempFile("_macrocompiler_mem_", ".json") - val libFile = File.createTempFile("_macrocompiler_lib_", ".json") - - writeMacroToPath(Some(memFile.getAbsolutePath), mem.src) - writeMacroToPath(Some(libFile.getAbsolutePath), lib.src) - - // !! executes the given command - val result: String = (s"${path} ${memFile.getAbsolutePath} ${libFile.getAbsolutePath}" !!).trim - - // Remove temporary files. - memFile.delete() - libFile.delete() - - try { - Some(BigInt(result)) - } catch { - case e: NumberFormatException => None - } - } - - override def commandLineParams = Map("path" -> path) - override def name = ExternalMetric.name -} - -object ExternalMetric extends CostMetricCompanion { - override def name = "ExternalMetric" - - /** Construct this cost metric from a command line mapping. */ - override def construct(m: Map[String, String]) = { - val pathOption = m.get("path") - pathOption match { - case Some(path:String) => new ExternalMetric(path) - case _ => throw new IllegalArgumentException("ExternalMetric missing option 'path'") - } - } -} - -/** The current default metric in barstools, re-defined by Donggyu. 
*/ -// TODO: write tests for this function to make sure it selects the right things -object NewDefaultMetric extends CostMetric with CostMetricCompanion { - override def cost(mem: Macro, lib: Macro): Option[BigInt] = { - val memMask = mem.src.ports map (_.maskGran) find (_.isDefined) map (_.get) - val libMask = lib.src.ports map (_.maskGran) find (_.isDefined) map (_.get) - val memWidth = (memMask, libMask) match { - case (Some(1), Some(1)) | (None, _) => mem.src.width - case (Some(p), _) => p // assume that the memory consists of smaller chunks - } - return Some( - (((mem.src.depth - 1) / lib.src.depth) + 1) * - (((memWidth - 1) / lib.src.width) + 1) * - (lib.src.depth * lib.src.width + 1) // weights on # cells - ) - } - - override def commandLineParams = Map() - override def name = "NewDefaultMetric" - override def construct(m: Map[String, String]) = NewDefaultMetric -} - -object MacroCompilerUtil { - import java.io._ - import java.util.Base64 - - // Adapted from https://stackoverflow.com/a/134918 - - /** Serialize an arbitrary object to String. - * Used to pass structured values through as an annotation. */ - def objToString(o: Serializable): String = { - val baos: ByteArrayOutputStream = new ByteArrayOutputStream - val oos: ObjectOutputStream = new ObjectOutputStream(baos) - oos.writeObject(o) - oos.close() - return Base64.getEncoder.encodeToString(baos.toByteArray) - } - - /** Deserialize an arbitrary object from String. */ - def objFromString(s: String): AnyRef = { - val data = Base64.getDecoder.decode(s) - val ois: ObjectInputStream = new ObjectInputStream(new ByteArrayInputStream(data)) - val o = ois.readObject - ois.close() - return o - } -} - -object CostMetric { - /** Define some default metric. */ - val default: CostMetric = NewDefaultMetric - - val costMetricCreators: scala.collection.mutable.Map[String, CostMetricCompanion] = scala.collection.mutable.Map() - - // Register some default metrics - registerCostMetric(PalmerMetric) - registerCostMetric(ExternalMetric) - registerCostMetric(NewDefaultMetric) - - /** - * Register a cost metric. - * @param createFuncHelper Companion object to fetch the name and construct - * the metric. - */ - def registerCostMetric(createFuncHelper: CostMetricCompanion): Unit = { - costMetricCreators.update(createFuncHelper.name, createFuncHelper) - } - - /** Select a cost metric from string. */ - def getCostMetric(m: String, params: Map[String, String]): CostMetric = { - if (m == "default") { - CostMetric.default - } else if (!costMetricCreators.contains(m)) { - throw new IllegalArgumentException("Invalid cost metric " + m) - } else { - costMetricCreators.get(m).get.construct(params) - } - } -} - object MacroCompilerAnnotation { /** * Parameters associated to this MacroCompilerAnnotation. From e89079f2d7fdebe7150783ed0f7523a690f4d2b6 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 1 Aug 2017 20:02:26 -0700 Subject: [PATCH 088/273] Test for non-empty Verilog --- macros/src/test/scala/SpecificExamples.scala | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala index 628c39645..7167ce619 100644 --- a/macros/src/test/scala/SpecificExamples.scala +++ b/macros/src/test/scala/SpecificExamples.scala @@ -4,6 +4,23 @@ import mdf.macrolib._ // Specific one-off tests to run, not created by a generator. +// Check that verilog actually gets generated. +// TODO: check the actual verilog's correctness? 
+class GenerateSomeVerilog extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { + override lazy val width = 32 + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 + + it should "execute fine" in { + compileExecuteAndTest(mem, lib, v, output) + } + + it should "generate non-empty verilog" in { + val verilog = scala.io.Source.fromFile(vPrefix + "/" + v).getLines().mkString("\n") + verilog.isEmpty shouldBe false + } +} + class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { val mem = s"mem-RocketChipTest.json" val lib = s"lib-RocketChipTest.json" From e726daec41354bb64d73ab060c51d718b804efa5 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 2 Aug 2017 11:40:58 -0700 Subject: [PATCH 089/273] Bump mdf --- mdf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mdf b/mdf index 9cb783025..b3677862b 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit 9cb783025873d29e2253287f9f00a9ef3f3e9c14 +Subproject commit b3677862b21a59a63b20dd963ce9c518f293cfac From 676b8e72bab719fea59870eed510f99cb1978bae Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Thu, 3 Aug 2017 13:13:16 -0700 Subject: [PATCH 090/273] Add rocket-chip inspired tests --- macros/src/main/scala/MacroCompiler.scala | 1 - macros/src/test/scala/Masks.scala | 152 ++++++++++++++++++++++ 2 files changed, 152 insertions(+), 1 deletion(-) create mode 100644 macros/src/test/scala/Masks.scala diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 4f1607da6..6c0306a82 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -223,7 +223,6 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } else { require(libPort.src.effectiveMaskGran == 1, "only single-bit mask supported for now") - require(isPowerOfTwo(memPort.src.effectiveMaskGran), "only powers of two masks supported for now") require(isPowerOfTwo(libPort.src.effectiveMaskGran), "only powers of two masks supported for now") cat(((low to high) map (i => bits(WRef(mem), i / memPort.src.effectiveMaskGran))).reverse) diff --git a/macros/src/test/scala/Masks.scala b/macros/src/test/scala/Masks.scala new file mode 100644 index 000000000..c00a162d0 --- /dev/null +++ b/macros/src/test/scala/Masks.scala @@ -0,0 +1,152 @@ +package barstools.macros + +import mdf.macrolib._ + +// Test the ability of the compiler to deal with various mask combinations. + +// Simple powers of two with bit-masked lib. 
+ +trait MasksTestSettings { + this: MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator => + override lazy val memDepth = 2048 + override lazy val libDepth = 1024 +} + +class Masks_PowersOfTwo_8_1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 64 + override lazy val memMaskGran = Some(8) + override lazy val libMaskGran = Some(1) + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_PowersOfTwo_16_1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 64 + override lazy val memMaskGran = Some(16) + override lazy val libMaskGran = Some(1) + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_PowersOfTwo_32_1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 64 + override lazy val memMaskGran = Some(32) + override lazy val libMaskGran = Some(1) + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_PowersOfTwo_64_1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 64 + override lazy val memMaskGran = Some(64) + override lazy val libMaskGran = Some(1) + + compileExecuteAndTest(mem, lib, v, output) +} + +// Simple powers of two with non bit-masked lib. + +class Masks_PowersOfTwo_32_4 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 128 + override lazy val memMaskGran = Some(32) + override lazy val libMaskGran = Some(4) + + it should "be enabled when non-power of two masks are supported" is (pending) + //compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_PowersOfTwo_32_8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 128 + override lazy val memMaskGran = Some(32) + override lazy val libMaskGran = Some(8) + + it should "be enabled when non-power of two masks are supported" is (pending) + //compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_PowersOfTwo_8_8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 128 + override lazy val memMaskGran = Some(8) + override lazy val libMaskGran = Some(8) + + it should "be enabled when non-power of two masks are supported" is (pending) + //compileExecuteAndTest(mem, lib, v, output) +} + +// Width as a multiple of the mask, bit-masked lib + +class Masks_IntegerMaskMultiple_20_10 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 20 + override lazy val memMaskGran = Some(10) + override lazy val libMaskGran = Some(1) + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_IntegerMaskMultiple_21_7 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 21 + override lazy val memMaskGran = Some(21) + override lazy val libMaskGran = Some(7) + + it should "be enabled when non-power of two masks are supported" is (pending) + //~ compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_IntegerMaskMultiple_21_21 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 21 
+ override lazy val memMaskGran = Some(21) + override lazy val libMaskGran = Some(1) + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_IntegerMaskMultiple_84_21 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 84 + override lazy val memMaskGran = Some(21) + override lazy val libMaskGran = Some(1) + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_IntegerMaskMultiple_92_23 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 92 + override lazy val memMaskGran = Some(23) + override lazy val libMaskGran = Some(1) + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_IntegerMaskMultiple_117_13 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 117 + override lazy val memMaskGran = Some(13) + override lazy val libMaskGran = Some(1) + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_IntegerMaskMultiple_160_20 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 160 + override lazy val memMaskGran = Some(20) + override lazy val libMaskGran = Some(1) + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_IntegerMaskMultiple_184_23 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 184 + override lazy val memMaskGran = Some(23) + override lazy val libMaskGran = Some(1) + + compileExecuteAndTest(mem, lib, v, output) +} + +// Width as an non-integer multiple of the mask, bit-masked lib + +class Masks_NonIntegerMaskMultiple_32_3 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 32 + override lazy val memMaskGran = Some(3) + override lazy val libMaskGran = Some(1) + + it should "be enabled when non-power of two masks are supported" is (pending) + //~ compileExecuteAndTest(mem, lib, v, output) +} From 5d3bebd2b91cddebb7c1ab750a5150277231b1cf Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 9 Aug 2017 00:57:38 -0700 Subject: [PATCH 091/273] Re-implement parallel mapping - Support byte-masked SRAM, yay - Also nuke a bunch of bugs --- macros/src/main/scala/MacroCompiler.scala | 145 ++++++++++++++---- macros/src/test/scala/MacroCompilerSpec.scala | 55 ++++++- macros/src/test/scala/Masks.scala | 95 +++++++++++- macros/src/test/scala/SimpleSplitDepth.scala | 21 +-- macros/src/test/scala/SimpleSplitWidth.scala | 38 ++--- macros/src/test/scala/SpecificExamples.scala | 89 +++-------- 6 files changed, 293 insertions(+), 150 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 6c0306a82..162e015ae 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -53,35 +53,113 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], val pairedPorts = mem.sortedPorts zip lib.sortedPorts // Parallel mapping - val pairs = ArrayBuffer[(BigInt, BigInt)]() - var last = 0 - for (i <- 0 until mem.src.width) { - if (i <= last + 1) { - /* Palmer: Every memory is going to have to fit at least a single bit. */ - // continue - } else if ((i - last) % lib.src.width.toInt == 0) { - /* Palmer: It's possible that we rolled over a memory's width here, - if so generate one. 
*/ - pairs += ((last, i-1)) - last = i - } else { - /* Palmer: FIXME: This is a mess, I must just be super confused. */ - for ((memPort, libPort) <- pairedPorts) { - (memPort.src.maskGran, libPort.src.maskGran) match { - case (_, Some(p)) if p == 1 => // continue - case (Some(p), _) if i % p == 0 => - pairs += ((last, i-1)) - last = i - case (_, None) => // continue - case (_, Some(p)) if p == lib.src.width => // continue - case _ => - System.err println "Bit-mask (or unmasked) target memories are supported only" - return None + + /** + * This is a list of submemories by width. + * The tuples are (lsb, msb) inclusive. + * e.g. (0, 7) and (8, 15) might be a split for a width=16 memory into two + * width=8 memories. + */ + val bitPairs = ArrayBuffer[(BigInt, BigInt)]() + var currentLSB = 0 + + // Process every bit in the mem width. + for (memBit <- 0 until mem.src.width) { + val bitsInCurrentMem = memBit - currentLSB + + /** + * Helper function to check if it's time to split memories. + * @param effectiveLibWidth Split memory when we have this many bits. + */ + def splitMemory(effectiveLibWidth: Int): Unit = { + if (bitsInCurrentMem == effectiveLibWidth) { + bitPairs += ((currentLSB, memBit - 1)) + currentLSB = memBit + } + } + + for ((memPort, libPort) <- pairedPorts) { + + // Make sure we don't have a maskGran larger than the width of the memory. + assert (memPort.src.effectiveMaskGran <= memPort.src.width) + assert (libPort.src.effectiveMaskGran <= libPort.src.width) + + val libWidth = libPort.src.width + + // Don't consider cases of maskGran == width as "masked" since those masks + // effectively function as write-enable bits. + val memMask = if (memPort.src.effectiveMaskGran == memPort.src.width) None else memPort.src.maskGran + val libMask = if (libPort.src.effectiveMaskGran == libPort.src.width) None else libPort.src.maskGran + + (memMask, libMask) match { + // Neither lib nor mem is masked. + // No problems here. + case (None, None) => splitMemory(libWidth) + + // Only the lib is masked. + // Not an issue; we can just make all the bits in the lib mask enabled. + case (None, Some(p)) => splitMemory(libWidth) + + // Only the mem is masked. + case (Some(p), None) => { + if (p % libPort.src.width == 0) { + // If the mem mask is a multiple of the lib width, then we're good. + // Just roll over every lib width as usual. + // e.g. lib width=4, mem maskGran={4, 8, 12, 16, ...} + splitMemory(libWidth) + } else if (libPort.src.width % p == 0) { + // Lib width is a multiple of the mem mask. + // Consider the case where mem mask = 4 but lib width = 8, unmasked. + // We can still compile, but will need to waste the extra bits. + splitMemory(memMask.get) + } else { + // No neat multiples. + // We might still be able to compile extremely inefficiently. + if (p < libPort.src.width) { + // Compile using mem mask as the effective width. (note that lib is not masked) + // e.g. mem mask = 3, lib width = 8 + splitMemory(memMask.get) + } else { + // e.g. mem mask = 13, lib width = 8 + System.err.println(s"Unmasked target memory: unaligned mem maskGran ${p} with lib (${lib.src.name}) width ${libPort.src.width} not supported") + return None + } + } + } + + // Both lib and mem are masked. + case (Some(m), Some(l)) => { + if (m == l) { + // Lib maskGran == mem maskGran, no problems + splitMemory(libWidth) + } else if (m > l) { + // Mem maskGran > lib maskGran + if (m % l == 0) { + // Mem maskGran is a multiple of lib maskGran, carry on as normal. 
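+                  // e.g. mem maskGran = 16 with lib maskGran = 8: each mem mask
+                  // bit simply drives two adjacent lib mask bits, so splitting at
+                  // the usual libWidth boundary is still safe.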
+ splitMemory(libWidth) + } else { + System.err.println(s"Mem maskGran ${m} is not a multiple of lib maskGran ${l}: currently not supported") + return None + } + } else { // m < l + // Lib maskGran > mem maskGran. + if (l % m == 0) { + // Lib maskGran is a multiple of mem maskGran. + // e.g. lib maskGran = 8, mem maskGran = 4. + // In this case we can only compile very wastefully (by treating + // lib as a mem maskGran width memory) :( + splitMemory(memMask.get) + } else { + System.err.println(s"Lib maskGran ${m} is not a multiple of mem maskGran ${l}: currently not supported") + return None + } + } } } } } - pairs += ((last, mem.src.width.toInt - 1)) + // Add in the last chunk if there are any leftovers + bitPairs += ((currentLSB, mem.src.width.toInt - 1)) // Serial mapping val stmts = ArrayBuffer[Statement]() @@ -116,7 +194,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } } for ((off, i) <- (0 until mem.src.depth by lib.src.depth).zipWithIndex) { - for (j <- pairs.indices) { + for (j <- bitPairs.indices) { val name = s"mem_${i}_${j}" stmts += WDefInstance(NoInfo, name, lib.src.name, lib.tpe) // connect extra ports @@ -141,7 +219,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], and(e, addrMatch) } val cats = ArrayBuffer[Expression]() - for (((low, high), j) <- pairs.zipWithIndex) { + for (((low, high), j) <- bitPairs.zipWithIndex) { val inst = WRef(s"mem_${i}_${j}", lib.tpe) def connectPorts2(mem: Expression, @@ -221,11 +299,18 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], if (libPort.src.effectiveMaskGran == libPort.src.width) { bits(WRef(mem), low / memPort.src.effectiveMaskGran) } else { - require(libPort.src.effectiveMaskGran == 1, "only single-bit mask supported for now") - require(isPowerOfTwo(libPort.src.effectiveMaskGran), "only powers of two masks supported for now") - cat(((low to high) map (i => bits(WRef(mem), i / memPort.src.effectiveMaskGran))).reverse) + val effectiveLibWidth = if (memPort.src.maskGran.get < libPort.src.effectiveMaskGran) memPort.src.maskGran.get else libPort.src.width + cat(((0 until libPort.src.width by libPort.src.effectiveMaskGran) map (i => { + if (memPort.src.maskGran.get < libPort.src.effectiveMaskGran && i >= effectiveLibWidth) { + // If the memMaskGran is smaller than the lib's gran, then + // zero out the upper bits. + zero + } else { + bits(WRef(mem), (low + i) / memPort.src.effectiveMaskGran) + } + })).reverse) } case None => /* Palmer: If there is no input port on the source memory port diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index afdfe4f8c..6c941a3fa 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -201,6 +201,9 @@ trait HasSimpleTestGenerator { def extraPorts: Seq[mdf.macrolib.MacroExtraPort] = List() def extraTag: String = "" + // "Effective" libMaskGran by considering write_enable. + val effectiveLibMaskGran = libMaskGran.getOrElse(libWidth) + // Override this in the sub-generator if you need a more specific name. // Defaults to using reflection to pull the name of the test using this // generator. @@ -242,16 +245,62 @@ trait HasSimpleTestGenerator { writeToLib(lib, Seq(libSRAM)) writeToMem(mem, Seq(memSRAM)) + // For masks, width it's a bit tricky since we have to consider cases like + // memMaskGran = 4 and libMaskGran = 8. + // Consider the actually usable libWidth in cases like the above. 
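+  // e.g. with memMaskGran = 4 and libMaskGran = 8, only the bottom memMaskGran
+  // bits of each lib instance end up being used, so the usable width collapses
+  // to memMaskGran = 4.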
+ val usableLibWidth = if (memMaskGran.getOrElse(Int.MaxValue) < effectiveLibMaskGran) memMaskGran.get else libWidth + // Number of lib instances needed to hold the mem, in both directions. // Round up (e.g. 1.5 instances = effectively 2 instances) val depthInstances = math.ceil(memDepth.toFloat / libDepth).toInt - val widthInstances = math.ceil(memWidth.toFloat / libWidth).toInt + val widthInstances = math.ceil(memWidth.toFloat / usableLibWidth).toInt + // Number of width bits in the last width-direction memory. // e.g. if memWidth = 16 and libWidth = 8, this would be 8 since the last memory 0_1 has 8 bits of input width. // e.g. if memWidth = 9 and libWidth = 8, this would be 1 since the last memory 0_1 has 1 bit of input width. - val lastWidthBits = if (memWidth % libWidth == 0) libWidth else (memWidth % libWidth) + val lastWidthBits = if (memWidth % usableLibWidth == 0) usableLibWidth else (memWidth % usableLibWidth) val selectBits = mem_addr_width - lib_addr_width + /** + * Convenience function to generate a mask statement. + * @param widthInst Width instance (mem_0_x) + * @param depthInst Depth instance (mem_x_0) + */ + def generateMaskStatement(widthInst: Int, depthInst: Int): String = { + // Width of this submemory. + val myMemWidth = if (widthInst == widthInstances - 1) lastWidthBits else usableLibWidth + // Base bit of this submemory. + // e.g. if libWidth is 8 and this is submemory 2 (0-indexed), then this + // would be 16. + val myBaseBit = usableLibWidth*widthInst + + if (libMaskGran.isDefined) { + if (memMaskGran.isEmpty) { + // If there is no memory mask, we should just turn all the lib mask + // bits high. + s"""mem_${depthInst}_${widthInst}.lib_mask <= UInt<${libMaskBits}>("h${((1 << libMaskBits) - 1).toHexString}")""" + } else { + // Calculate which bit of outer_mask contains the given bit. + // e.g. if memMaskGran = 2, libMaskGran = 1 and libWidth = 4, then + // calculateMaskBit({0, 1}) = 0 and calculateMaskBit({1, 2}) = 1 + def calculateMaskBit(bit:Int): Int = bit / memMaskGran.getOrElse(memWidth) + + val bitsArr = ((libMaskBits - 1 to 0 by -1) map (x => { + if (x*libMaskGran.get > myMemWidth) { + // If we have extra mask bits leftover after the effective width, + // disable those bits. + """UInt<1>("h0")""" + } else { + val outerMaskBit = calculateMaskBit(x*libMaskGran.get + myBaseBit) + s"bits(outer_mask, ${outerMaskBit}, ${outerMaskBit})" + } + })) + val maskVal = bitsArr.reduceRight((bit, rest) => s"cat($bit, $rest)") + s"mem_${depthInst}_${widthInst}.lib_mask <= ${maskVal}" + } + } else "" + } + // Generate the header (contains the circuit statement and the target memory // module. def generateHeader(): String = { @@ -293,8 +342,6 @@ circuit $mem_name : def generateFooter(): String = { require (libSRAM.ports.size == 1, "Footer generator only supports single port lib") - val readEnable = if (libSRAM.ports(0).readEnable.isDefined) s"input ${libPortPrefix}_read_en : UInt<1>" else "" - val footerMask = if (libHasMask) s"input ${libPortPrefix}_mask : UInt<${libMaskBits}>" else "" s""" extmodule $lib_name : ${generateFooterPorts} diff --git a/macros/src/test/scala/Masks.scala b/macros/src/test/scala/Masks.scala index c00a162d0..b4fef89a8 100644 --- a/macros/src/test/scala/Masks.scala +++ b/macros/src/test/scala/Masks.scala @@ -4,14 +4,105 @@ import mdf.macrolib._ // Test the ability of the compiler to deal with various mask combinations. -// Simple powers of two with bit-masked lib. 
- trait MasksTestSettings { this: MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator => override lazy val memDepth = 2048 override lazy val libDepth = 1024 } +// Try all four different kinds of mask config: +/** + * + * Non-masked mem Masked mem + * --------------------------------- + * Non-masked lib | | | + * --------------------------------- + * Masked lib | | | + * --------------------------------- + */ + +class Masks_FourTypes_NonMaskedMem_NonMaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 32 + override lazy val memMaskGran = None + override lazy val libWidth = 8 + override lazy val libMaskGran = None + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_FourTypes_NonMaskedMem_MaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 32 + override lazy val memMaskGran = None + override lazy val libWidth = 8 + override lazy val libMaskGran = Some(2) + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_FourTypes_MaskedMem_NonMaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 32 + override lazy val memMaskGran = Some(8) + override lazy val libWidth = 8 + override lazy val libMaskGran = None + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_FourTypes_MaskedMem_NonMaskedLib_SmallerMaskGran extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 32 + override lazy val memMaskGran = Some(4) + override lazy val libWidth = 8 + override lazy val libMaskGran = None + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_FourTypes_MaskedMem_MaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 32 + override lazy val memMaskGran = Some(8) + override lazy val libWidth = 16 + override lazy val libMaskGran = Some(4) + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_FourTypes_MaskedMem_MaskedLib_SameMaskGran extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 32 + override lazy val memMaskGran = Some(8) + override lazy val libWidth = 16 + override lazy val libMaskGran = Some(8) + + compileExecuteAndTest(mem, lib, v, output) +} + +class Masks_FourTypes_MaskedMem_MaskedLib_SmallerMaskGran extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 64 + override lazy val memMaskGran = Some(4) + override lazy val libWidth = 32 + override lazy val libMaskGran = Some(8) + + compileExecuteAndTest(mem, lib, v, output) +} + +// FPGA-style byte-masked memories. + +class Masks_FPGAStyle_32_8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { + override lazy val width = 32 + override lazy val memMaskGran = Some(32) + override lazy val libMaskGran = Some(8) + + compileExecuteAndTest(mem, lib, v, output) +} + +// Simple powers of two with bit-masked lib. 
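+// (memMaskGran is a power of two and the lib mask is bit-granular in each of
+// these, so every mem mask bit maps onto a whole run of lib mask bits.)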
+ class Masks_PowersOfTwo_8_1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 64 override lazy val memMaskGran = Some(8) diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index d8be8fe53..442e44c75 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -29,26 +29,7 @@ s""" } for (i <- 0 to depthInstances - 1) { - // We only support simple masks for now (either libMask == memMask or libMask == 1) - val maskStatement = if (libHasMask) { - if (libMaskGran.get == memMaskGran.get) { - s"""mem_${i}_0.lib_mask <= bits(outer_mask, 0, 0)""" - } else if (libMaskGran.get == 1) { - // Construct a mask string. - // Each bit gets the # of bits specified in maskGran. - // Specify in descending order (MSB first) - - // This builds an array like m[1], m[1], m[0], m[0] - val maskBitsArr: Seq[String] = ((memMaskBits - 1 to 0 by -1) flatMap (maskBit => { - ((0 to memMaskGran.get - 1) map (_ => s"bits(outer_mask, ${maskBit}, ${maskBit})")) - })) - // Now build it into a recursive string like - // cat(m[1], cat(m[1], cat(m[0], m[0]))) - val maskBitsStr: String = maskBitsArr.reverse.tail.foldLeft(maskBitsArr.reverse.head)((prev: String, next: String) => s"cat(${next}, ${prev})") - s"""mem_${i}_0.lib_mask <= ${maskBitsStr}""" - } else "" // TODO: implement when non-bitmasked memories are supported - } else "" // No mask - + val maskStatement = generateMaskStatement(0, i) val enableIdentifier = if (selectBits > 0) s"""eq(outer_addr_sel, UInt<${selectBits}>("h${i.toHexString}"))""" else "UInt<1>(\"h1\")" output.append( s""" diff --git a/macros/src/test/scala/SimpleSplitWidth.scala b/macros/src/test/scala/SimpleSplitWidth.scala index b25c7d1bc..de00a2352 100644 --- a/macros/src/test/scala/SimpleSplitWidth.scala +++ b/macros/src/test/scala/SimpleSplitWidth.scala @@ -24,40 +24,28 @@ trait HasSimpleWidthTestGenerator extends HasSimpleTestGenerator { // Generate submemory connection blocks. output append (for (i <- 0 to widthInstances - 1) yield { // Width of this submemory. - val myMemWidth = if (i == widthInstances - 1) lastWidthBits else libWidth + val myMemWidth = if (i == widthInstances - 1) lastWidthBits else usableLibWidth // Base bit of this submemory. // e.g. if libWidth is 8 and this is submemory 2 (0-indexed), then this // would be 16. - val myBaseBit = libWidth*i - - val maskStatement = if (libMaskGran.isDefined) { - if (memMaskGran.isEmpty) { - // If there is no memory mask, we should just turn all the lib mask - // bits high. - s"""mem_0_${i}.lib_mask <= UInt<${libMaskBits}>("h${((1 << libMaskBits) - 1).toHexString}")""" - } else if (libMaskGran.get == memMaskGran.get) { - s"mem_0_${i}.lib_mask <= bits(outer_mask, ${i}, ${i})" - } else if (libMaskGran.get == 1) { - // Calculate which bit of outer_mask contains the given bit. - // e.g. 
if memMaskGran = 2, libMaskGran = 1 and libWidth = 4, then - // calculateMaskBit({0, 1}) = 0 and calculateMaskBit({1, 2}) = 1 - def calculateMaskBit(bit:Int): Int = (bit / libMaskGran.get) / memMaskGran.getOrElse(memWidth) - - val bitsArr = ((libMaskBits - 1 to 0 by -1) map (x => { - val outerMaskBit = calculateMaskBit(x*libMaskGran.get + myBaseBit) - s"bits(outer_mask, ${outerMaskBit}, ${outerMaskBit})" - })) - val maskVal = bitsArr.init.foldRight(bitsArr.last)((bit, rest) => s"cat($bit, $rest)") - s"mem_0_${i}.lib_mask <= ${maskVal}" - } else "" // We support only bit-level masks for now. - } else "" + val myBaseBit = usableLibWidth*i + + val maskStatement = generateMaskStatement(i, 0) + + // We need to use writeEnable as a crude "mask" if mem has a mask but + // lib does not. + val writeEnableBit = if (libMaskGran.isEmpty && memMaskGran.isDefined) { + val outerMaskBit = myBaseBit / memMaskGran.get + s"bits(outer_mask, ${outerMaskBit}, ${outerMaskBit})" + } else """UInt<1>("h1")""" + s""" mem_0_${i}.lib_clk <= outer_clk mem_0_${i}.lib_addr <= outer_addr node outer_dout_0_${i} = bits(mem_0_${i}.lib_dout, ${myMemWidth - 1}, 0) mem_0_${i}.lib_din <= bits(outer_din, ${myBaseBit + myMemWidth - 1}, ${myBaseBit}) ${maskStatement} - mem_0_${i}.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_${i}.lib_write_en <= and(and(outer_write_en, ${writeEnableBit}), UInt<1>("h1")) """ }).reduceLeft(_ + _) diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala index 7167ce619..fb884084a 100644 --- a/macros/src/test/scala/SpecificExamples.scala +++ b/macros/src/test/scala/SpecificExamples.scala @@ -188,90 +188,41 @@ circuit T_2172_ext : input RW0_wmode : UInt<1> input RW0_wmask : UInt<4> - inst mem_0_0 of SRAM1RW64x8 - inst mem_0_1 of SRAM1RW64x8 - inst mem_0_2 of SRAM1RW64x8 - inst mem_0_3 of SRAM1RW64x8 - inst mem_0_4 of SRAM1RW64x8 - inst mem_0_5 of SRAM1RW64x8 - inst mem_0_6 of SRAM1RW64x8 - inst mem_0_7 of SRAM1RW64x8 - inst mem_0_8 of SRAM1RW64x8 - inst mem_0_9 of SRAM1RW64x8 - inst mem_0_10 of SRAM1RW64x8 - inst mem_0_11 of SRAM1RW64x8 + inst mem_0_0 of SRAM1RW64x32 + inst mem_0_1 of SRAM1RW64x32 + inst mem_0_2 of SRAM1RW64x32 + inst mem_0_3 of SRAM1RW64x32 mem_0_0.clk <= RW0_clk mem_0_0.addr <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.dout, 7, 0) - mem_0_0.din <= bits(RW0_wdata, 7, 0) + node RW0_rdata_0_0 = bits(mem_0_0.dout, 19, 0) + mem_0_0.din <= bits(RW0_wdata, 19, 0) mem_0_0.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1")) mem_0_1.clk <= RW0_clk mem_0_1.addr <= RW0_addr - node RW0_rdata_0_1 = bits(mem_0_1.dout, 7, 0) - mem_0_1.din <= bits(RW0_wdata, 15, 8) - mem_0_1.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1")) + node RW0_rdata_0_1 = bits(mem_0_1.dout, 19, 0) + mem_0_1.din <= bits(RW0_wdata, 39, 20) + mem_0_1.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1")) mem_0_2.clk <= RW0_clk mem_0_2.addr <= RW0_addr - node RW0_rdata_0_2 = bits(mem_0_2.dout, 3, 0) - mem_0_2.din <= bits(RW0_wdata, 19, 16) - mem_0_2.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1")) + node RW0_rdata_0_2 = bits(mem_0_2.dout, 19, 0) + mem_0_2.din <= bits(RW0_wdata, 59, 40) + mem_0_2.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1")) mem_0_3.clk <= RW0_clk mem_0_3.addr <= RW0_addr - node RW0_rdata_0_3 = bits(mem_0_3.dout, 7, 0) - mem_0_3.din <= bits(RW0_wdata, 27, 20) - mem_0_3.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), 
UInt<1>("h1")) - mem_0_4.clk <= RW0_clk - mem_0_4.addr <= RW0_addr - node RW0_rdata_0_4 = bits(mem_0_4.dout, 7, 0) - mem_0_4.din <= bits(RW0_wdata, 35, 28) - mem_0_4.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1")) - mem_0_5.clk <= RW0_clk - mem_0_5.addr <= RW0_addr - node RW0_rdata_0_5 = bits(mem_0_5.dout, 3, 0) - mem_0_5.din <= bits(RW0_wdata, 39, 36) - mem_0_5.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1")) - mem_0_6.clk <= RW0_clk - mem_0_6.addr <= RW0_addr - node RW0_rdata_0_6 = bits(mem_0_6.dout, 7, 0) - mem_0_6.din <= bits(RW0_wdata, 47, 40) - mem_0_6.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1")) - mem_0_7.clk <= RW0_clk - mem_0_7.addr <= RW0_addr - node RW0_rdata_0_7 = bits(mem_0_7.dout, 7, 0) - mem_0_7.din <= bits(RW0_wdata, 55, 48) - mem_0_7.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1")) - mem_0_8.clk <= RW0_clk - mem_0_8.addr <= RW0_addr - node RW0_rdata_0_8 = bits(mem_0_8.dout, 3, 0) - mem_0_8.din <= bits(RW0_wdata, 59, 56) - mem_0_8.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1")) - mem_0_9.clk <= RW0_clk - mem_0_9.addr <= RW0_addr - node RW0_rdata_0_9 = bits(mem_0_9.dout, 7, 0) - mem_0_9.din <= bits(RW0_wdata, 67, 60) - mem_0_9.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1")) - mem_0_10.clk <= RW0_clk - mem_0_10.addr <= RW0_addr - node RW0_rdata_0_10 = bits(mem_0_10.dout, 7, 0) - mem_0_10.din <= bits(RW0_wdata, 75, 68) - mem_0_10.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1")) - mem_0_11.clk <= RW0_clk - mem_0_11.addr <= RW0_addr - node RW0_rdata_0_11 = bits(mem_0_11.dout, 3, 0) - mem_0_11.din <= bits(RW0_wdata, 79, 76) - mem_0_11.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1")) - node RW0_rdata_0 = cat(RW0_rdata_0_11, cat(RW0_rdata_0_10, cat(RW0_rdata_0_9, cat(RW0_rdata_0_8, cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))))))) + node RW0_rdata_0_3 = bits(mem_0_3.dout, 19, 0) + mem_0_3.din <= bits(RW0_wdata, 79, 60) + mem_0_3.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1")) + node RW0_rdata_0 = cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))) RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) - extmodule SRAM1RW64x8 : + extmodule SRAM1RW64x32 : input clk : Clock input addr : UInt<6> - input din : UInt<8> - output dout : UInt<8> + input din : UInt<32> + output dout : UInt<32> input write_en : UInt<1> - defname = SRAM1RW64x8 - + defname = SRAM1RW64x32 module T_1090_ext : input RW0_clk : Clock From af67540a8124e046e81e7687339568ea1912adfb Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 9 Aug 2017 01:06:39 -0700 Subject: [PATCH 092/273] Add test from Donggyu --- macros/src/test/scala/Masks.scala | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/macros/src/test/scala/Masks.scala b/macros/src/test/scala/Masks.scala index b4fef89a8..d1f91f1c0 100644 --- a/macros/src/test/scala/Masks.scala +++ b/macros/src/test/scala/Masks.scala @@ -91,6 +91,18 @@ class Masks_FourTypes_MaskedMem_MaskedLib_SmallerMaskGran extends MacroCompilerS compileExecuteAndTest(mem, lib, v, output) } +// Bit-mask memories to non-masked libs whose width is larger than 1. 
+ +class Masks_BitMaskedMem_NonMaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + override lazy val depth = 1024 + override lazy val memWidth = 16 + override lazy val memMaskGran = Some(1) + override lazy val libWidth = 8 + override lazy val libMaskGran = None + + compileExecuteAndTest(mem, lib, v, output) +} + // FPGA-style byte-masked memories. class Masks_FPGAStyle_32_8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { From 43d242707be16ed3d2225f6df6851a4637f05355 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 9 Aug 2017 01:12:17 -0700 Subject: [PATCH 093/273] Enable some more tests --- macros/src/test/scala/Masks.scala | 9 +++------ macros/src/test/scala/SimpleSplitDepth.scala | 3 +-- macros/src/test/scala/SimpleSplitWidth.scala | 4 +--- 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/macros/src/test/scala/Masks.scala b/macros/src/test/scala/Masks.scala index d1f91f1c0..1fd802022 100644 --- a/macros/src/test/scala/Masks.scala +++ b/macros/src/test/scala/Masks.scala @@ -154,8 +154,7 @@ class Masks_PowersOfTwo_32_4 extends MacroCompilerSpec with HasSRAMGenerator wit override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(4) - it should "be enabled when non-power of two masks are supported" is (pending) - //compileExecuteAndTest(mem, lib, v, output) + compileExecuteAndTest(mem, lib, v, output) } class Masks_PowersOfTwo_32_8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { @@ -163,8 +162,7 @@ class Masks_PowersOfTwo_32_8 extends MacroCompilerSpec with HasSRAMGenerator wit override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(8) - it should "be enabled when non-power of two masks are supported" is (pending) - //compileExecuteAndTest(mem, lib, v, output) + compileExecuteAndTest(mem, lib, v, output) } class Masks_PowersOfTwo_8_8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { @@ -172,8 +170,7 @@ class Masks_PowersOfTwo_8_8 extends MacroCompilerSpec with HasSRAMGenerator with override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(8) - it should "be enabled when non-power of two masks are supported" is (pending) - //compileExecuteAndTest(mem, lib, v, output) + compileExecuteAndTest(mem, lib, v, output) } // Width as a multiple of the mask, bit-masked lib diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index 442e44c75..9d1c6dcd9 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -161,8 +161,7 @@ class SplitDepth2048x64_mrw_mem32_lib8 extends MacroCompilerSpec with HasSRAMGen override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(8) - it should "be enabled when non-bitmasked memories are supported" is (pending) - //compileExecuteAndTest(mem, lib, v, output) + compileExecuteAndTest(mem, lib, v, output) } // Bit level mask diff --git a/macros/src/test/scala/SimpleSplitWidth.scala b/macros/src/test/scala/SimpleSplitWidth.scala index de00a2352..f9835bd4a 100644 --- a/macros/src/test/scala/SimpleSplitWidth.scala +++ b/macros/src/test/scala/SimpleSplitWidth.scala @@ -365,9 +365,7 @@ class SplitWidth1024x16_memGran_8_libGran_2_rw extends MacroCompilerSpec with Ha override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(2) - it should "be enabled 
when non-bit masks are supported" is (pending) - //~ compile(mem, lib, v, false) - //~ execute(mem, lib, false, output) + compileExecuteAndTest(mem, lib, v, output) } // Non-power of two memGran From 11bd81165bd0d3a79476e5722b41a18581871b27 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 9 Aug 2017 02:49:52 -0700 Subject: [PATCH 094/273] Bump mdf --- mdf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mdf b/mdf index b3677862b..671980069 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit b3677862b21a59a63b20dd963ce9c518f293cfac +Subproject commit 6719800697a6ce71af61ad4ac36f90d46e36fe0d From 13d8a0f8f5ff7c8c011e8a6ce6253595b7dc3b7c Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 9 Aug 2017 04:08:03 -0700 Subject: [PATCH 095/273] Add strict mode --- macros/src/main/scala/MacroCompiler.scala | 88 +++++++++++++------ macros/src/test/scala/MacroCompilerSpec.scala | 4 +- 2 files changed, 65 insertions(+), 27 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 162e015ae..be5f24752 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -19,15 +19,35 @@ import scala.collection.mutable.{ArrayBuffer, HashMap} import java.io.{File, FileWriter} import Utils._ +case class MacroCompilerException(msg: String) extends Exception(msg) + object MacroCompilerAnnotation { + /** Macro compiler mode. */ + sealed trait CompilerMode + /** Strict mode - must compile all memories or error out. */ + case object Strict extends CompilerMode + /** Synflops mode - compile all memories with synflops (do not map to lib at all). */ + case object Synflops extends CompilerMode + /** FallbackSynflops - compile all memories to SRAM when possible and fall back to synflops if a memory fails. **/ + case object FallbackSynflops extends CompilerMode + /** Default mode - compile what is possible and do nothing with uncompiled memories. **/ + case object Default extends CompilerMode + def stringToCompilerMode(str: String): CompilerMode = (str: @unchecked) match { + case "strict" => Strict + case "synflops" => Synflops + case "fallbacksynflops" => FallbackSynflops + case "default" => Default + case _ => throw new IllegalArgumentException("No such compiler mode " + str) + } + /** * Parameters associated to this MacroCompilerAnnotation. * @param mem Path to memory lib * @param lib Path to library lib or None if no libraries * @param costMetric Cost metric to use - * @param synflops True to syn flops + * @param mode Compiler mode (see CompilerMode) */ - case class Params(mem: String, lib: Option[String], costMetric: CostMetric, synflops: Boolean) + case class Params(mem: String, lib: Option[String], costMetric: CostMetric, mode: CompilerMode) /** * Create a MacroCompilerAnnotation. 
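A minimal usage sketch of the annotation described above, assuming the macro compiler classes are in scope and using hypothetical JSON paths and a placeholder top-level circuit name (the circuit is annotated once at the top; Params carries every compiler setting, including the new mode):

    val settings = MacroCompilerAnnotation.Params(
      mem = "macros.json",           // hypothetical path to the memory MDF
      lib = Some("lib.json"),        // hypothetical path to the library MDF
      costMetric = CostMetric.default,
      mode = MacroCompilerAnnotation.stringToCompilerMode("strict")
    )
    // "MyTopCircuit" is a placeholder for the top-level circuit name.
    val topLevelAnnotation = MacroCompilerAnnotation("MyTopCircuit", settings)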
@@ -48,7 +68,8 @@ object MacroCompilerAnnotation { class MacroCompilerPass(mems: Option[Seq[Macro]], libs: Option[Seq[Macro]], - costMetric: CostMetric = CostMetric.default) extends firrtl.passes.Pass { + costMetric: CostMetric = CostMetric.default, + mode: MacroCompilerAnnotation.CompilerMode = MacroCompilerAnnotation.Default) extends firrtl.passes.Pass { def compile(mem: Macro, lib: Macro): Option[(Module, ExtModule)] = { val pairedPorts = mem.sortedPorts zip lib.sortedPorts @@ -441,10 +462,11 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], costMetric.cost(mem, lib) match { case Some(newCost) => { System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${newCost}") - if (newCost > cost) (best, cost) - else compile(mem, lib) match { - case None => (best, cost) - case Some(p) => (Some(p), newCost) + // Try compiling + compile(mem, lib) match { + // If it was successful and the new cost is lower + case Some(p) if (newCost < cost) => (Some(p), newCost) + case _ => (best, cost) } } case _ => (best, cost) // Cost function rejected this combination. @@ -455,7 +477,12 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // in the modules list with a compiled version, as well as the extmodule // stub for the lib. best match { - case None => modules + case None => { + if (mode == MacroCompilerAnnotation.Strict) + throw new MacroCompilerException(s"Target memory ${mem.src.name} could not be compiled and strict mode is activated - aborting.") + else + modules + } case Some((mod, bb)) => (modules filterNot (m => m.name == mod.name || m.name == bb.name)) ++ Seq(mod, bb) } @@ -470,7 +497,10 @@ class MacroCompilerTransform extends Transform { def inputForm = MidForm def outputForm = MidForm def execute(state: CircuitState) = getMyAnnotations(state) match { - case Seq(MacroCompilerAnnotation(state.circuit.main, MacroCompilerAnnotation.Params(memFile, libFile, costMetric, synflops))) => + case Seq(MacroCompilerAnnotation(state.circuit.main, MacroCompilerAnnotation.Params(memFile, libFile, costMetric, mode))) => + if (mode == MacroCompilerAnnotation.FallbackSynflops) { + throw new UnsupportedOperationException("Not implemented yet") + } // Read, eliminate None, get only SRAM, make firrtl macro val mems: Option[Seq[Macro]] = mdf.macrolib.Utils.readMDFFromPath(Some(memFile)) match { case Some(x:Seq[mdf.macrolib.Macro]) => @@ -483,8 +513,8 @@ class MacroCompilerTransform extends Transform { case _ => None } val transforms = Seq( - new MacroCompilerPass(mems, libs, costMetric), - new SynFlopsPass(synflops, libs getOrElse mems.get)) + new MacroCompilerPass(mems, libs, costMetric, mode), + new SynFlopsPass(mode == MacroCompilerAnnotation.Synflops, libs getOrElse mems.get)) (transforms foldLeft state)((s, xform) => xform runTransform s).copy(form=outputForm) case _ => state } @@ -518,6 +548,7 @@ object MacroCompiler extends App { case object Library extends MacroParam case object Verilog extends MacroParam case object CostFunc extends MacroParam + case object Mode extends MacroParam type MacroParamMap = Map[MacroParam, String] type CostParamMap = Map[String, String] val usage = Seq( @@ -527,23 +558,28 @@ object MacroCompiler extends App { " -v, --verilog: Verilog output", " -c, --cost-func: Cost function to use. Optional (default: \"default\")", " -cp, --cost-param: Cost function parameter. (Optional depending on the cost function.). e.g. 
-c ExternalMetric -cp path /path/to/my/cost/script", - " --syn-flops: Produces synthesizable flop-based memories (for all memories and library memory macros); likely useful for simulation purposes") mkString "\n" - - def parseArgs(map: MacroParamMap, costMap: CostParamMap, synflops: Boolean, args: List[String]): (MacroParamMap, CostParamMap, Boolean) = + """ --mode: + | synflops: Produces synthesizable flop-based memories (for all memories and library memory macros); likely useful for simulation purposes") + | fallbacksynflops: Compile all memories to library when possible and fall back to synthesizable flop-based memories when library synth is not possible + | strict: Compile all memories to library or return an error + | default: Compile all memories to library when possible and do nothing in case of errors. + """.stripMargin) mkString "\n" + + def parseArgs(map: MacroParamMap, costMap: CostParamMap, args: List[String]): (MacroParamMap, CostParamMap) = args match { - case Nil => (map, costMap, synflops) + case Nil => (map, costMap) case ("-m" | "--macro-list") :: value :: tail => - parseArgs(map + (Macros -> value), costMap, synflops, tail) + parseArgs(map + (Macros -> value), costMap, tail) case ("-l" | "--library") :: value :: tail => - parseArgs(map + (Library -> value), costMap, synflops, tail) + parseArgs(map + (Library -> value), costMap, tail) case ("-v" | "--verilog") :: value :: tail => - parseArgs(map + (Verilog -> value), costMap, synflops, tail) + parseArgs(map + (Verilog -> value), costMap, tail) case ("-c" | "--cost-func") :: value :: tail => - parseArgs(map + (CostFunc -> value), costMap, synflops, tail) + parseArgs(map + (CostFunc -> value), costMap, tail) case ("-cp" | "--cost-param") :: value1 :: value2 :: tail => - parseArgs(map, costMap + (value1 -> value2), synflops, tail) - case "--syn-flops" :: tail => - parseArgs(map, costMap, true, tail) + parseArgs(map, costMap + (value1 -> value2), tail) + case "--mode" :: value :: tail => + parseArgs(map + (Mode -> value), costMap, tail) case arg :: tail => println(s"Unknown field $arg\n") println(usage) @@ -551,7 +587,7 @@ object MacroCompiler extends App { } def run(args: List[String]) { - val (params, costParams, synflops) = parseArgs(Map[MacroParam, String](), Map[String, String](), false, args) + val (params, costParams) = parseArgs(Map[MacroParam, String](), Map[String, String](), args) try { val macros = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) @@ -568,7 +604,7 @@ object MacroCompiler extends App { MacroCompilerAnnotation.Params( params.get(Macros).get, params.get(Library), CostMetric.getCostMetric(params.getOrElse(CostFunc, "default"), costParams), - synflops + MacroCompilerAnnotation.stringToCompilerMode(params.getOrElse(Mode, "default")) ) )) ) @@ -583,11 +619,13 @@ object MacroCompiler extends App { // Close the writer. 
verilogWriter.close() - } catch { case e: java.util.NoSuchElementException => println(usage) sys.exit(1) + case e: MacroCompilerException => + System.err.println(e.getMessage) + sys.exit(1) case e: Throwable => throw e } diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 6c941a3fa..720bb8122 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -36,7 +36,7 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate List("-m", mem.toString, "-v", v) ++ (lib match { case None => Nil case Some(l) => List("-l", l.toString) }) ++ costMetricCmdLine ++ - (if (synflops) List("--syn-flops") else Nil) + (if (synflops) List("--mode", "synflops") else Nil) // Run the full compiler as if from the command line interface. // Generates the Verilog; useful in testing since an error will throw an @@ -98,7 +98,7 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate val macros = mems map (_.blackbox) val circuit = Circuit(NoInfo, macros, macros.last.name) val passes = Seq( - new MacroCompilerPass(Some(mems), libs, getCostMetric), + new MacroCompilerPass(Some(mems), libs, getCostMetric, if (synflops) MacroCompilerAnnotation.Synflops else MacroCompilerAnnotation.Default), new SynFlopsPass(synflops, libs getOrElse mems), RemoveEmpty) val result: Circuit = (passes foldLeft circuit)((c, pass) => pass run c) From 4eca53ba55930a21c4e217953b8d21455cc02891 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 9 Aug 2017 04:31:16 -0700 Subject: [PATCH 096/273] Bump mdf again --- mdf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mdf b/mdf index 671980069..f92bb2d7f 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit 6719800697a6ce71af61ad4ac36f90d46e36fe0d +Subproject commit f92bb2d7f22da89a0c24251601cb103478cf9dbb From d2b105079dcc949a9395eac493f1c12c9b86b1cd Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 9 Aug 2017 12:58:43 -0700 Subject: [PATCH 097/273] Not a scaladoc --- macros/src/main/scala/MacroCompiler.scala | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index be5f24752..20d288b8f 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -88,10 +88,8 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], for (memBit <- 0 until mem.src.width) { val bitsInCurrentMem = memBit - currentLSB - /** - * Helper function to check if it's time to split memories. - * @param effectiveLibWidth Split memory when we have this many bits. - */ + // Helper function to check if it's time to split memories. + // @param effectiveLibWidth Split memory when we have this many bits. 
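+    // e.g. with an 8-bit lib this fires at mem bits 8, 16, 24, ..., emitting
+    // the inclusive pairs (0, 7), (8, 15), (16, 23), ...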
def splitMemory(effectiveLibWidth: Int): Unit = { if (bitsInCurrentMem == effectiveLibWidth) { bitPairs += ((currentLSB, memBit - 1)) From bc26f5eb1a963057cda2d994b4a8f3aaaf3c8464 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Sun, 27 Aug 2017 20:03:28 -0700 Subject: [PATCH 098/273] Address review comments --- macros/src/main/scala/CostMetric.scala | 28 ++++--- macros/src/main/scala/MacroCompiler.scala | 87 +++++++++++++------- macros/src/test/scala/SpecificExamples.scala | 6 +- 3 files changed, 79 insertions(+), 42 deletions(-) diff --git a/macros/src/main/scala/CostMetric.scala b/macros/src/main/scala/CostMetric.scala index 6e790f8ce..afcb26606 100644 --- a/macros/src/main/scala/CostMetric.scala +++ b/macros/src/main/scala/CostMetric.scala @@ -21,7 +21,7 @@ trait CostMetric extends Serializable { def cost(mem: Macro, lib: Macro): Option[BigInt] /** - * Helper function to return the map of argments (or an empty map if there are none). + * Helper function to return the map of arguments (or an empty map if there are none). */ def commandLineParams(): Map[String, String] @@ -39,8 +39,10 @@ trait CostMetricCompanion { // Some default cost functions. -/** Palmer's old metric. */ -object PalmerMetric extends CostMetric with CostMetricCompanion { +/** Palmer's old metric. + * TODO: figure out what is the difference between this metric and the current + * default metric and either revive or delete this metric. */ +object OldMetric extends CostMetric with CostMetricCompanion { override def cost(mem: Macro, lib: Macro): Option[BigInt] = { /* Palmer: A quick cost function (that must be kept in sync with * memory_cost()) that attempts to avoid compiling unncessary @@ -52,15 +54,15 @@ object PalmerMetric extends CostMetric with CostMetricCompanion { } override def commandLineParams = Map() - override def name = "PalmerMetric" - override def construct(m: Map[String, String]) = PalmerMetric + override def name = "OldMetric" + override def construct(m: Map[String, String]) = OldMetric } /** * An external cost function. * Calls the specified path with paths to the JSON MDF representation of the mem - * and lib macros. The external executable should return a BigInt. - * None will be returned if the external executable does not return a valid + * and lib macros. The external executable should print a BigInt. + * None will be returned if the external executable does not print a valid * BigInt. */ class ExternalMetric(path: String) extends CostMetric { @@ -110,7 +112,7 @@ object ExternalMetric extends CostMetricCompanion { /** The current default metric in barstools, re-defined by Donggyu. */ // TODO: write tests for this function to make sure it selects the right things -object NewDefaultMetric extends CostMetric with CostMetricCompanion { +object DefaultMetric extends CostMetric with CostMetricCompanion { override def cost(mem: Macro, lib: Macro): Option[BigInt] = { val memMask = mem.src.ports map (_.maskGran) find (_.isDefined) map (_.get) val libMask = lib.src.ports map (_.maskGran) find (_.isDefined) map (_.get) @@ -126,8 +128,8 @@ object NewDefaultMetric extends CostMetric with CostMetricCompanion { } override def commandLineParams = Map() - override def name = "NewDefaultMetric" - override def construct(m: Map[String, String]) = NewDefaultMetric + override def name = "DefaultMetric" + override def construct(m: Map[String, String]) = DefaultMetric } object MacroCompilerUtil { @@ -158,14 +160,14 @@ object MacroCompilerUtil { object CostMetric { /** Define some default metric. 
*/ - val default: CostMetric = NewDefaultMetric + val default: CostMetric = DefaultMetric val costMetricCreators: scala.collection.mutable.Map[String, CostMetricCompanion] = scala.collection.mutable.Map() // Register some default metrics - registerCostMetric(PalmerMetric) + registerCostMetric(OldMetric) registerCostMetric(ExternalMetric) - registerCostMetric(NewDefaultMetric) + registerCostMetric(DefaultMetric) /** * Register a cost metric. diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 20d288b8f..44eec184f 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -21,6 +21,17 @@ import Utils._ case class MacroCompilerException(msg: String) extends Exception(msg) +/** + * The MacroCompilerAnnotation to trigger the macro compiler. + * Note that this annotation does NOT actually target any modules for + * compilation. It simply holds all the settings for the memory compiler. The + * actual selection of which memories to compile is set in the Params. + * + * To use, simply annotate the entire circuit itself with this annotation and + * include [[MacroCompilerTransform]]. + * + * TODO: make this into a "true" annotation? + */ object MacroCompilerAnnotation { /** Macro compiler mode. */ sealed trait CompilerMode @@ -30,12 +41,22 @@ object MacroCompilerAnnotation { case object Synflops extends CompilerMode /** FallbackSynflops - compile all memories to SRAM when possible and fall back to synflops if a memory fails. **/ case object FallbackSynflops extends CompilerMode - /** Default mode - compile what is possible and do nothing with uncompiled memories. **/ - case object Default extends CompilerMode + /** CompileAvailable - compile what is possible and do nothing with uncompiled memories. **/ + case object CompileAvailable extends CompilerMode + + /** + * The default mode for the macro compiler. + * TODO: Maybe set the default to FallbackSynflops (typical for + * vlsi_mem_gen-like scripts) once it's implemented? + */ + val Default = CompileAvailable + + /** Helper function to select a compiler mode. */ def stringToCompilerMode(str: String): CompilerMode = (str: @unchecked) match { case "strict" => Strict case "synflops" => Synflops case "fallbacksynflops" => FallbackSynflops + case "compileavailable" => CompileAvailable case "default" => Default case _ => throw new IllegalArgumentException("No such compiler mode " + str) } @@ -51,7 +72,7 @@ object MacroCompilerAnnotation { /** * Create a MacroCompilerAnnotation. - * @param c Name of the module(?) for this annotation. + * @param c Top-level circuit name (see class description) * @param p Parameters (see above). */ def apply(c: String, p: Params): Annotation = @@ -73,7 +94,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], def compile(mem: Macro, lib: Macro): Option[(Module, ExtModule)] = { val pairedPorts = mem.sortedPorts zip lib.sortedPorts - // Parallel mapping + // Width mapping /** * This is a list of submemories by width. @@ -168,6 +189,13 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // In this case we can only compile very wastefully (by treating // lib as a mem maskGran width memory) :( splitMemory(memMask.get) + + // TODO: there's an optimization that could allow us to pack more + // bits in and be more efficient. + // e.g. say if mem maskGran = 4, lib maskGran = 8, libWidth = 32 + // We could use 16 of bit (bits 0-3, 8-11, 16-19, 24-27) instead + // of treating it as simply a width 4 (!!!) memory. 
+ // This would require a major refactor though. } else { System.err.println(s"Lib maskGran ${m} is not a multiple of mem maskGran ${l}: currently not supported") return None @@ -180,7 +208,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Add in the last chunk if there are any leftovers bitPairs += ((currentLSB, mem.src.width.toInt - 1)) - // Serial mapping + // Depth mapping val stmts = ArrayBuffer[Statement]() val outputs = HashMap[String, ArrayBuffer[(Expression, Expression)]]() val selects = HashMap[String, Expression]() @@ -270,7 +298,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], * together a bunch of narrower memories, which can only be * done after generating all the memories. This saves up the * output statements for later. */ - val name = s"${mem}_${i}_${j}" + val name = s"${mem}_${i}_${j}" // This name is the output from the instance (mem vs ${mem}). val exp = portToExpression(bits(WSubField(inst, lib), high-low, 0), Some(lib_polarity)) stmts += DefNode(NoInfo, name, exp) cats += WRef(name) @@ -332,15 +360,17 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], })).reverse) } case None => - /* Palmer: If there is no input port on the source memory port - * then we don't ever want to turn on this write - * enable. Otherwise, we just _always_ turn on the - * write enable port on the inner memory. */ - if (libPort.src.maskPort.isEmpty) one - else { + /* If there is a lib mask port but no mem mask port, just turn on + * all bits of the lib mask port. */ + if (libPort.src.maskPort.isDefined) { val width = libPort.src.width / libPort.src.effectiveMaskGran val value = (BigInt(1) << width.toInt) - 1 UIntLiteral(value, IntWidth(width)) + } else { + // No mask ports on either side. + // We treat a "mask" of a single bit to be equivalent to a write + // enable (as used below). + one } } @@ -390,25 +420,26 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], stmts += connectPorts(memMask, mask, mask_polarity) stmts += connectPorts(andAddrMatch(and(memWriteEnable, memChipEnable)), we, mask_polarity) - case (None, Some(PolarizedPort(we, we_polarity)), chipEnable) if bitWidth(memMask.tpe) == 1 => - /* Palmer: If we're expected to provide mask ports without a - * memory that actually has them then we can use the - * write enable port instead of the mask port. */ - stmts += connectPorts(andAddrMatch(and(memWriteEnable, memMask)), - we, we_polarity) - chipEnable match { - case Some(PolarizedPort(en, en_polarity)) => { - stmts += connectPorts(andAddrMatch(memChipEnable), en, en_polarity) + case (None, Some(PolarizedPort(we, we_polarity)), chipEnable) => + if (bitWidth(memMask.tpe) == 1) { + /* Palmer: If we're expected to provide mask ports without a + * memory that actually has them then we can use the + * write enable port instead of the mask port. */ + stmts += connectPorts(andAddrMatch(and(memWriteEnable, memMask)), + we, we_polarity) + chipEnable match { + case Some(PolarizedPort(en, en_polarity)) => { + stmts += connectPorts(andAddrMatch(memChipEnable), en, en_polarity) + } + case _ => // TODO: do we care about the case where mem has chipEnable but lib doesn't? } - case _ => // TODO: do we care about the case where mem has chipEnable but lib doesn't? 
+ } else { + System.err.println("cannot emulate multi-bit mask ports with write enable") + return None } - case (None, Some(PolarizedPort(we, we_polarity)), Some(PolarizedPort(en, en_polarity))) => - // TODO - System.err.println("cannot emulate multi-bit mask ports with write enable") - return None case (None, None, None) => - /* Palmer: There's nothing to do here since there aren't any - * ports to match up. */ + // No write ports to match up (this may be a read-only port). + // This isn't necessarily an error condition. } } // Cat macro outputs for selection diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala index fb884084a..2775f9a72 100644 --- a/macros/src/test/scala/SpecificExamples.scala +++ b/macros/src/test/scala/SpecificExamples.scala @@ -385,5 +385,9 @@ circuit T_2172_ext : defname = SRAM2RW64x32 """ - compileExecuteAndTest(mem, lib, v, output) + // TODO FIXME: Enable this test when firrtl #644 https://github.com/freechipsproject/firrtl/issues/644 is fixed + "rocket example" should "work" in { + pending + } + //~ compileExecuteAndTest(mem, lib, v, output) } From e09f8b1b0d70f3fc0ce75990f09d4cc8d7ed7efc Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 3 Oct 2017 01:06:02 -0700 Subject: [PATCH 099/273] Fix grammar --- macros/src/main/scala/MacroCompiler.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 44eec184f..2f66ca9c3 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -588,9 +588,9 @@ object MacroCompiler extends App { " -c, --cost-func: Cost function to use. Optional (default: \"default\")", " -cp, --cost-param: Cost function parameter. (Optional depending on the cost function.). e.g. -c ExternalMetric -cp path /path/to/my/cost/script", """ --mode: - | synflops: Produces synthesizable flop-based memories (for all memories and library memory macros); likely useful for simulation purposes") - | fallbacksynflops: Compile all memories to library when possible and fall back to synthesizable flop-based memories when library synth is not possible - | strict: Compile all memories to library or return an error + | synflops: Produces synthesizable flop-based memories (for all memories and library memory macros); likely useful for simulation purposes. + | fallbacksynflops: Compile all memories to library when possible and fall back to synthesizable flop-based memories when library synth is not possible. + | strict: Compile all memories to library or return an error. | default: Compile all memories to library when possible and do nothing in case of errors. 
""".stripMargin) mkString "\n" From c91d98d5b3104a1c416e3a0aa098d672d1fc9639 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 3 Oct 2017 11:24:44 -0700 Subject: [PATCH 100/273] Bump mdf for the last time, for now --- mdf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mdf b/mdf index f92bb2d7f..2b5f3c16d 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit f92bb2d7f22da89a0c24251601cb103478cf9dbb +Subproject commit 2b5f3c16daac6cd6eb9ed6aa2b9d836cd6e0648c From e1499fcdc0016697b949cfbef5364daa72f8e6c5 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Tue, 3 Oct 2017 11:34:48 -0700 Subject: [PATCH 101/273] Update command line help --- macros/src/main/scala/MacroCompiler.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 2f66ca9c3..f032a9794 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -588,10 +588,10 @@ object MacroCompiler extends App { " -c, --cost-func: Cost function to use. Optional (default: \"default\")", " -cp, --cost-param: Cost function parameter. (Optional depending on the cost function.). e.g. -c ExternalMetric -cp path /path/to/my/cost/script", """ --mode: + | strict: Compile all memories to library or return an error. | synflops: Produces synthesizable flop-based memories (for all memories and library memory macros); likely useful for simulation purposes. | fallbacksynflops: Compile all memories to library when possible and fall back to synthesizable flop-based memories when library synth is not possible. - | strict: Compile all memories to library or return an error. - | default: Compile all memories to library when possible and do nothing in case of errors. + | compileavailable: Compile all memories to library when possible and do nothing in case of errors. (default) """.stripMargin) mkString "\n" def parseArgs(map: MacroParamMap, costMap: CostParamMap, args: List[String]): (MacroParamMap, CostParamMap) = From c884a2fb15bba3f9c9bf3656be3006a01a421b88 Mon Sep 17 00:00:00 2001 From: edwardcwang Date: Fri, 6 Oct 2017 18:04:49 -0700 Subject: [PATCH 102/273] Correct multi-ported memory compilation (#27) * Correct multi-ported memory compilation It was incorrectly splitting multiple times before. Fixed the issue and added regression tests for this issue. * Add 1 read 1 write test --- macros/src/main/scala/MacroCompiler.scala | 55 +- macros/src/test/resources/lib-BOOMTest.json | 1165 ++++++++++++++++ macros/src/test/scala/MacroCompilerSpec.scala | 82 +- macros/src/test/scala/MultiPort.scala | 392 ++++++ macros/src/test/scala/SpecificExamples.scala | 1211 +++++++++++++++++ 5 files changed, 2869 insertions(+), 36 deletions(-) create mode 100644 macros/src/test/resources/lib-BOOMTest.json create mode 100644 macros/src/test/scala/MultiPort.scala diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index f032a9794..a9025e558 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -91,6 +91,17 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], libs: Option[Seq[Macro]], costMetric: CostMetric = CostMetric.default, mode: MacroCompilerAnnotation.CompilerMode = MacroCompilerAnnotation.Default) extends firrtl.passes.Pass { + // Helper function to check the legality of bitPairs. + // e.g. 
((0,21), (22,43)) is legal + // ((0,21), (22,21)) is illegal and will throw an assert + private def checkBitPairs(bitPairs: Seq[(BigInt, BigInt)]): Unit = { + bitPairs.foldLeft(BigInt(-1))((lastBit, nextPair) => { + assert(lastBit + 1 == nextPair._1, s"Pair's first bit ${nextPair._1} does not follow last bit ${lastBit}"); + assert(nextPair._2 >= nextPair._1, s"Pair ${nextPair} in bitPairs ${bitPairs} is illegal"); + nextPair._2 + }) + } + def compile(mem: Macro, lib: Macro): Option[(Module, ExtModule)] = { val pairedPorts = mem.sortedPorts zip lib.sortedPorts @@ -103,23 +114,33 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], * width=8 memories. */ val bitPairs = ArrayBuffer[(BigInt, BigInt)]() - var currentLSB = 0 + var currentLSB: BigInt = 0 // Process every bit in the mem width. for (memBit <- 0 until mem.src.width) { val bitsInCurrentMem = memBit - currentLSB - // Helper function to check if it's time to split memories. - // @param effectiveLibWidth Split memory when we have this many bits. - def splitMemory(effectiveLibWidth: Int): Unit = { - if (bitsInCurrentMem == effectiveLibWidth) { - bitPairs += ((currentLSB, memBit - 1)) - currentLSB = memBit - } - } - + // We'll need to find a bitPair that works for *all* the ports of the memory. + // e.g. unmasked read port and masked write port. + // For each port, store a tentative candidate for the split. + // Afterwards, figure out which one to use. + val bitPairCandidates = ArrayBuffer[(BigInt, BigInt)]() for ((memPort, libPort) <- pairedPorts) { + // Sanity check to make sure we only split once per bit, once per port. + var alreadySplit: Boolean = false + + // Helper function to check if it's time to split memories. + // @param effectiveLibWidth Split memory when we have this many bits. + def splitMemory(effectiveLibWidth: Int): Unit = { + assert (!alreadySplit) + + if (bitsInCurrentMem == effectiveLibWidth) { + bitPairCandidates += ((currentLSB, memBit - 1)) + alreadySplit = true + } + } + // Make sure we don't have a maskGran larger than the width of the memory. assert (memPort.src.effectiveMaskGran <= memPort.src.width) assert (libPort.src.effectiveMaskGran <= libPort.src.width) @@ -204,9 +225,23 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } } } + + // Choose an actual bit pair to add. + // We'll have to choose the smallest one (e.g. unmasked read port might be more tolerant of a bigger split than the masked write port). 
+ if (bitPairCandidates.length == 0) { + // No pair needed to split, just continue + } else { + val bestPair = bitPairCandidates.reduceLeft((leftPair, rightPair) => { + if (leftPair._2 - leftPair._1 + 1 > rightPair._2 - rightPair._1 + 1) leftPair else rightPair + }) + bitPairs += bestPair + currentLSB = bestPair._2 + BigInt(1) // advance the LSB pointer + } } // Add in the last chunk if there are any leftovers bitPairs += ((currentLSB, mem.src.width.toInt - 1)) + // Check bit pairs + checkBitPairs(bitPairs) // Depth mapping val stmts = ArrayBuffer[Statement]() diff --git a/macros/src/test/resources/lib-BOOMTest.json b/macros/src/test/resources/lib-BOOMTest.json new file mode 100644 index 000000000..8246bc3d3 --- /dev/null +++ b/macros/src/test/resources/lib-BOOMTest.json @@ -0,0 +1,1165 @@ +[ + { + "family": "1rw", + "width": 8, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_1024x8", + "type": "sram", + "depth": 1024 + }, + { + "family": "1rw", + "width": 46, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_128x46", + "type": "sram", + "depth": 128 + }, + { + "family": "1rw", + "width": 48, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_128x48", + "type": "sram", + "depth": 128 + }, + { + "family": "1rw", + "width": 8, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_128x8", + "type": "sram", + "depth": 128 + }, + { + "family": "1rw", + "width": 128, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + 
"output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_256x128", + "type": "sram", + "depth": 256 + }, + { + "family": "1rw", + "width": 32, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_256x32", + "type": "sram", + "depth": 256 + }, + { + "family": "1rw", + "width": 46, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_256x46", + "type": "sram", + "depth": 256 + }, + { + "family": "1rw", + "width": 48, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_256x48", + "type": "sram", + "depth": 256 + }, + { + "family": "1rw", + "width": 8, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_256x8", + "type": "sram", + "depth": 256 + }, + { + "family": "1rw", + "width": 50, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": 
"OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_32x50", + "type": "sram", + "depth": 32 + }, + { + "family": "1rw", + "width": 128, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_512x128", + "type": "sram", + "depth": 512 + }, + { + "family": "1rw", + "width": 32, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_512x32", + "type": "sram", + "depth": 512 + }, + { + "family": "1rw", + "width": 8, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_512x8", + "type": "sram", + "depth": 512 + }, + { + "family": "1rw", + "width": 128, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_64x128", + "type": "sram", + "depth": 64 + }, + { + "family": "1rw", + "width": 32, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_64x32", + "type": "sram", + "depth": 64 + }, + { + "family": "1rw", + "width": 34, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port 
polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_64x34", + "type": "sram", + "depth": 64 + }, + { + "family": "1rw", + "width": 8, + "ports": [ + { + "chip enable port name": "CSB", + "write enable port name": "WEB", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE", + "chip enable port polarity": "active low", + "address port name": "A", + "read enable port name": "OEB", + "input port name": "I", + "input port polarity": "active high" + } + ], + "name": "my_sram_1rw_64x8", + "type": "sram", + "depth": 64 + }, + { + "family": "2rw", + "width": 16, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_128x16", + "type": "sram", + "depth": 128 + }, + { + "family": "2rw", + "width": 32, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_128x32", + "type": "sram", + "depth": 128 + }, + { + "family": "2rw", + "width": 4, + "ports": [ + { + "chip enable port name": "CSB1", + 
"write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_128x4", + "type": "sram", + "depth": 128 + }, + { + "family": "2rw", + "width": 8, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_128x8", + "type": "sram", + "depth": 128 + }, + { + "family": "2rw", + "width": 16, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_16x16", + "type": "sram", + "depth": 16 + }, + { + "family": "2rw", + "width": 32, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port 
name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_16x32", + "type": "sram", + "depth": 16 + }, + { + "family": "2rw", + "width": 4, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_16x4", + "type": "sram", + "depth": 16 + }, + { + "family": "2rw", + "width": 8, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_16x8", + "type": "sram", + "depth": 16 + }, + { + "family": "2rw", + "width": 16, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address 
port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_32x16", + "type": "sram", + "depth": 32 + }, + { + "family": "2rw", + "width": 22, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_32x22", + "type": "sram", + "depth": 32 + }, + { + "family": "2rw", + "width": 32, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_32x32", + "type": "sram", + "depth": 32 + }, + { + "family": "2rw", + "width": 39, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active 
high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_32x39", + "type": "sram", + "depth": 32 + }, + { + "family": "2rw", + "width": 4, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_32x4", + "type": "sram", + "depth": 32 + }, + { + "family": "2rw", + "width": 8, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_32x8", + "type": "sram", + "depth": 32 + }, + { + "family": "2rw", + "width": 16, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port 
polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_64x16", + "type": "sram", + "depth": 64 + }, + { + "family": "2rw", + "width": 32, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_64x32", + "type": "sram", + "depth": 64 + }, + { + "family": "2rw", + "width": 4, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_64x4", + "type": "sram", + "depth": 64 + }, + { + "family": "2rw", + "width": 8, + "ports": [ + { + "chip enable port name": "CSB1", + "write enable port name": "WEB1", + "address port polarity": "active high", + "output port polarity": "active high", 
+ "output port name": "O1", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE1", + "chip enable port polarity": "active low", + "address port name": "A1", + "read enable port name": "OEB1", + "input port name": "I1", + "input port polarity": "active high" + }, + { + "chip enable port name": "CSB2", + "write enable port name": "WEB2", + "address port polarity": "active high", + "output port polarity": "active high", + "output port name": "O2", + "write enable port polarity": "active low", + "read enable port polarity": "active low", + "clock port polarity": "positive edge", + "clock port name": "CE2", + "chip enable port polarity": "active low", + "address port name": "A2", + "read enable port name": "OEB2", + "input port name": "I2", + "input port polarity": "active high" + } + ], + "name": "my_sram_2rw_64x8", + "type": "sram", + "depth": 64 + } +] diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 720bb8122..b5efbf9d8 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -301,47 +301,77 @@ trait HasSimpleTestGenerator { } else "" } + /** Helper function to generate a port. + * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") + * @param addrWidth Address port width + * @param width data width + * @param write Has a write port? + * @param writeEnable Has a write enable port? + * @param read Has a read port? + * @param readEnable Has a read enable port? + * @param mask Mask granularity (# bits) of the port or None. */ + def generatePort(prefix: String, addrWidth: Int, width: Int, write: Boolean, writeEnable: Boolean, read: Boolean, readEnable: Boolean, mask: Option[Int]): String = { + val readStr = if (read) s"output ${prefix}_dout : UInt<$width>" else "" + val writeStr = if (write) s"input ${prefix}_din : UInt<$width>" else "" + val readEnableStr = if (readEnable) s"input ${prefix}_read_en : UInt<1>" else "" + val writeEnableStr = if (writeEnable) s"input ${prefix}_write_en : UInt<1>" else "" + val maskStr = mask match { + case Some(maskBits: Int) => s"input ${prefix}_mask : UInt<${maskBits}>" + case _ => "" + } +s""" + input ${prefix}_clk : Clock + input ${prefix}_addr : UInt<$addrWidth> + ${writeStr} + ${readStr} + ${readEnableStr} + ${writeEnableStr} + ${maskStr} +""" + } + + /** Helper function to generate a RW footer port. + * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") + * @param readEnable Has a read enable port? + * @param mask Mask granularity (# bits) of the port or None. */ + def generateReadWriteFooterPort(prefix: String, readEnable: Boolean, mask: Option[Int]): String = { + generatePort(libPortPrefix, lib_addr_width, libWidth, + write=true, writeEnable=true, read=true, readEnable=readEnable, mask) + } + + /** Helper function to generate a RW header port. + * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") + * @param readEnable Has a read enable port? + * @param mask Mask granularity (# bits) of the port or None. */ + def generateReadWriteHeaderPort(prefix: String, readEnable: Boolean, mask: Option[Int]): String = { + generatePort(memPortPrefix, mem_addr_width, memWidth, + write=true, writeEnable=true, read=true, readEnable=readEnable, mask) + } + + // Generate the header memory ports. 
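+  // (Note: in these test generators the "header" is the port list of the target memory
+  // module being compiled, while the "footer" below is the port list of the library
+  // macro extmodule that the compiler maps onto.)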
+ def generateHeaderPorts(): String = { + require (memSRAM.ports.size == 1, "Header generator only supports single RW port mem") + generateReadWriteHeaderPort(memPortPrefix, memSRAM.ports(0).readEnable.isDefined, if (memHasMask) Some(memMaskBits) else None) + } + // Generate the header (contains the circuit statement and the target memory // module. def generateHeader(): String = { - require (memSRAM.ports.size == 1, "Header generator only supports single port mem") - - val readEnable = if (memSRAM.ports(0).readEnable.isDefined) s"input ${memPortPrefix}_read_en : UInt<1>" else "" - val headerMask = if (memHasMask) s"input ${memPortPrefix}_mask : UInt<${memMaskBits}>" else "" s""" circuit $mem_name : module $mem_name : - input ${memPortPrefix}_clk : Clock - input ${memPortPrefix}_addr : UInt<$mem_addr_width> - input ${memPortPrefix}_din : UInt<$memWidth> - output ${memPortPrefix}_dout : UInt<$memWidth> - ${readEnable} - input ${memPortPrefix}_write_en : UInt<1> - ${headerMask} +${generateHeaderPorts} """ } // Generate the target memory ports. def generateFooterPorts(): String = { - require (libSRAM.ports.size == 1, "Footer generator only supports single port lib") - - val readEnable = if (libSRAM.ports(0).readEnable.isDefined) s"input ${libPortPrefix}_read_en : UInt<1>" else "" - val footerMask = if (libHasMask) s"input ${libPortPrefix}_mask : UInt<${libMaskBits}>" else "" - s""" - input ${libPortPrefix}_clk : Clock - input ${libPortPrefix}_addr : UInt<$lib_addr_width> - input ${libPortPrefix}_din : UInt<$libWidth> - output ${libPortPrefix}_dout : UInt<$libWidth> - ${readEnable} - input ${libPortPrefix}_write_en : UInt<1> - ${footerMask} - """ + require (libSRAM.ports.size == 1, "Footer generator only supports single RW port mem") + generateReadWriteFooterPort(libPortPrefix, libSRAM.ports(0).readEnable.isDefined, if (libHasMask) Some(libMaskBits) else None) } // Generate the footer (contains the target memory extmodule declaration by default). def generateFooter(): String = { - require (libSRAM.ports.size == 1, "Footer generator only supports single port lib") - s""" extmodule $lib_name : ${generateFooterPorts} diff --git a/macros/src/test/scala/MultiPort.scala b/macros/src/test/scala/MultiPort.scala new file mode 100644 index 000000000..ac1fb2f8a --- /dev/null +++ b/macros/src/test/scala/MultiPort.scala @@ -0,0 +1,392 @@ +package barstools.macros + +// Test that the memory compiler works fine for compiling multi-port memories. +// TODO: extend test generator to also automatically generate multi-ported memories. 
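+//
+// Rough shape of what these tests check (values taken from the test classes below):
+// e.g. SplitWidth_2rw maps a depth-1024, 64-bit-wide, maskGran-16 "2rw" memory onto
+// 16-bit-wide "2rw" library macros, so the compiler must emit four library instances
+// (mem_0_0 .. mem_0_3) and wire *both* ports (portA/portB) of every instance, rather
+// than re-splitting the memory separately for each port (the bug fixed in this commit).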
+ +class SplitWidth_2rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + import mdf.macrolib._ + + override lazy val depth = 1024 + override lazy val memWidth = 64 + override lazy val memMaskGran = Some(16) + override lazy val libWidth = 16 + + override def generateMemSRAM() = { + SRAMMacro( + name=mem_name, + width=memWidth, + depth=memDepth, + family="2rw", + ports=Seq(generateTestPort( + "portA", memWidth, memDepth, maskGran=memMaskGran, + write=true, writeEnable=true, + read=true, readEnable=true + ), generateTestPort( + "portB", memWidth, memDepth, maskGran=memMaskGran, + write=true, writeEnable=true, + read=true, readEnable=true + )) + ) + } + + override def generateLibSRAM() = { + SRAMMacro( + name=lib_name, + width=libWidth, + depth=libDepth, + family="2rw", + ports=Seq(generateTestPort( + "portA", libWidth, libDepth, + write=true, writeEnable=true, + read=true, readEnable=true + ), generateTestPort( + "portB", libWidth, libDepth, + write=true, writeEnable=true, + read=true, readEnable=true + )) + ) + } + + override def generateHeaderPorts() = { + generateReadWriteHeaderPort("portA", true, Some(memMaskBits)) + "\n" + generateReadWriteHeaderPort("portB", true, Some(memMaskBits)) + } + + override def generateFooterPorts() = { + generateReadWriteFooterPort("portA", true, None) + "\n" + generateReadWriteFooterPort("portB", true, None) + } + + override def generateBody() = +""" + inst mem_0_0 of awesome_lib_mem + inst mem_0_1 of awesome_lib_mem + inst mem_0_2 of awesome_lib_mem + inst mem_0_3 of awesome_lib_mem + mem_0_0.portA_clk <= portA_clk + mem_0_0.portA_addr <= portA_addr + node portA_dout_0_0 = bits(mem_0_0.portA_dout, 15, 0) + mem_0_0.portA_din <= bits(portA_din, 15, 0) + mem_0_0.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_0.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 0, 0)), UInt<1>("h1")) + mem_0_1.portA_clk <= portA_clk + mem_0_1.portA_addr <= portA_addr + node portA_dout_0_1 = bits(mem_0_1.portA_dout, 15, 0) + mem_0_1.portA_din <= bits(portA_din, 31, 16) + mem_0_1.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_1.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 1, 1)), UInt<1>("h1")) + mem_0_2.portA_clk <= portA_clk + mem_0_2.portA_addr <= portA_addr + node portA_dout_0_2 = bits(mem_0_2.portA_dout, 15, 0) + mem_0_2.portA_din <= bits(portA_din, 47, 32) + mem_0_2.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_2.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 2, 2)), UInt<1>("h1")) + mem_0_3.portA_clk <= portA_clk + mem_0_3.portA_addr <= portA_addr + node portA_dout_0_3 = bits(mem_0_3.portA_dout, 15, 0) + mem_0_3.portA_din <= bits(portA_din, 63, 48) + mem_0_3.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_3.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 3, 3)), UInt<1>("h1")) + node portA_dout_0 = cat(portA_dout_0_3, cat(portA_dout_0_2, cat(portA_dout_0_1, portA_dout_0_0))) + mem_0_0.portB_clk <= portB_clk + mem_0_0.portB_addr <= portB_addr + node portB_dout_0_0 = bits(mem_0_0.portB_dout, 15, 0) + mem_0_0.portB_din <= bits(portB_din, 15, 0) + mem_0_0.portB_read_en <= and(portB_read_en, UInt<1>("h1")) + mem_0_0.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 0, 0)), UInt<1>("h1")) + mem_0_1.portB_clk <= portB_clk + mem_0_1.portB_addr <= portB_addr + node portB_dout_0_1 = bits(mem_0_1.portB_dout, 15, 0) + mem_0_1.portB_din <= bits(portB_din, 31, 16) + mem_0_1.portB_read_en <= and(portB_read_en, UInt<1>("h1")) + mem_0_1.portB_write_en <= 
and(and(portB_write_en, bits(portB_mask, 1, 1)), UInt<1>("h1")) + mem_0_2.portB_clk <= portB_clk + mem_0_2.portB_addr <= portB_addr + node portB_dout_0_2 = bits(mem_0_2.portB_dout, 15, 0) + mem_0_2.portB_din <= bits(portB_din, 47, 32) + mem_0_2.portB_read_en <= and(portB_read_en, UInt<1>("h1")) + mem_0_2.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 2, 2)), UInt<1>("h1")) + mem_0_3.portB_clk <= portB_clk + mem_0_3.portB_addr <= portB_addr + node portB_dout_0_3 = bits(mem_0_3.portB_dout, 15, 0) + mem_0_3.portB_din <= bits(portB_din, 63, 48) + mem_0_3.portB_read_en <= and(portB_read_en, UInt<1>("h1")) + mem_0_3.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 3, 3)), UInt<1>("h1")) + node portB_dout_0 = cat(portB_dout_0_3, cat(portB_dout_0_2, cat(portB_dout_0_1, portB_dout_0_0))) + portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<1>("h0")) + portB_dout <= mux(UInt<1>("h1"), portB_dout_0, UInt<1>("h0")) +""" + + compileExecuteAndTest(mem, lib, v, output) +} + +class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + import mdf.macrolib._ + + override lazy val depth = 1024 + override lazy val memWidth = 64 + override lazy val memMaskGran = Some(16) + override lazy val libWidth = 16 + + override def generateMemSRAM() = { + SRAMMacro( + name=mem_name, + width=memWidth, + depth=memDepth, + family="1r1w", + ports=Seq(generateTestPort( + "portA", memWidth, memDepth, maskGran=memMaskGran, + write=false, writeEnable=false, + read=true, readEnable=true + ), generateTestPort( + "portB", memWidth, memDepth, maskGran=memMaskGran, + write=true, writeEnable=true, + read=false, readEnable=false + )) + ) + } + + override def generateLibSRAM() = { + SRAMMacro( + name=lib_name, + width=libWidth, + depth=libDepth, + family="1r1w", + ports=Seq(generateTestPort( + "portA", libWidth, libDepth, + write=false, writeEnable=false, + read=true, readEnable=true + ), generateTestPort( + "portB", libWidth, libDepth, + write=true, writeEnable=true, + read=false, readEnable=false + )) + ) + } + + override def generateHeaderPorts() = { + generatePort("portA", mem_addr_width, memWidth, + write=false, writeEnable=false, read=true, readEnable=true, Some(memMaskBits)) + "\n" + + generatePort("portB", mem_addr_width, memWidth, + write=true, writeEnable=true, read=false, readEnable=false, Some(memMaskBits)) + } + + override def generateFooterPorts() = { + generatePort("portA", lib_addr_width, libWidth, + write=false, writeEnable=false, read=true, readEnable=true, None) + "\n" + + generatePort("portB", lib_addr_width, libWidth, + write=true, writeEnable=true, read=false, readEnable=false, None) + } + + override def generateBody() = +""" + inst mem_0_0 of awesome_lib_mem + inst mem_0_1 of awesome_lib_mem + inst mem_0_2 of awesome_lib_mem + inst mem_0_3 of awesome_lib_mem + mem_0_0.portB_clk <= portB_clk + mem_0_0.portB_addr <= portB_addr + mem_0_0.portB_din <= bits(portB_din, 15, 0) + mem_0_0.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 0, 0)), UInt<1>("h1")) + mem_0_1.portB_clk <= portB_clk + mem_0_1.portB_addr <= portB_addr + mem_0_1.portB_din <= bits(portB_din, 31, 16) + mem_0_1.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 1, 1)), UInt<1>("h1")) + mem_0_2.portB_clk <= portB_clk + mem_0_2.portB_addr <= portB_addr + mem_0_2.portB_din <= bits(portB_din, 47, 32) + mem_0_2.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 2, 2)), UInt<1>("h1")) + mem_0_3.portB_clk <= portB_clk + mem_0_3.portB_addr <= portB_addr + 
mem_0_3.portB_din <= bits(portB_din, 63, 48) + mem_0_3.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 3, 3)), UInt<1>("h1")) + mem_0_0.portA_clk <= portA_clk + mem_0_0.portA_addr <= portA_addr + node portA_dout_0_0 = bits(mem_0_0.portA_dout, 15, 0) + mem_0_0.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_1.portA_clk <= portA_clk + mem_0_1.portA_addr <= portA_addr + node portA_dout_0_1 = bits(mem_0_1.portA_dout, 15, 0) + mem_0_1.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_2.portA_clk <= portA_clk + mem_0_2.portA_addr <= portA_addr + node portA_dout_0_2 = bits(mem_0_2.portA_dout, 15, 0) + mem_0_2.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_3.portA_clk <= portA_clk + mem_0_3.portA_addr <= portA_addr + node portA_dout_0_3 = bits(mem_0_3.portA_dout, 15, 0) + mem_0_3.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + node portA_dout_0 = cat(portA_dout_0_3, cat(portA_dout_0_2, cat(portA_dout_0_1, portA_dout_0_0))) + portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<1>("h0")) +""" + + compileExecuteAndTest(mem, lib, v, output) +} + +class SplitWidth_2rw_differentMasks extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + import mdf.macrolib._ + + override lazy val depth = 1024 + override lazy val memWidth = 64 + override lazy val memMaskGran = Some(16) + override lazy val libWidth = 16 + + lazy val memMaskGranB = 8 // these generators are run at constructor time + + override def generateMemSRAM() = { + println(memMaskGranB) + SRAMMacro( + name=mem_name, + width=memWidth, + depth=memDepth, + family="2rw", + ports=Seq(generateTestPort( + "portA", memWidth, memDepth, maskGran=memMaskGran, + write=true, writeEnable=true, + read=true, readEnable=true + ), generateTestPort( + "portB", memWidth, memDepth, maskGran=Some(memMaskGranB), + write=true, writeEnable=true, + read=true, readEnable=true + )) + ) + } + + override def generateLibSRAM() = { + SRAMMacro( + name=lib_name, + width=libWidth, + depth=libDepth, + family="2rw", + ports=Seq(generateTestPort( + "portA", libWidth, libDepth, + write=true, writeEnable=true, + read=true, readEnable=true + ), generateTestPort( + "portB", libWidth, libDepth, + write=true, writeEnable=true, + read=true, readEnable=true + )) + ) + } + + override def generateHeaderPorts() = { + generateReadWriteHeaderPort("portA", true, Some(memMaskBits)) + "\n" + generateReadWriteHeaderPort("portB", true, Some(memWidth / memMaskGranB)) + } + + override def generateFooterPorts() = { + generateReadWriteFooterPort("portA", true, None) + "\n" + generateReadWriteFooterPort("portB", true, None) + } + + override def generateBody() = +""" + inst mem_0_0 of awesome_lib_mem + inst mem_0_1 of awesome_lib_mem + inst mem_0_2 of awesome_lib_mem + inst mem_0_3 of awesome_lib_mem + inst mem_0_4 of awesome_lib_mem + inst mem_0_5 of awesome_lib_mem + inst mem_0_6 of awesome_lib_mem + inst mem_0_7 of awesome_lib_mem + mem_0_0.portA_clk <= portA_clk + mem_0_0.portA_addr <= portA_addr + node portA_dout_0_0 = bits(mem_0_0.portA_dout, 7, 0) + mem_0_0.portA_din <= bits(portA_din, 7, 0) + mem_0_0.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_0.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 0, 0)), UInt<1>("h1")) + mem_0_1.portA_clk <= portA_clk + mem_0_1.portA_addr <= portA_addr + node portA_dout_0_1 = bits(mem_0_1.portA_dout, 7, 0) + mem_0_1.portA_din <= bits(portA_din, 15, 8) + mem_0_1.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_1.portA_write_en <= and(and(portA_write_en, 
bits(portA_mask, 0, 0)), UInt<1>("h1")) + mem_0_2.portA_clk <= portA_clk + mem_0_2.portA_addr <= portA_addr + node portA_dout_0_2 = bits(mem_0_2.portA_dout, 7, 0) + mem_0_2.portA_din <= bits(portA_din, 23, 16) + mem_0_2.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_2.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 1, 1)), UInt<1>("h1")) + mem_0_3.portA_clk <= portA_clk + mem_0_3.portA_addr <= portA_addr + node portA_dout_0_3 = bits(mem_0_3.portA_dout, 7, 0) + mem_0_3.portA_din <= bits(portA_din, 31, 24) + mem_0_3.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_3.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 1, 1)), UInt<1>("h1")) + mem_0_4.portA_clk <= portA_clk + mem_0_4.portA_addr <= portA_addr + node portA_dout_0_4 = bits(mem_0_4.portA_dout, 7, 0) + mem_0_4.portA_din <= bits(portA_din, 39, 32) + mem_0_4.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_4.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 2, 2)), UInt<1>("h1")) + mem_0_5.portA_clk <= portA_clk + mem_0_5.portA_addr <= portA_addr + node portA_dout_0_5 = bits(mem_0_5.portA_dout, 7, 0) + mem_0_5.portA_din <= bits(portA_din, 47, 40) + mem_0_5.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_5.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 2, 2)), UInt<1>("h1")) + mem_0_6.portA_clk <= portA_clk + mem_0_6.portA_addr <= portA_addr + node portA_dout_0_6 = bits(mem_0_6.portA_dout, 7, 0) + mem_0_6.portA_din <= bits(portA_din, 55, 48) + mem_0_6.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_6.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 3, 3)), UInt<1>("h1")) + mem_0_7.portA_clk <= portA_clk + mem_0_7.portA_addr <= portA_addr + node portA_dout_0_7 = bits(mem_0_7.portA_dout, 7, 0) + mem_0_7.portA_din <= bits(portA_din, 63, 56) + mem_0_7.portA_read_en <= and(portA_read_en, UInt<1>("h1")) + mem_0_7.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 3, 3)), UInt<1>("h1")) + node portA_dout_0 = cat(portA_dout_0_7, cat(portA_dout_0_6, cat(portA_dout_0_5, cat(portA_dout_0_4, cat(portA_dout_0_3, cat(portA_dout_0_2, cat(portA_dout_0_1, portA_dout_0_0))))))) + mem_0_0.portB_clk <= portB_clk + mem_0_0.portB_addr <= portB_addr + node portB_dout_0_0 = bits(mem_0_0.portB_dout, 7, 0) + mem_0_0.portB_din <= bits(portB_din, 7, 0) + mem_0_0.portB_read_en <= and(portB_read_en, UInt<1>("h1")) + mem_0_0.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 0, 0)), UInt<1>("h1")) + mem_0_1.portB_clk <= portB_clk + mem_0_1.portB_addr <= portB_addr + node portB_dout_0_1 = bits(mem_0_1.portB_dout, 7, 0) + mem_0_1.portB_din <= bits(portB_din, 15, 8) + mem_0_1.portB_read_en <= and(portB_read_en, UInt<1>("h1")) + mem_0_1.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 1, 1)), UInt<1>("h1")) + mem_0_2.portB_clk <= portB_clk + mem_0_2.portB_addr <= portB_addr + node portB_dout_0_2 = bits(mem_0_2.portB_dout, 7, 0) + mem_0_2.portB_din <= bits(portB_din, 23, 16) + mem_0_2.portB_read_en <= and(portB_read_en, UInt<1>("h1")) + mem_0_2.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 2, 2)), UInt<1>("h1")) + mem_0_3.portB_clk <= portB_clk + mem_0_3.portB_addr <= portB_addr + node portB_dout_0_3 = bits(mem_0_3.portB_dout, 7, 0) + mem_0_3.portB_din <= bits(portB_din, 31, 24) + mem_0_3.portB_read_en <= and(portB_read_en, UInt<1>("h1")) + mem_0_3.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 3, 3)), UInt<1>("h1")) + mem_0_4.portB_clk <= portB_clk + mem_0_4.portB_addr <= portB_addr + node portB_dout_0_4 = 
bits(mem_0_4.portB_dout, 7, 0) + mem_0_4.portB_din <= bits(portB_din, 39, 32) + mem_0_4.portB_read_en <= and(portB_read_en, UInt<1>("h1")) + mem_0_4.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 4, 4)), UInt<1>("h1")) + mem_0_5.portB_clk <= portB_clk + mem_0_5.portB_addr <= portB_addr + node portB_dout_0_5 = bits(mem_0_5.portB_dout, 7, 0) + mem_0_5.portB_din <= bits(portB_din, 47, 40) + mem_0_5.portB_read_en <= and(portB_read_en, UInt<1>("h1")) + mem_0_5.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 5, 5)), UInt<1>("h1")) + mem_0_6.portB_clk <= portB_clk + mem_0_6.portB_addr <= portB_addr + node portB_dout_0_6 = bits(mem_0_6.portB_dout, 7, 0) + mem_0_6.portB_din <= bits(portB_din, 55, 48) + mem_0_6.portB_read_en <= and(portB_read_en, UInt<1>("h1")) + mem_0_6.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 6, 6)), UInt<1>("h1")) + mem_0_7.portB_clk <= portB_clk + mem_0_7.portB_addr <= portB_addr + node portB_dout_0_7 = bits(mem_0_7.portB_dout, 7, 0) + mem_0_7.portB_din <= bits(portB_din, 63, 56) + mem_0_7.portB_read_en <= and(portB_read_en, UInt<1>("h1")) + mem_0_7.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 7, 7)), UInt<1>("h1")) + node portB_dout_0 = cat(portB_dout_0_7, cat(portB_dout_0_6, cat(portB_dout_0_5, cat(portB_dout_0_4, cat(portB_dout_0_3, cat(portB_dout_0_2, cat(portB_dout_0_1, portB_dout_0_0))))))) + portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<1>("h0")) + portB_dout <= mux(UInt<1>("h1"), portB_dout_0, UInt<1>("h0")) +""" + + compileExecuteAndTest(mem, lib, v, output) +} diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala index 2775f9a72..338569d66 100644 --- a/macros/src/test/scala/SpecificExamples.scala +++ b/macros/src/test/scala/SpecificExamples.scala @@ -21,6 +21,1217 @@ class GenerateSomeVerilog extends MacroCompilerSpec with HasSRAMGenerator with H } } +class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { + val mem = s"mem-BOOMTest.json" + val lib = s"lib-BOOMTest.json" + val v = s"BOOMTest.v" + + override val libPrefix = "macros/src/test/resources" + + val memSRAMs = mdf.macrolib.Utils.readMDFFromString( +""" +[ { + "type" : "sram", + "name" : "_T_182_ext", + "width" : 88, + "depth" : 64, + "ports" : [ { + "address port name" : "R0_addr", + "address port polarity" : "active high", + "clock port name" : "R0_clk", + "clock port polarity" : "active high", + "chip enable port name" : "R0_en", + "chip enable port polarity" : "active high", + "output port name" : "R0_data", + "output port polarity" : "active high" + }, { + "address port name" : "W0_addr", + "address port polarity" : "active high", + "clock port name" : "W0_clk", + "clock port polarity" : "active high", + "chip enable port name" : "W0_en", + "chip enable port polarity" : "active high", + "input port name" : "W0_data", + "input port polarity" : "active high", + "mask port name" : "W0_mask", + "mask port polarity" : "active high", + "mask granularity" : 22 + } ] +}, { + "type" : "sram", + "name" : "_T_84_ext", + "width" : 64, + "depth" : 512, + "ports" : [ { + "address port name" : "R0_addr", + "address port polarity" : "active high", + "clock port name" : "R0_clk", + "clock port polarity" : "active high", + "chip enable port name" : "R0_en", + "chip enable port polarity" : "active high", + "output port name" : "R0_data", + "output port polarity" : "active high" + }, { + "address port name" : "W0_addr", + "address port polarity" : "active high", + "clock port name" : "W0_clk", + "clock port 
polarity" : "active high", + "chip enable port name" : "W0_en", + "chip enable port polarity" : "active high", + "input port name" : "W0_data", + "input port polarity" : "active high", + "mask port name" : "W0_mask", + "mask port polarity" : "active high", + "mask granularity" : 64 + } ] +}, { + "type" : "sram", + "name" : "tag_array_ext", + "width" : 80, + "depth" : 64, + "ports" : [ { + "address port name" : "RW0_addr", + "address port polarity" : "active high", + "clock port name" : "RW0_clk", + "clock port polarity" : "active high", + "write enable port name" : "RW0_wmode", + "write enable port polarity" : "active high", + "chip enable port name" : "RW0_en", + "chip enable port polarity" : "active high", + "output port name" : "RW0_rdata", + "output port polarity" : "active high", + "input port name" : "RW0_wdata", + "input port polarity" : "active high", + "mask port name" : "RW0_wmask", + "mask port polarity" : "active high", + "mask granularity" : 20 + } ] +}, { + "type" : "sram", + "name" : "_T_886_ext", + "width" : 64, + "depth" : 512, + "ports" : [ { + "address port name" : "RW0_addr", + "address port polarity" : "active high", + "clock port name" : "RW0_clk", + "clock port polarity" : "active high", + "write enable port name" : "RW0_wmode", + "write enable port polarity" : "active high", + "chip enable port name" : "RW0_en", + "chip enable port polarity" : "active high", + "output port name" : "RW0_rdata", + "output port polarity" : "active high", + "input port name" : "RW0_wdata", + "input port polarity" : "active high" + } ] +}, { + "type" : "sram", + "name" : "entries_info_ext", + "width" : 40, + "depth" : 24, + "ports" : [ { + "address port name" : "R0_addr", + "address port polarity" : "active high", + "clock port name" : "R0_clk", + "clock port polarity" : "active high", + "chip enable port name" : "R0_en", + "chip enable port polarity" : "active high", + "output port name" : "R0_data", + "output port polarity" : "active high" + }, { + "address port name" : "W0_addr", + "address port polarity" : "active high", + "clock port name" : "W0_clk", + "clock port polarity" : "active high", + "chip enable port name" : "W0_en", + "chip enable port polarity" : "active high", + "input port name" : "W0_data", + "input port polarity" : "active high" + } ] +}, { + "type" : "sram", + "name" : "smem_ext", + "width" : 32, + "depth" : 32, + "ports" : [ { + "address port name" : "RW0_addr", + "address port polarity" : "active high", + "clock port name" : "RW0_clk", + "clock port polarity" : "active high", + "write enable port name" : "RW0_wmode", + "write enable port polarity" : "active high", + "chip enable port name" : "RW0_en", + "chip enable port polarity" : "active high", + "output port name" : "RW0_rdata", + "output port polarity" : "active high", + "input port name" : "RW0_wdata", + "input port polarity" : "active high", + "mask port name" : "RW0_wmask", + "mask port polarity" : "active high", + "mask granularity" : 1 + } ] +}, { + "type" : "sram", + "name" : "smem_0_ext", + "width" : 32, + "depth" : 64, + "ports" : [ { + "address port name" : "RW0_addr", + "address port polarity" : "active high", + "clock port name" : "RW0_clk", + "clock port polarity" : "active high", + "write enable port name" : "RW0_wmode", + "write enable port polarity" : "active high", + "chip enable port name" : "RW0_en", + "chip enable port polarity" : "active high", + "output port name" : "RW0_rdata", + "output port polarity" : "active high", + "input port name" : "RW0_wdata", + "input port polarity" : "active 
high", + "mask port name" : "RW0_wmask", + "mask port polarity" : "active high", + "mask granularity" : 1 + } ] +} ] +""").getOrElse(List()) + + writeToMem(mem, memSRAMs) + + val output = // TODO: check correctness... +""" +circuit smem_0_ext : + module _T_182_ext : + input R0_clk : Clock + input R0_addr : UInt<6> + output R0_data : UInt<88> + input R0_en : UInt<1> + input W0_clk : Clock + input W0_addr : UInt<6> + input W0_data : UInt<88> + input W0_en : UInt<1> + input W0_mask : UInt<4> + + node R0_addr_sel = bits(R0_addr, 5, 5) + reg R0_addr_sel_reg : UInt<1>, R0_clk with : + reset => (UInt<1>("h0"), R0_addr_sel_reg) + R0_addr_sel_reg <= mux(R0_en, R0_addr_sel, R0_addr_sel_reg) + node W0_addr_sel = bits(W0_addr, 5, 5) + inst mem_0_0 of my_sram_2rw_32x22 + inst mem_0_1 of my_sram_2rw_32x22 + inst mem_0_2 of my_sram_2rw_32x22 + inst mem_0_3 of my_sram_2rw_32x22 + mem_0_0.CE1 <= W0_clk + mem_0_0.A1 <= W0_addr + mem_0_0.I1 <= bits(W0_data, 21, 0) + mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_1.CE1 <= W0_clk + mem_0_1.A1 <= W0_addr + mem_0_1.I1 <= bits(W0_data, 43, 22) + mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 1, 1)), eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_2.CE1 <= W0_clk + mem_0_2.A1 <= W0_addr + mem_0_2.I1 <= bits(W0_data, 65, 44) + mem_0_2.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_2.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 2, 2)), eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_2.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_3.CE1 <= W0_clk + mem_0_3.A1 <= W0_addr + mem_0_3.I1 <= bits(W0_data, 87, 66) + mem_0_3.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_3.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 3, 3)), eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_3.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_0.CE2 <= R0_clk + mem_0_0.A2 <= R0_addr + node R0_data_0_0 = bits(mem_0_0.O2, 21, 0) + mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_1.CE2 <= R0_clk + mem_0_1.A2 <= R0_addr + node R0_data_0_1 = bits(mem_0_1.O2, 21, 0) + mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_2.CE2 <= R0_clk + mem_0_2.A2 <= R0_addr + node R0_data_0_2 = bits(mem_0_2.O2, 21, 0) + mem_0_2.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_2.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_3.CE2 <= R0_clk + mem_0_3.A2 <= R0_addr + node R0_data_0_3 = bits(mem_0_3.O2, 21, 0) + mem_0_3.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_3.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) + node R0_data_0 = 
cat(R0_data_0_3, cat(R0_data_0_2, cat(R0_data_0_1, R0_data_0_0))) + inst mem_1_0 of my_sram_2rw_32x22 + inst mem_1_1 of my_sram_2rw_32x22 + inst mem_1_2 of my_sram_2rw_32x22 + inst mem_1_3 of my_sram_2rw_32x22 + mem_1_0.CE1 <= W0_clk + mem_1_0.A1 <= W0_addr + mem_1_0.I1 <= bits(W0_data, 21, 0) + mem_1_0.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_1.CE1 <= W0_clk + mem_1_1.A1 <= W0_addr + mem_1_1.I1 <= bits(W0_data, 43, 22) + mem_1_1.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 1, 1)), eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_2.CE1 <= W0_clk + mem_1_2.A1 <= W0_addr + mem_1_2.I1 <= bits(W0_data, 65, 44) + mem_1_2.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_2.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 2, 2)), eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_2.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_3.CE1 <= W0_clk + mem_1_3.A1 <= W0_addr + mem_1_3.I1 <= bits(W0_data, 87, 66) + mem_1_3.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_3.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 3, 3)), eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_3.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_0.CE2 <= R0_clk + mem_1_0.A2 <= R0_addr + node R0_data_1_0 = bits(mem_1_0.O2, 21, 0) + mem_1_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_1.CE2 <= R0_clk + mem_1_1.A2 <= R0_addr + node R0_data_1_1 = bits(mem_1_1.O2, 21, 0) + mem_1_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_2.CE2 <= R0_clk + mem_1_2.A2 <= R0_addr + node R0_data_1_2 = bits(mem_1_2.O2, 21, 0) + mem_1_2.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_2.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_3.CE2 <= R0_clk + mem_1_3.A2 <= R0_addr + node R0_data_1_3 = bits(mem_1_3.O2, 21, 0) + mem_1_3.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_3.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) + node R0_data_1 = cat(R0_data_1_3, cat(R0_data_1_2, cat(R0_data_1_1, R0_data_1_0))) + R0_data <= mux(eq(R0_addr_sel_reg, UInt<1>("h0")), R0_data_0, mux(eq(R0_addr_sel_reg, UInt<1>("h1")), R0_data_1, UInt<1>("h0"))) + + extmodule my_sram_2rw_32x22 : + input CE1 : Clock + input A1 : UInt<5> + input I1 : UInt<22> + output O1 : UInt<22> + input CSB1 : UInt<1> + input OEB1 : UInt<1> + input WEB1 : UInt<1> + input CE2 : Clock + input A2 : UInt<5> + input I2 : UInt<22> + output O2 : UInt<22> + input CSB2 : UInt<1> + input OEB2 : UInt<1> + input WEB2 : UInt<1> + + defname = my_sram_2rw_32x22 + + + module _T_84_ext : + input R0_clk : Clock + input R0_addr : UInt<9> + output R0_data : UInt<64> 
+ input R0_en : UInt<1> + input W0_clk : Clock + input W0_addr : UInt<9> + input W0_data : UInt<64> + input W0_en : UInt<1> + input W0_mask : UInt<1> + + node R0_addr_sel = bits(R0_addr, 8, 7) + reg R0_addr_sel_reg : UInt<2>, R0_clk with : + reset => (UInt<1>("h0"), R0_addr_sel_reg) + R0_addr_sel_reg <= mux(R0_en, R0_addr_sel, R0_addr_sel_reg) + node W0_addr_sel = bits(W0_addr, 8, 7) + inst mem_0_0 of my_sram_2rw_128x32 + inst mem_0_1 of my_sram_2rw_128x32 + mem_0_0.CE1 <= W0_clk + mem_0_0.A1 <= W0_addr + mem_0_0.I1 <= bits(W0_data, 31, 0) + mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h0")))) + mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h0")))) + mem_0_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h0")))) + mem_0_1.CE1 <= W0_clk + mem_0_1.A1 <= W0_addr + mem_0_1.I1 <= bits(W0_data, 63, 32) + mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h0")))) + mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h0")))) + mem_0_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h0")))) + mem_0_0.CE2 <= R0_clk + mem_0_0.A2 <= R0_addr + node R0_data_0_0 = bits(mem_0_0.O2, 31, 0) + mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h0")))) + mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h0")))) + mem_0_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h0")))) + mem_0_1.CE2 <= R0_clk + mem_0_1.A2 <= R0_addr + node R0_data_0_1 = bits(mem_0_1.O2, 31, 0) + mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h0")))) + mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h0")))) + mem_0_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h0")))) + node R0_data_0 = cat(R0_data_0_1, R0_data_0_0) + inst mem_1_0 of my_sram_2rw_128x32 + inst mem_1_1 of my_sram_2rw_128x32 + mem_1_0.CE1 <= W0_clk + mem_1_0.A1 <= W0_addr + mem_1_0.I1 <= bits(W0_data, 31, 0) + mem_1_0.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h1")))) + mem_1_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h1")))) + mem_1_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h1")))) + mem_1_1.CE1 <= W0_clk + mem_1_1.A1 <= W0_addr + mem_1_1.I1 <= bits(W0_data, 63, 32) + mem_1_1.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h1")))) + mem_1_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h1")))) + mem_1_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h1")))) + mem_1_0.CE2 <= R0_clk + mem_1_0.A2 <= R0_addr + node R0_data_1_0 = bits(mem_1_0.O2, 31, 0) + mem_1_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h1")))) + mem_1_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h1")))) + mem_1_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h1")))) + mem_1_1.CE2 <= R0_clk + mem_1_1.A2 <= R0_addr + node R0_data_1_1 = bits(mem_1_1.O2, 31, 0) + mem_1_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h1")))) + mem_1_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h1")))) + mem_1_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h1")))) + node R0_data_1 = cat(R0_data_1_1, R0_data_1_0) + inst mem_2_0 of my_sram_2rw_128x32 + inst mem_2_1 of my_sram_2rw_128x32 + mem_2_0.CE1 <= W0_clk + mem_2_0.A1 <= W0_addr + mem_2_0.I1 <= bits(W0_data, 31, 0) + mem_2_0.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h2")))) + mem_2_0.WEB1 <= 
not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h2")))) + mem_2_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h2")))) + mem_2_1.CE1 <= W0_clk + mem_2_1.A1 <= W0_addr + mem_2_1.I1 <= bits(W0_data, 63, 32) + mem_2_1.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h2")))) + mem_2_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h2")))) + mem_2_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h2")))) + mem_2_0.CE2 <= R0_clk + mem_2_0.A2 <= R0_addr + node R0_data_2_0 = bits(mem_2_0.O2, 31, 0) + mem_2_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h2")))) + mem_2_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h2")))) + mem_2_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h2")))) + mem_2_1.CE2 <= R0_clk + mem_2_1.A2 <= R0_addr + node R0_data_2_1 = bits(mem_2_1.O2, 31, 0) + mem_2_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h2")))) + mem_2_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h2")))) + mem_2_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h2")))) + node R0_data_2 = cat(R0_data_2_1, R0_data_2_0) + inst mem_3_0 of my_sram_2rw_128x32 + inst mem_3_1 of my_sram_2rw_128x32 + mem_3_0.CE1 <= W0_clk + mem_3_0.A1 <= W0_addr + mem_3_0.I1 <= bits(W0_data, 31, 0) + mem_3_0.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h3")))) + mem_3_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h3")))) + mem_3_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h3")))) + mem_3_1.CE1 <= W0_clk + mem_3_1.A1 <= W0_addr + mem_3_1.I1 <= bits(W0_data, 63, 32) + mem_3_1.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h3")))) + mem_3_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h3")))) + mem_3_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h3")))) + mem_3_0.CE2 <= R0_clk + mem_3_0.A2 <= R0_addr + node R0_data_3_0 = bits(mem_3_0.O2, 31, 0) + mem_3_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h3")))) + mem_3_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h3")))) + mem_3_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h3")))) + mem_3_1.CE2 <= R0_clk + mem_3_1.A2 <= R0_addr + node R0_data_3_1 = bits(mem_3_1.O2, 31, 0) + mem_3_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h3")))) + mem_3_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h3")))) + mem_3_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h3")))) + node R0_data_3 = cat(R0_data_3_1, R0_data_3_0) + R0_data <= mux(eq(R0_addr_sel_reg, UInt<2>("h0")), R0_data_0, mux(eq(R0_addr_sel_reg, UInt<2>("h1")), R0_data_1, mux(eq(R0_addr_sel_reg, UInt<2>("h2")), R0_data_2, mux(eq(R0_addr_sel_reg, UInt<2>("h3")), R0_data_3, UInt<1>("h0"))))) + + extmodule my_sram_2rw_128x32 : + input CE1 : Clock + input A1 : UInt<7> + input I1 : UInt<32> + output O1 : UInt<32> + input CSB1 : UInt<1> + input OEB1 : UInt<1> + input WEB1 : UInt<1> + input CE2 : Clock + input A2 : UInt<7> + input I2 : UInt<32> + output O2 : UInt<32> + input CSB2 : UInt<1> + input OEB2 : UInt<1> + input WEB2 : UInt<1> + + defname = my_sram_2rw_128x32 + + + module tag_array_ext : + input RW0_clk : Clock + input RW0_addr : UInt<6> + input RW0_wdata : UInt<80> + output RW0_rdata : UInt<80> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + input RW0_wmask : UInt<4> + + inst mem_0_0 of my_sram_1rw_64x32 + inst mem_0_1 of my_sram_1rw_64x32 + inst 
mem_0_2 of my_sram_1rw_64x32 + inst mem_0_3 of my_sram_1rw_64x32 + mem_0_0.CE <= RW0_clk + mem_0_0.A <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.O, 19, 0) + mem_0_0.I <= bits(RW0_wdata, 19, 0) + mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) + mem_0_0.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_1.CE <= RW0_clk + mem_0_1.A <= RW0_addr + node RW0_rdata_0_1 = bits(mem_0_1.O, 19, 0) + mem_0_1.I <= bits(RW0_wdata, 39, 20) + mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) + mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_2.CE <= RW0_clk + mem_0_2.A <= RW0_addr + node RW0_rdata_0_2 = bits(mem_0_2.O, 19, 0) + mem_0_2.I <= bits(RW0_wdata, 59, 40) + mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) + mem_0_2.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_3.CE <= RW0_clk + mem_0_3.A <= RW0_addr + node RW0_rdata_0_3 = bits(mem_0_3.O, 19, 0) + mem_0_3.I <= bits(RW0_wdata, 79, 60) + mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) + mem_0_3.CSB <= not(and(RW0_en, UInt<1>("h1"))) + node RW0_rdata_0 = cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + extmodule my_sram_1rw_64x32 : + input CE : Clock + input A : UInt<6> + input I : UInt<32> + output O : UInt<32> + input CSB : UInt<1> + input OEB : UInt<1> + input WEB : UInt<1> + + defname = my_sram_1rw_64x32 + + + module _T_886_ext : + input RW0_clk : Clock + input RW0_addr : UInt<9> + input RW0_wdata : UInt<64> + output RW0_rdata : UInt<64> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + + inst mem_0_0 of my_sram_1rw_512x32 + inst mem_0_1 of my_sram_1rw_512x32 + mem_0_0.CE <= RW0_clk + mem_0_0.A <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.O, 31, 0) + mem_0_0.I <= bits(RW0_wdata, 31, 0) + mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_0.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) + mem_0_0.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_1.CE <= RW0_clk + mem_0_1.A <= RW0_addr + node RW0_rdata_0_1 = bits(mem_0_1.O, 31, 0) + mem_0_1.I <= bits(RW0_wdata, 63, 32) + mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_1.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) + mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) + node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + extmodule my_sram_1rw_512x32 : + input CE : Clock + input A : UInt<9> + input I : UInt<32> + output O : UInt<32> + input CSB : UInt<1> + input OEB : UInt<1> + input WEB : UInt<1> + + defname = my_sram_1rw_512x32 + + + module entries_info_ext : + input R0_clk : Clock + input R0_addr : UInt<5> + output R0_data : UInt<40> + input R0_en : UInt<1> + input W0_clk : Clock + input W0_addr : UInt<5> + input W0_data : UInt<40> + input W0_en : UInt<1> + + inst mem_0_0 of my_sram_2rw_32x8 + inst mem_0_1 of my_sram_2rw_32x8 + inst mem_0_2 of my_sram_2rw_32x8 + inst mem_0_3 of my_sram_2rw_32x8 + inst mem_0_4 of my_sram_2rw_32x8 + mem_0_0.CE1 <= W0_clk + mem_0_0.A1 <= W0_addr + mem_0_0.I1 <= bits(W0_data, 7, 0) + mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), 
UInt<1>("h1"))) + mem_0_0.CSB1 <= not(and(W0_en, UInt<1>("h1"))) + mem_0_1.CE1 <= W0_clk + mem_0_1.A1 <= W0_addr + mem_0_1.I1 <= bits(W0_data, 15, 8) + mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_1.CSB1 <= not(and(W0_en, UInt<1>("h1"))) + mem_0_2.CE1 <= W0_clk + mem_0_2.A1 <= W0_addr + mem_0_2.I1 <= bits(W0_data, 23, 16) + mem_0_2.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_2.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_2.CSB1 <= not(and(W0_en, UInt<1>("h1"))) + mem_0_3.CE1 <= W0_clk + mem_0_3.A1 <= W0_addr + mem_0_3.I1 <= bits(W0_data, 31, 24) + mem_0_3.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_3.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_3.CSB1 <= not(and(W0_en, UInt<1>("h1"))) + mem_0_4.CE1 <= W0_clk + mem_0_4.A1 <= W0_addr + mem_0_4.I1 <= bits(W0_data, 39, 32) + mem_0_4.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_4.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_4.CSB1 <= not(and(W0_en, UInt<1>("h1"))) + mem_0_0.CE2 <= R0_clk + mem_0_0.A2 <= R0_addr + node R0_data_0_0 = bits(mem_0_0.O2, 7, 0) + mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_0.CSB2 <= not(and(R0_en, UInt<1>("h1"))) + mem_0_1.CE2 <= R0_clk + mem_0_1.A2 <= R0_addr + node R0_data_0_1 = bits(mem_0_1.O2, 7, 0) + mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_1.CSB2 <= not(and(R0_en, UInt<1>("h1"))) + mem_0_2.CE2 <= R0_clk + mem_0_2.A2 <= R0_addr + node R0_data_0_2 = bits(mem_0_2.O2, 7, 0) + mem_0_2.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_2.CSB2 <= not(and(R0_en, UInt<1>("h1"))) + mem_0_3.CE2 <= R0_clk + mem_0_3.A2 <= R0_addr + node R0_data_0_3 = bits(mem_0_3.O2, 7, 0) + mem_0_3.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_3.CSB2 <= not(and(R0_en, UInt<1>("h1"))) + mem_0_4.CE2 <= R0_clk + mem_0_4.A2 <= R0_addr + node R0_data_0_4 = bits(mem_0_4.O2, 7, 0) + mem_0_4.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_4.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) + mem_0_4.CSB2 <= not(and(R0_en, UInt<1>("h1"))) + node R0_data_0 = cat(R0_data_0_4, cat(R0_data_0_3, cat(R0_data_0_2, cat(R0_data_0_1, R0_data_0_0)))) + R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) + + extmodule my_sram_2rw_32x8 : + input CE1 : Clock + input A1 : UInt<5> + input I1 : UInt<8> + output O1 : UInt<8> + input CSB1 : UInt<1> + input OEB1 : UInt<1> + input WEB1 : UInt<1> + input CE2 : Clock + input A2 : UInt<5> + input I2 : UInt<8> + output O2 : UInt<8> + input CSB2 : UInt<1> + input OEB2 : UInt<1> + input WEB2 : UInt<1> + + defname = my_sram_2rw_32x8 + + + module smem_ext : + input RW0_clk : Clock + input RW0_addr : UInt<5> + input RW0_wdata : UInt<32> + output RW0_rdata : UInt<32> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + input RW0_wmask : UInt<32> + + inst mem_0_0 of my_sram_1rw_64x8 + inst mem_0_1 of my_sram_1rw_64x8 + inst mem_0_2 of my_sram_1rw_64x8 + inst mem_0_3 of my_sram_1rw_64x8 + inst mem_0_4 of my_sram_1rw_64x8 + inst mem_0_5 of my_sram_1rw_64x8 + inst mem_0_6 of my_sram_1rw_64x8 
+ inst mem_0_7 of my_sram_1rw_64x8 + inst mem_0_8 of my_sram_1rw_64x8 + inst mem_0_9 of my_sram_1rw_64x8 + inst mem_0_10 of my_sram_1rw_64x8 + inst mem_0_11 of my_sram_1rw_64x8 + inst mem_0_12 of my_sram_1rw_64x8 + inst mem_0_13 of my_sram_1rw_64x8 + inst mem_0_14 of my_sram_1rw_64x8 + inst mem_0_15 of my_sram_1rw_64x8 + inst mem_0_16 of my_sram_1rw_64x8 + inst mem_0_17 of my_sram_1rw_64x8 + inst mem_0_18 of my_sram_1rw_64x8 + inst mem_0_19 of my_sram_1rw_64x8 + inst mem_0_20 of my_sram_1rw_64x8 + inst mem_0_21 of my_sram_1rw_64x8 + inst mem_0_22 of my_sram_1rw_64x8 + inst mem_0_23 of my_sram_1rw_64x8 + inst mem_0_24 of my_sram_1rw_64x8 + inst mem_0_25 of my_sram_1rw_64x8 + inst mem_0_26 of my_sram_1rw_64x8 + inst mem_0_27 of my_sram_1rw_64x8 + inst mem_0_28 of my_sram_1rw_64x8 + inst mem_0_29 of my_sram_1rw_64x8 + inst mem_0_30 of my_sram_1rw_64x8 + inst mem_0_31 of my_sram_1rw_64x8 + mem_0_0.CE <= RW0_clk + mem_0_0.A <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.O, 0, 0) + mem_0_0.I <= bits(RW0_wdata, 0, 0) + mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) + mem_0_0.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_1.CE <= RW0_clk + mem_0_1.A <= RW0_addr + node RW0_rdata_0_1 = bits(mem_0_1.O, 0, 0) + mem_0_1.I <= bits(RW0_wdata, 1, 1) + mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) + mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_2.CE <= RW0_clk + mem_0_2.A <= RW0_addr + node RW0_rdata_0_2 = bits(mem_0_2.O, 0, 0) + mem_0_2.I <= bits(RW0_wdata, 2, 2) + mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) + mem_0_2.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_3.CE <= RW0_clk + mem_0_3.A <= RW0_addr + node RW0_rdata_0_3 = bits(mem_0_3.O, 0, 0) + mem_0_3.I <= bits(RW0_wdata, 3, 3) + mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) + mem_0_3.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_4.CE <= RW0_clk + mem_0_4.A <= RW0_addr + node RW0_rdata_0_4 = bits(mem_0_4.O, 0, 0) + mem_0_4.I <= bits(RW0_wdata, 4, 4) + mem_0_4.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_4.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 4, 4)), UInt<1>("h1"))) + mem_0_4.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_5.CE <= RW0_clk + mem_0_5.A <= RW0_addr + node RW0_rdata_0_5 = bits(mem_0_5.O, 0, 0) + mem_0_5.I <= bits(RW0_wdata, 5, 5) + mem_0_5.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_5.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 5, 5)), UInt<1>("h1"))) + mem_0_5.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_6.CE <= RW0_clk + mem_0_6.A <= RW0_addr + node RW0_rdata_0_6 = bits(mem_0_6.O, 0, 0) + mem_0_6.I <= bits(RW0_wdata, 6, 6) + mem_0_6.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_6.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 6, 6)), UInt<1>("h1"))) + mem_0_6.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_7.CE <= RW0_clk + mem_0_7.A <= RW0_addr + node RW0_rdata_0_7 = bits(mem_0_7.O, 0, 0) + mem_0_7.I <= bits(RW0_wdata, 7, 7) + mem_0_7.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_7.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1"))) + mem_0_7.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_8.CE <= RW0_clk + mem_0_8.A <= RW0_addr + node RW0_rdata_0_8 = bits(mem_0_8.O, 0, 0) + mem_0_8.I <= bits(RW0_wdata, 8, 8) 
+ mem_0_8.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_8.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 8, 8)), UInt<1>("h1"))) + mem_0_8.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_9.CE <= RW0_clk + mem_0_9.A <= RW0_addr + node RW0_rdata_0_9 = bits(mem_0_9.O, 0, 0) + mem_0_9.I <= bits(RW0_wdata, 9, 9) + mem_0_9.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_9.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 9, 9)), UInt<1>("h1"))) + mem_0_9.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_10.CE <= RW0_clk + mem_0_10.A <= RW0_addr + node RW0_rdata_0_10 = bits(mem_0_10.O, 0, 0) + mem_0_10.I <= bits(RW0_wdata, 10, 10) + mem_0_10.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_10.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 10, 10)), UInt<1>("h1"))) + mem_0_10.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_11.CE <= RW0_clk + mem_0_11.A <= RW0_addr + node RW0_rdata_0_11 = bits(mem_0_11.O, 0, 0) + mem_0_11.I <= bits(RW0_wdata, 11, 11) + mem_0_11.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_11.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 11, 11)), UInt<1>("h1"))) + mem_0_11.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_12.CE <= RW0_clk + mem_0_12.A <= RW0_addr + node RW0_rdata_0_12 = bits(mem_0_12.O, 0, 0) + mem_0_12.I <= bits(RW0_wdata, 12, 12) + mem_0_12.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_12.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 12, 12)), UInt<1>("h1"))) + mem_0_12.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_13.CE <= RW0_clk + mem_0_13.A <= RW0_addr + node RW0_rdata_0_13 = bits(mem_0_13.O, 0, 0) + mem_0_13.I <= bits(RW0_wdata, 13, 13) + mem_0_13.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_13.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 13, 13)), UInt<1>("h1"))) + mem_0_13.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_14.CE <= RW0_clk + mem_0_14.A <= RW0_addr + node RW0_rdata_0_14 = bits(mem_0_14.O, 0, 0) + mem_0_14.I <= bits(RW0_wdata, 14, 14) + mem_0_14.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_14.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 14, 14)), UInt<1>("h1"))) + mem_0_14.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_15.CE <= RW0_clk + mem_0_15.A <= RW0_addr + node RW0_rdata_0_15 = bits(mem_0_15.O, 0, 0) + mem_0_15.I <= bits(RW0_wdata, 15, 15) + mem_0_15.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_15.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 15, 15)), UInt<1>("h1"))) + mem_0_15.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_16.CE <= RW0_clk + mem_0_16.A <= RW0_addr + node RW0_rdata_0_16 = bits(mem_0_16.O, 0, 0) + mem_0_16.I <= bits(RW0_wdata, 16, 16) + mem_0_16.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_16.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 16, 16)), UInt<1>("h1"))) + mem_0_16.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_17.CE <= RW0_clk + mem_0_17.A <= RW0_addr + node RW0_rdata_0_17 = bits(mem_0_17.O, 0, 0) + mem_0_17.I <= bits(RW0_wdata, 17, 17) + mem_0_17.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_17.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 17, 17)), UInt<1>("h1"))) + mem_0_17.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_18.CE <= RW0_clk + mem_0_18.A <= RW0_addr + node RW0_rdata_0_18 = bits(mem_0_18.O, 0, 0) + mem_0_18.I <= bits(RW0_wdata, 18, 18) + mem_0_18.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_18.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 18, 18)), UInt<1>("h1"))) + mem_0_18.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_19.CE <= RW0_clk + mem_0_19.A <= RW0_addr + node RW0_rdata_0_19 = bits(mem_0_19.O, 0, 0) + 
mem_0_19.I <= bits(RW0_wdata, 19, 19) + mem_0_19.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_19.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 19, 19)), UInt<1>("h1"))) + mem_0_19.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_20.CE <= RW0_clk + mem_0_20.A <= RW0_addr + node RW0_rdata_0_20 = bits(mem_0_20.O, 0, 0) + mem_0_20.I <= bits(RW0_wdata, 20, 20) + mem_0_20.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_20.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 20, 20)), UInt<1>("h1"))) + mem_0_20.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_21.CE <= RW0_clk + mem_0_21.A <= RW0_addr + node RW0_rdata_0_21 = bits(mem_0_21.O, 0, 0) + mem_0_21.I <= bits(RW0_wdata, 21, 21) + mem_0_21.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_21.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 21, 21)), UInt<1>("h1"))) + mem_0_21.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_22.CE <= RW0_clk + mem_0_22.A <= RW0_addr + node RW0_rdata_0_22 = bits(mem_0_22.O, 0, 0) + mem_0_22.I <= bits(RW0_wdata, 22, 22) + mem_0_22.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_22.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 22, 22)), UInt<1>("h1"))) + mem_0_22.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_23.CE <= RW0_clk + mem_0_23.A <= RW0_addr + node RW0_rdata_0_23 = bits(mem_0_23.O, 0, 0) + mem_0_23.I <= bits(RW0_wdata, 23, 23) + mem_0_23.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_23.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 23, 23)), UInt<1>("h1"))) + mem_0_23.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_24.CE <= RW0_clk + mem_0_24.A <= RW0_addr + node RW0_rdata_0_24 = bits(mem_0_24.O, 0, 0) + mem_0_24.I <= bits(RW0_wdata, 24, 24) + mem_0_24.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_24.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 24, 24)), UInt<1>("h1"))) + mem_0_24.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_25.CE <= RW0_clk + mem_0_25.A <= RW0_addr + node RW0_rdata_0_25 = bits(mem_0_25.O, 0, 0) + mem_0_25.I <= bits(RW0_wdata, 25, 25) + mem_0_25.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_25.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 25, 25)), UInt<1>("h1"))) + mem_0_25.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_26.CE <= RW0_clk + mem_0_26.A <= RW0_addr + node RW0_rdata_0_26 = bits(mem_0_26.O, 0, 0) + mem_0_26.I <= bits(RW0_wdata, 26, 26) + mem_0_26.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_26.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 26, 26)), UInt<1>("h1"))) + mem_0_26.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_27.CE <= RW0_clk + mem_0_27.A <= RW0_addr + node RW0_rdata_0_27 = bits(mem_0_27.O, 0, 0) + mem_0_27.I <= bits(RW0_wdata, 27, 27) + mem_0_27.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_27.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 27, 27)), UInt<1>("h1"))) + mem_0_27.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_28.CE <= RW0_clk + mem_0_28.A <= RW0_addr + node RW0_rdata_0_28 = bits(mem_0_28.O, 0, 0) + mem_0_28.I <= bits(RW0_wdata, 28, 28) + mem_0_28.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_28.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 28, 28)), UInt<1>("h1"))) + mem_0_28.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_29.CE <= RW0_clk + mem_0_29.A <= RW0_addr + node RW0_rdata_0_29 = bits(mem_0_29.O, 0, 0) + mem_0_29.I <= bits(RW0_wdata, 29, 29) + mem_0_29.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_29.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 29, 29)), UInt<1>("h1"))) + mem_0_29.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_30.CE <= RW0_clk + mem_0_30.A <= 
RW0_addr + node RW0_rdata_0_30 = bits(mem_0_30.O, 0, 0) + mem_0_30.I <= bits(RW0_wdata, 30, 30) + mem_0_30.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_30.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 30, 30)), UInt<1>("h1"))) + mem_0_30.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_31.CE <= RW0_clk + mem_0_31.A <= RW0_addr + node RW0_rdata_0_31 = bits(mem_0_31.O, 0, 0) + mem_0_31.I <= bits(RW0_wdata, 31, 31) + mem_0_31.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_31.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 31, 31)), UInt<1>("h1"))) + mem_0_31.CSB <= not(and(RW0_en, UInt<1>("h1"))) + node RW0_rdata_0 = cat(RW0_rdata_0_31, cat(RW0_rdata_0_30, cat(RW0_rdata_0_29, cat(RW0_rdata_0_28, cat(RW0_rdata_0_27, cat(RW0_rdata_0_26, cat(RW0_rdata_0_25, cat(RW0_rdata_0_24, cat(RW0_rdata_0_23, cat(RW0_rdata_0_22, cat(RW0_rdata_0_21, cat(RW0_rdata_0_20, cat(RW0_rdata_0_19, cat(RW0_rdata_0_18, cat(RW0_rdata_0_17, cat(RW0_rdata_0_16, cat(RW0_rdata_0_15, cat(RW0_rdata_0_14, cat(RW0_rdata_0_13, cat(RW0_rdata_0_12, cat(RW0_rdata_0_11, cat(RW0_rdata_0_10, cat(RW0_rdata_0_9, cat(RW0_rdata_0_8, cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))))))))))))))))))))))))))) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + module smem_0_ext : + input RW0_clk : Clock + input RW0_addr : UInt<6> + input RW0_wdata : UInt<32> + output RW0_rdata : UInt<32> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + input RW0_wmask : UInt<32> + + inst mem_0_0 of my_sram_1rw_64x8 + inst mem_0_1 of my_sram_1rw_64x8 + inst mem_0_2 of my_sram_1rw_64x8 + inst mem_0_3 of my_sram_1rw_64x8 + inst mem_0_4 of my_sram_1rw_64x8 + inst mem_0_5 of my_sram_1rw_64x8 + inst mem_0_6 of my_sram_1rw_64x8 + inst mem_0_7 of my_sram_1rw_64x8 + inst mem_0_8 of my_sram_1rw_64x8 + inst mem_0_9 of my_sram_1rw_64x8 + inst mem_0_10 of my_sram_1rw_64x8 + inst mem_0_11 of my_sram_1rw_64x8 + inst mem_0_12 of my_sram_1rw_64x8 + inst mem_0_13 of my_sram_1rw_64x8 + inst mem_0_14 of my_sram_1rw_64x8 + inst mem_0_15 of my_sram_1rw_64x8 + inst mem_0_16 of my_sram_1rw_64x8 + inst mem_0_17 of my_sram_1rw_64x8 + inst mem_0_18 of my_sram_1rw_64x8 + inst mem_0_19 of my_sram_1rw_64x8 + inst mem_0_20 of my_sram_1rw_64x8 + inst mem_0_21 of my_sram_1rw_64x8 + inst mem_0_22 of my_sram_1rw_64x8 + inst mem_0_23 of my_sram_1rw_64x8 + inst mem_0_24 of my_sram_1rw_64x8 + inst mem_0_25 of my_sram_1rw_64x8 + inst mem_0_26 of my_sram_1rw_64x8 + inst mem_0_27 of my_sram_1rw_64x8 + inst mem_0_28 of my_sram_1rw_64x8 + inst mem_0_29 of my_sram_1rw_64x8 + inst mem_0_30 of my_sram_1rw_64x8 + inst mem_0_31 of my_sram_1rw_64x8 + mem_0_0.CE <= RW0_clk + mem_0_0.A <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.O, 0, 0) + mem_0_0.I <= bits(RW0_wdata, 0, 0) + mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) + mem_0_0.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_1.CE <= RW0_clk + mem_0_1.A <= RW0_addr + node RW0_rdata_0_1 = bits(mem_0_1.O, 0, 0) + mem_0_1.I <= bits(RW0_wdata, 1, 1) + mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) + mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_2.CE <= RW0_clk + mem_0_2.A <= RW0_addr + node RW0_rdata_0_2 = bits(mem_0_2.O, 0, 0) + mem_0_2.I <= bits(RW0_wdata, 2, 2) + mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_2.WEB <= 
not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) + mem_0_2.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_3.CE <= RW0_clk + mem_0_3.A <= RW0_addr + node RW0_rdata_0_3 = bits(mem_0_3.O, 0, 0) + mem_0_3.I <= bits(RW0_wdata, 3, 3) + mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) + mem_0_3.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_4.CE <= RW0_clk + mem_0_4.A <= RW0_addr + node RW0_rdata_0_4 = bits(mem_0_4.O, 0, 0) + mem_0_4.I <= bits(RW0_wdata, 4, 4) + mem_0_4.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_4.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 4, 4)), UInt<1>("h1"))) + mem_0_4.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_5.CE <= RW0_clk + mem_0_5.A <= RW0_addr + node RW0_rdata_0_5 = bits(mem_0_5.O, 0, 0) + mem_0_5.I <= bits(RW0_wdata, 5, 5) + mem_0_5.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_5.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 5, 5)), UInt<1>("h1"))) + mem_0_5.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_6.CE <= RW0_clk + mem_0_6.A <= RW0_addr + node RW0_rdata_0_6 = bits(mem_0_6.O, 0, 0) + mem_0_6.I <= bits(RW0_wdata, 6, 6) + mem_0_6.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_6.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 6, 6)), UInt<1>("h1"))) + mem_0_6.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_7.CE <= RW0_clk + mem_0_7.A <= RW0_addr + node RW0_rdata_0_7 = bits(mem_0_7.O, 0, 0) + mem_0_7.I <= bits(RW0_wdata, 7, 7) + mem_0_7.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_7.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1"))) + mem_0_7.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_8.CE <= RW0_clk + mem_0_8.A <= RW0_addr + node RW0_rdata_0_8 = bits(mem_0_8.O, 0, 0) + mem_0_8.I <= bits(RW0_wdata, 8, 8) + mem_0_8.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_8.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 8, 8)), UInt<1>("h1"))) + mem_0_8.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_9.CE <= RW0_clk + mem_0_9.A <= RW0_addr + node RW0_rdata_0_9 = bits(mem_0_9.O, 0, 0) + mem_0_9.I <= bits(RW0_wdata, 9, 9) + mem_0_9.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_9.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 9, 9)), UInt<1>("h1"))) + mem_0_9.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_10.CE <= RW0_clk + mem_0_10.A <= RW0_addr + node RW0_rdata_0_10 = bits(mem_0_10.O, 0, 0) + mem_0_10.I <= bits(RW0_wdata, 10, 10) + mem_0_10.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_10.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 10, 10)), UInt<1>("h1"))) + mem_0_10.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_11.CE <= RW0_clk + mem_0_11.A <= RW0_addr + node RW0_rdata_0_11 = bits(mem_0_11.O, 0, 0) + mem_0_11.I <= bits(RW0_wdata, 11, 11) + mem_0_11.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_11.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 11, 11)), UInt<1>("h1"))) + mem_0_11.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_12.CE <= RW0_clk + mem_0_12.A <= RW0_addr + node RW0_rdata_0_12 = bits(mem_0_12.O, 0, 0) + mem_0_12.I <= bits(RW0_wdata, 12, 12) + mem_0_12.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_12.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 12, 12)), UInt<1>("h1"))) + mem_0_12.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_13.CE <= RW0_clk + mem_0_13.A <= RW0_addr + node RW0_rdata_0_13 = bits(mem_0_13.O, 0, 0) + mem_0_13.I <= bits(RW0_wdata, 13, 13) + mem_0_13.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_13.WEB <= not(and(and(RW0_wmode, 
bits(RW0_wmask, 13, 13)), UInt<1>("h1"))) + mem_0_13.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_14.CE <= RW0_clk + mem_0_14.A <= RW0_addr + node RW0_rdata_0_14 = bits(mem_0_14.O, 0, 0) + mem_0_14.I <= bits(RW0_wdata, 14, 14) + mem_0_14.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_14.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 14, 14)), UInt<1>("h1"))) + mem_0_14.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_15.CE <= RW0_clk + mem_0_15.A <= RW0_addr + node RW0_rdata_0_15 = bits(mem_0_15.O, 0, 0) + mem_0_15.I <= bits(RW0_wdata, 15, 15) + mem_0_15.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_15.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 15, 15)), UInt<1>("h1"))) + mem_0_15.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_16.CE <= RW0_clk + mem_0_16.A <= RW0_addr + node RW0_rdata_0_16 = bits(mem_0_16.O, 0, 0) + mem_0_16.I <= bits(RW0_wdata, 16, 16) + mem_0_16.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_16.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 16, 16)), UInt<1>("h1"))) + mem_0_16.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_17.CE <= RW0_clk + mem_0_17.A <= RW0_addr + node RW0_rdata_0_17 = bits(mem_0_17.O, 0, 0) + mem_0_17.I <= bits(RW0_wdata, 17, 17) + mem_0_17.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_17.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 17, 17)), UInt<1>("h1"))) + mem_0_17.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_18.CE <= RW0_clk + mem_0_18.A <= RW0_addr + node RW0_rdata_0_18 = bits(mem_0_18.O, 0, 0) + mem_0_18.I <= bits(RW0_wdata, 18, 18) + mem_0_18.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_18.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 18, 18)), UInt<1>("h1"))) + mem_0_18.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_19.CE <= RW0_clk + mem_0_19.A <= RW0_addr + node RW0_rdata_0_19 = bits(mem_0_19.O, 0, 0) + mem_0_19.I <= bits(RW0_wdata, 19, 19) + mem_0_19.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_19.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 19, 19)), UInt<1>("h1"))) + mem_0_19.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_20.CE <= RW0_clk + mem_0_20.A <= RW0_addr + node RW0_rdata_0_20 = bits(mem_0_20.O, 0, 0) + mem_0_20.I <= bits(RW0_wdata, 20, 20) + mem_0_20.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_20.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 20, 20)), UInt<1>("h1"))) + mem_0_20.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_21.CE <= RW0_clk + mem_0_21.A <= RW0_addr + node RW0_rdata_0_21 = bits(mem_0_21.O, 0, 0) + mem_0_21.I <= bits(RW0_wdata, 21, 21) + mem_0_21.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_21.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 21, 21)), UInt<1>("h1"))) + mem_0_21.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_22.CE <= RW0_clk + mem_0_22.A <= RW0_addr + node RW0_rdata_0_22 = bits(mem_0_22.O, 0, 0) + mem_0_22.I <= bits(RW0_wdata, 22, 22) + mem_0_22.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_22.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 22, 22)), UInt<1>("h1"))) + mem_0_22.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_23.CE <= RW0_clk + mem_0_23.A <= RW0_addr + node RW0_rdata_0_23 = bits(mem_0_23.O, 0, 0) + mem_0_23.I <= bits(RW0_wdata, 23, 23) + mem_0_23.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_23.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 23, 23)), UInt<1>("h1"))) + mem_0_23.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_24.CE <= RW0_clk + mem_0_24.A <= RW0_addr + node RW0_rdata_0_24 = bits(mem_0_24.O, 0, 0) + mem_0_24.I <= bits(RW0_wdata, 24, 24) + mem_0_24.OEB <= not(and(not(RW0_wmode), 
UInt<1>("h1"))) + mem_0_24.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 24, 24)), UInt<1>("h1"))) + mem_0_24.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_25.CE <= RW0_clk + mem_0_25.A <= RW0_addr + node RW0_rdata_0_25 = bits(mem_0_25.O, 0, 0) + mem_0_25.I <= bits(RW0_wdata, 25, 25) + mem_0_25.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_25.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 25, 25)), UInt<1>("h1"))) + mem_0_25.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_26.CE <= RW0_clk + mem_0_26.A <= RW0_addr + node RW0_rdata_0_26 = bits(mem_0_26.O, 0, 0) + mem_0_26.I <= bits(RW0_wdata, 26, 26) + mem_0_26.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_26.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 26, 26)), UInt<1>("h1"))) + mem_0_26.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_27.CE <= RW0_clk + mem_0_27.A <= RW0_addr + node RW0_rdata_0_27 = bits(mem_0_27.O, 0, 0) + mem_0_27.I <= bits(RW0_wdata, 27, 27) + mem_0_27.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_27.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 27, 27)), UInt<1>("h1"))) + mem_0_27.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_28.CE <= RW0_clk + mem_0_28.A <= RW0_addr + node RW0_rdata_0_28 = bits(mem_0_28.O, 0, 0) + mem_0_28.I <= bits(RW0_wdata, 28, 28) + mem_0_28.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_28.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 28, 28)), UInt<1>("h1"))) + mem_0_28.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_29.CE <= RW0_clk + mem_0_29.A <= RW0_addr + node RW0_rdata_0_29 = bits(mem_0_29.O, 0, 0) + mem_0_29.I <= bits(RW0_wdata, 29, 29) + mem_0_29.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_29.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 29, 29)), UInt<1>("h1"))) + mem_0_29.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_30.CE <= RW0_clk + mem_0_30.A <= RW0_addr + node RW0_rdata_0_30 = bits(mem_0_30.O, 0, 0) + mem_0_30.I <= bits(RW0_wdata, 30, 30) + mem_0_30.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_30.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 30, 30)), UInt<1>("h1"))) + mem_0_30.CSB <= not(and(RW0_en, UInt<1>("h1"))) + mem_0_31.CE <= RW0_clk + mem_0_31.A <= RW0_addr + node RW0_rdata_0_31 = bits(mem_0_31.O, 0, 0) + mem_0_31.I <= bits(RW0_wdata, 31, 31) + mem_0_31.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_31.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 31, 31)), UInt<1>("h1"))) + mem_0_31.CSB <= not(and(RW0_en, UInt<1>("h1"))) + node RW0_rdata_0 = cat(RW0_rdata_0_31, cat(RW0_rdata_0_30, cat(RW0_rdata_0_29, cat(RW0_rdata_0_28, cat(RW0_rdata_0_27, cat(RW0_rdata_0_26, cat(RW0_rdata_0_25, cat(RW0_rdata_0_24, cat(RW0_rdata_0_23, cat(RW0_rdata_0_22, cat(RW0_rdata_0_21, cat(RW0_rdata_0_20, cat(RW0_rdata_0_19, cat(RW0_rdata_0_18, cat(RW0_rdata_0_17, cat(RW0_rdata_0_16, cat(RW0_rdata_0_15, cat(RW0_rdata_0_14, cat(RW0_rdata_0_13, cat(RW0_rdata_0_12, cat(RW0_rdata_0_11, cat(RW0_rdata_0_10, cat(RW0_rdata_0_9, cat(RW0_rdata_0_8, cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))))))))))))))))))))))))))) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + extmodule my_sram_1rw_64x8 : + input CE : Clock + input A : UInt<6> + input I : UInt<8> + output O : UInt<8> + input CSB : UInt<1> + input OEB : UInt<1> + input WEB : UInt<1> + + defname = my_sram_1rw_64x8 +""" + + compileExecuteAndTest(mem, lib, v, output) +} + class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { val mem = 
s"mem-RocketChipTest.json" val lib = s"lib-RocketChipTest.json" From 8a30579a3e0f231c2193c5c387bb752d4654d130 Mon Sep 17 00:00:00 2001 From: edwardcwang Date: Mon, 4 Dec 2017 15:12:42 -0800 Subject: [PATCH 103/273] Support firrtl output in command line for MacroCompiler (#28) * Use the given port prefix (fix a bug preventing two unit tests from passing) * Support firrtl output in addition to Verilog --- macros/src/main/scala/MacroCompiler.scala | 40 ++++++++++++++----- macros/src/test/scala/MacroCompilerSpec.scala | 4 +- 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index a9025e558..cb03d14f8 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -611,6 +611,7 @@ object MacroCompiler extends App { case object Macros extends MacroParam case object Library extends MacroParam case object Verilog extends MacroParam + case object Firrtl extends MacroParam case object CostFunc extends MacroParam case object Mode extends MacroParam type MacroParamMap = Map[MacroParam, String] @@ -620,6 +621,7 @@ object MacroCompiler extends App { " -m, --macro-list: The set of macros to compile", " -l, --library: The set of macros that have blackbox instances", " -v, --verilog: Verilog output", + " -f, --firrtl: FIRRTL output (optional)", " -c, --cost-func: Cost function to use. Optional (default: \"default\")", " -cp, --cost-param: Cost function parameter. (Optional depending on the cost function.). e.g. -c ExternalMetric -cp path /path/to/my/cost/script", """ --mode: @@ -638,6 +640,8 @@ object MacroCompiler extends App { parseArgs(map + (Library -> value), costMap, tail) case ("-v" | "--verilog") :: value :: tail => parseArgs(map + (Verilog -> value), costMap, tail) + case ("-f" | "--firrtl") :: value :: tail => + parseArgs(map + (Firrtl -> value), costMap, tail) case ("-c" | "--cost-func") :: value :: tail => parseArgs(map + (CostFunc -> value), costMap, tail) case ("-cp" | "--cost-param") :: value1 :: value2 :: tail => @@ -655,9 +659,6 @@ object MacroCompiler extends App { try { val macros = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) - // Open the writer for the output Verilog file. - val verilogWriter = new FileWriter(new File(params.get(Verilog).get)) - if (macros.nonEmpty) { // Note: the last macro in the input list is (seemingly arbitrarily) // determined as the firrtl "top-level module". @@ -677,18 +678,39 @@ object MacroCompiler extends App { // Run the compiler. val result = new MacroCompiler().compileAndEmit(state) - // Extract Verilog circuit and write it. - verilogWriter.write(result.getEmittedCircuit.value) - } + // Write output FIRRTL file. + params.get(Firrtl) match { + case Some(firrtlFile: String) => { + val fileWriter = new FileWriter(new File(firrtlFile)) + fileWriter.write(result.circuit.serialize) + fileWriter.close() + } + case None => + } + + // Write output Verilog file. + params.get(Verilog) match { + case Some(verilogFile: String) => { + // Open the writer for the output Verilog file. + val verilogWriter = new FileWriter(new File(verilogFile)) - // Close the writer. - verilogWriter.close() + // Extract Verilog circuit and write it. + verilogWriter.write(result.getEmittedCircuit.value) + + // Close the writer. 
+ verilogWriter.close() + } + case None => + } + } } catch { case e: java.util.NoSuchElementException => println(usage) + e.printStackTrace() sys.exit(1) case e: MacroCompilerException => - System.err.println(e.getMessage) + println(usage) + e.printStackTrace() sys.exit(1) case e: Throwable => throw e diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index b5efbf9d8..e3a72ec2f 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -335,7 +335,7 @@ s""" * @param readEnable Has a read enable port? * @param mask Mask granularity (# bits) of the port or None. */ def generateReadWriteFooterPort(prefix: String, readEnable: Boolean, mask: Option[Int]): String = { - generatePort(libPortPrefix, lib_addr_width, libWidth, + generatePort(prefix, lib_addr_width, libWidth, write=true, writeEnable=true, read=true, readEnable=readEnable, mask) } @@ -344,7 +344,7 @@ s""" * @param readEnable Has a read enable port? * @param mask Mask granularity (# bits) of the port or None. */ def generateReadWriteHeaderPort(prefix: String, readEnable: Boolean, mask: Option[Int]): String = { - generatePort(memPortPrefix, mem_addr_width, memWidth, + generatePort(prefix, mem_addr_width, memWidth, write=true, writeEnable=true, read=true, readEnable=readEnable, mask) } From 79c8c283cc68e6019ab2abe96098b7f6a3363208 Mon Sep 17 00:00:00 2001 From: Adam Izraelevitz Date: Fri, 16 Feb 2018 16:01:10 -0800 Subject: [PATCH 104/273] Add memory compiler to macros (#29) * Add memory compiler to macros * Removed weird spacing * Make sramcompiler width/depth range inclusive * Added sramcompiler test --- macros/src/main/scala/MacroCompiler.scala | 45 ++++--- macros/src/main/scala/Utils.scala | 33 ++++- macros/src/test/scala/MacroCompilerSpec.scala | 120 ++++++++++-------- macros/src/test/scala/SRAMCompiler.scala | 22 ++++ macros/src/test/scala/SimpleSplitDepth.scala | 30 ++--- macros/src/test/scala/SimpleSplitWidth.scala | 26 ++-- mdf | 2 +- 7 files changed, 178 insertions(+), 100 deletions(-) create mode 100644 macros/src/test/scala/SRAMCompiler.scala diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index cb03d14f8..fac8e309a 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -68,7 +68,7 @@ object MacroCompilerAnnotation { * @param costMetric Cost metric to use * @param mode Compiler mode (see CompilerMode) */ - case class Params(mem: String, lib: Option[String], costMetric: CostMetric, mode: CompilerMode) + case class Params(mem: String, lib: Option[String], costMetric: CostMetric, mode: CompilerMode, useCompiler: Boolean) /** * Create a MacroCompilerAnnotation. @@ -142,15 +142,15 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } // Make sure we don't have a maskGran larger than the width of the memory. - assert (memPort.src.effectiveMaskGran <= memPort.src.width) - assert (libPort.src.effectiveMaskGran <= libPort.src.width) + assert (memPort.src.effectiveMaskGran <= memPort.src.width.get) + assert (libPort.src.effectiveMaskGran <= libPort.src.width.get) - val libWidth = libPort.src.width + val libWidth = libPort.src.width.get // Don't consider cases of maskGran == width as "masked" since those masks // effectively function as write-enable bits. 
- val memMask = if (memPort.src.effectiveMaskGran == memPort.src.width) None else memPort.src.maskGran - val libMask = if (libPort.src.effectiveMaskGran == libPort.src.width) None else libPort.src.maskGran + val memMask = if (memPort.src.effectiveMaskGran == memPort.src.width.get) None else memPort.src.maskGran + val libMask = if (libPort.src.effectiveMaskGran == libPort.src.width.get) None else libPort.src.maskGran (memMask, libMask) match { // Neither lib nor mem is masked. @@ -163,12 +163,12 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Only the mem is masked. case (Some(p), None) => { - if (p % libPort.src.width == 0) { + if (p % libPort.src.width.get == 0) { // If the mem mask is a multiple of the lib width, then we're good. // Just roll over every lib width as usual. // e.g. lib width=4, mem maskGran={4, 8, 12, 16, ...} splitMemory(libWidth) - } else if (libPort.src.width % p == 0) { + } else if (libPort.src.width.get % p == 0) { // Lib width is a multiple of the mem mask. // Consider the case where mem mask = 4 but lib width = 8, unmasked. // We can still compile, but will need to waste the extra bits. @@ -176,13 +176,13 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } else { // No neat multiples. // We might still be able to compile extremely inefficiently. - if (p < libPort.src.width) { + if (p < libPort.src.width.get) { // Compile using mem mask as the effective width. (note that lib is not masked) // e.g. mem mask = 3, lib width = 8 splitMemory(memMask.get) } else { // e.g. mem mask = 13, lib width = 8 - System.err.println(s"Unmasked target memory: unaligned mem maskGran ${p} with lib (${lib.src.name}) width ${libPort.src.width} not supported") + System.err.println(s"Unmasked target memory: unaligned mem maskGran ${p} with lib (${lib.src.name}) width ${libPort.src.width.get} not supported") return None } } @@ -378,13 +378,13 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], case Some(PolarizedPort(mem, _)) => /* Palmer: The bits from the outer memory's write mask that will be * used as the write mask for this inner memory. */ - if (libPort.src.effectiveMaskGran == libPort.src.width) { + if (libPort.src.effectiveMaskGran == libPort.src.width.get) { bits(WRef(mem), low / memPort.src.effectiveMaskGran) } else { require(isPowerOfTwo(libPort.src.effectiveMaskGran), "only powers of two masks supported for now") - val effectiveLibWidth = if (memPort.src.maskGran.get < libPort.src.effectiveMaskGran) memPort.src.maskGran.get else libPort.src.width - cat(((0 until libPort.src.width by libPort.src.effectiveMaskGran) map (i => { + val effectiveLibWidth = if (memPort.src.maskGran.get < libPort.src.effectiveMaskGran) memPort.src.maskGran.get else libPort.src.width.get + cat(((0 until libPort.src.width.get by libPort.src.effectiveMaskGran) map (i => { if (memPort.src.maskGran.get < libPort.src.effectiveMaskGran && i >= effectiveLibWidth) { // If the memMaskGran is smaller than the lib's gran, then // zero out the upper bits. @@ -398,7 +398,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], /* If there is a lib mask port but no mem mask port, just turn on * all bits of the lib mask port. */ if (libPort.src.maskPort.isDefined) { - val width = libPort.src.width / libPort.src.effectiveMaskGran + val width = libPort.src.width.get / libPort.src.effectiveMaskGran val value = (BigInt(1) << width.toInt) - 1 UIntLiteral(value, IntWidth(width)) } else { @@ -525,7 +525,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Run the cost function to evaluate this potential compile. 
costMetric.cost(mem, lib) match { case Some(newCost) => { - System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${newCost}") + //System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${newCost}") // Try compiling compile(mem, lib) match { // If it was successful and the new cost is lower @@ -561,7 +561,7 @@ class MacroCompilerTransform extends Transform { def inputForm = MidForm def outputForm = MidForm def execute(state: CircuitState) = getMyAnnotations(state) match { - case Seq(MacroCompilerAnnotation(state.circuit.main, MacroCompilerAnnotation.Params(memFile, libFile, costMetric, mode))) => + case Seq(MacroCompilerAnnotation(state.circuit.main, MacroCompilerAnnotation.Params(memFile, libFile, costMetric, mode, useCompiler))) => if (mode == MacroCompilerAnnotation.FallbackSynflops) { throw new UnsupportedOperationException("Not implemented yet") } @@ -573,7 +573,10 @@ class MacroCompilerTransform extends Transform { } val libs: Option[Seq[Macro]] = mdf.macrolib.Utils.readMDFFromPath(libFile) match { case Some(x:Seq[mdf.macrolib.Macro]) => - Some(Utils.filterForSRAM(Some(x)) getOrElse(List()) map {new Macro(_)}) + if(useCompiler){ + findSRAMCompiler(Some(x)).map{x => buildSRAMMacros(x).map(new Macro(_)) } + } + else Some(Utils.filterForSRAM(Some(x)) getOrElse(List()) map {new Macro(_)}) case _ => None } val transforms = Seq( @@ -614,12 +617,14 @@ object MacroCompiler extends App { case object Firrtl extends MacroParam case object CostFunc extends MacroParam case object Mode extends MacroParam + case object UseCompiler extends MacroParam type MacroParamMap = Map[MacroParam, String] type CostParamMap = Map[String, String] val usage = Seq( "Options:", " -m, --macro-list: The set of macros to compile", " -l, --library: The set of macros that have blackbox instances", + " -u, --use-compiler: Flag, whether to use the memory compiler defined in library", " -v, --verilog: Verilog output", " -f, --firrtl: FIRRTL output (optional)", " -c, --cost-func: Cost function to use. 
Optional (default: \"default\")", @@ -638,6 +643,8 @@ object MacroCompiler extends App { parseArgs(map + (Macros -> value), costMap, tail) case ("-l" | "--library") :: value :: tail => parseArgs(map + (Library -> value), costMap, tail) + case ("-u" | "--use-compiler") :: tail => + parseArgs(map + (UseCompiler -> ""), costMap, tail) case ("-v" | "--verilog") :: value :: tail => parseArgs(map + (Verilog -> value), costMap, tail) case ("-f" | "--firrtl") :: value :: tail => @@ -669,7 +676,8 @@ object MacroCompiler extends App { MacroCompilerAnnotation.Params( params.get(Macros).get, params.get(Library), CostMetric.getCostMetric(params.getOrElse(CostFunc, "default"), costParams), - MacroCompilerAnnotation.stringToCompilerMode(params.getOrElse(Mode, "default")) + MacroCompilerAnnotation.stringToCompilerMode(params.getOrElse(Mode, "default")), + params.contains(UseCompiler) ) )) ) @@ -705,6 +713,7 @@ object MacroCompiler extends App { } } catch { case e: java.util.NoSuchElementException => + e.printStackTrace() println(usage) e.printStackTrace() sys.exit(1) diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index 78c5007b6..ded5c53f8 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -18,9 +18,9 @@ class FirrtlMacroPort(port: MacroPort) { val isWriter = port.input.nonEmpty && port.output.isEmpty val isReadWriter = port.input.nonEmpty && port.output.nonEmpty - val addrType = UIntType(IntWidth(ceilLog2(port.depth) max 1)) - val dataType = UIntType(IntWidth(port.width)) - val maskType = UIntType(IntWidth(port.width / port.effectiveMaskGran)) + val addrType = UIntType(IntWidth(ceilLog2(port.depth.get) max 1)) + val dataType = UIntType(IntWidth(port.width.get)) + val maskType = UIntType(IntWidth(port.width.get / port.effectiveMaskGran)) // Bundle representing this macro port. 
val tpe = BundleType(Seq( @@ -72,6 +72,33 @@ object Utils { case _ => None } } + def findSRAMCompiler(s: Option[Seq[mdf.macrolib.Macro]]): Option[mdf.macrolib.SRAMCompiler] = { + s match { + case Some(l:Seq[mdf.macrolib.Macro]) => + l collectFirst { + case x: mdf.macrolib.SRAMCompiler => x + } + case _ => None + } + } + def buildSRAMMacros(s: mdf.macrolib.SRAMCompiler): Seq[mdf.macrolib.SRAMMacro] = { + for (g <- s.groups; d <- g.depth; w <- g.width; vt <- g.vt) + yield mdf.macrolib.SRAMMacro(makeName(g, d, w, vt), w, d, g.family, g.ports.map(_.copy(width=Some(w), depth=Some(d))), g.extraPorts) + } + def makeName(g: mdf.macrolib.SRAMGroup, depth: Int, width: Int, vt: String): String = { + g.name.foldLeft(""){ (builder, next) => + next match { + case "depth"|"DEPTH" => builder + depth + case "width"|"WIDTH" => builder + width + case "vt" => builder + vt.toLowerCase + case "VT" => builder + vt.toUpperCase + case "family" => builder + g.family.toLowerCase + case "FAMILY" => builder + g.family.toUpperCase + case "mux"|"MUX" => builder + g.mux + case other => builder + other + } + } + } def and(e1: Expression, e2: Expression) = DoPrim(PrimOps.And, Seq(e1, e2), Nil, e1.tpe) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index e3a72ec2f..ade8d6ae3 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -7,6 +7,8 @@ import firrtl.Utils.ceilLog2 import java.io.{File, StringWriter} abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalatest.Matchers { + import scala.language.implicitConversions + implicit def String2SomeString(i: String): Option[String] = Some(i) val testDir: String = "test_run_dir/macros" new File(testDir).mkdirs // Make sure the testDir exists @@ -32,11 +34,12 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate } } - private def args(mem: String, lib: Option[String], v: String, synflops: Boolean) = + private def args(mem: String, lib: Option[String], v: String, synflops: Boolean, useCompiler: Boolean) = List("-m", mem.toString, "-v", v) ++ (lib match { case None => Nil case Some(l) => List("-l", l.toString) }) ++ costMetricCmdLine ++ - (if (synflops) List("--mode", "synflops") else Nil) + (if (synflops) List("--mode", "synflops") else Nil) ++ + (if (useCompiler) List("--use-compiler") else Nil) // Run the full compiler as if from the command line interface. // Generates the Verilog; useful in testing since an error will throw an @@ -44,12 +47,12 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate def compile(mem: String, lib: String, v: String, synflops: Boolean) { compile(mem, Some(lib), v, synflops) } - def compile(mem: String, lib: Option[String], v: String, synflops: Boolean) { + def compile(mem: String, lib: Option[String], v: String, synflops: Boolean, useCompiler: Boolean = false) { var mem_full = concat(memPrefix, mem) var lib_full = concat(libPrefix, lib) var v_full = concat(vPrefix, v) - MacroCompiler.run(args(mem_full, lib_full, v_full, synflops)) + MacroCompiler.run(args(mem_full, lib_full, v_full, synflops, useCompiler)) } // Helper functions to write macro libraries to the given files. @@ -62,14 +65,11 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate } // Convenience function for running both compile, execute, and test at once. 
- def compileExecuteAndTest(mem: String, lib: Option[String], v: String, output: String, synflops: Boolean): Unit = { - compile(mem, lib, v, synflops) - val result = execute(mem, lib, synflops) + def compileExecuteAndTest(mem: String, lib: Option[String], v: String, output: String, synflops: Boolean = false, useCompiler: Boolean = false): Unit = { + compile(mem, lib, v, synflops, useCompiler) + val result = execute(mem, lib, synflops, useCompiler) test(result, output) } - def compileExecuteAndTest(mem: String, lib: String, v: String, output: String, synflops: Boolean = false): Unit = { - compileExecuteAndTest(mem, Some(lib), v, output, synflops) - } // Compare FIRRTL outputs after reparsing output with ScalaTest ("should be"). def test(result: Circuit, output: String): Unit = { @@ -79,21 +79,20 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate // Execute the macro compiler and returns a Circuit containing the output of // the memory compiler. - def execute(memFile: String, libFile: Option[String], synflops: Boolean): Circuit = { - execute(Some(memFile), libFile, synflops) - } - def execute(memFile: String, libFile: String, synflops: Boolean): Circuit = { - execute(Some(memFile), Some(libFile), synflops) - } - def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean): Circuit = { + def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean): Circuit = execute(memFile, libFile, synflops, false) + def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean, useCompiler: Boolean): Circuit = { var mem_full = concat(memPrefix, memFile) var lib_full = concat(libPrefix, libFile) require(memFile.isDefined) val mems: Seq[Macro] = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(mem_full)).get map (new Macro(_)) - val libs: Option[Seq[Macro]] = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(lib_full)) match { - case Some(x) => Some(x map (new Macro(_))) - case None => None + val libs: Option[Seq[Macro]] = if(useCompiler) { + Utils.findSRAMCompiler(mdf.macrolib.Utils.readMDFFromPath(lib_full)).map{x => Utils.buildSRAMMacros(x).map(new Macro(_)) } + } else { + Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(lib_full)) match { + case Some(x) => Some(x map (new Macro(_))) + case None => None + } } val macros = mems map (_.blackbox) val circuit = Circuit(NoInfo, macros, macros.last.name) @@ -105,6 +104,7 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate result } + // Helper method to deal with String + Option[String] private def concat(a: String, b: String): String = {a + "/" + b} private def concat(a: String, b: Option[String]): Option[String] = { @@ -118,12 +118,16 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate // A collection of standard SRAM generators. trait HasSRAMGenerator { import mdf.macrolib._ + import scala.language.implicitConversions + implicit def Int2SomeInt(i: Int): Option[Int] = Some(i) + // Generate a standard (read/write/combo) port for testing. 
+ // Helper methods for optional width argument def generateTestPort( prefix: String, - width: Int, - depth: Int, + width: Option[Int], + depth: Option[Int], maskGran: Option[Int] = None, read: Boolean, readEnable: Boolean = false, @@ -133,55 +137,69 @@ trait HasSRAMGenerator { val realPrefix = if (prefix == "") "" else prefix + "_" MacroPort( - address=PolarizedPort(name=realPrefix + "addr", polarity=ActiveHigh), - clock=PolarizedPort(name=realPrefix + "clk", polarity=PositiveEdge), + address = PolarizedPort(name = realPrefix + "addr", polarity = ActiveHigh), + clock = PolarizedPort(name = realPrefix + "clk", polarity = PositiveEdge), - readEnable=if (readEnable) Some(PolarizedPort(name=realPrefix + "read_en", polarity=ActiveHigh)) else None, - writeEnable=if (writeEnable) Some(PolarizedPort(name=realPrefix + "write_en", polarity=ActiveHigh)) else None, + readEnable = if (readEnable) Some(PolarizedPort(name = realPrefix + "read_en", polarity = ActiveHigh)) else None, + writeEnable = if (writeEnable) Some(PolarizedPort(name = realPrefix + "write_en", polarity = ActiveHigh)) else None, - output=if (read) Some(PolarizedPort(name=realPrefix + "dout", polarity=ActiveHigh)) else None, - input=if (write) Some(PolarizedPort(name=realPrefix + "din", polarity=ActiveHigh)) else None, + output = if (read) Some(PolarizedPort(name = realPrefix + "dout", polarity = ActiveHigh)) else None, + input = if (write) Some(PolarizedPort(name = realPrefix + "din", polarity = ActiveHigh)) else None, - maskPort=maskGran match { - case Some(x:Int) => Some(PolarizedPort(name=realPrefix + "mask", polarity=ActiveHigh)) + maskPort = maskGran match { + case Some(x: Int) => Some(PolarizedPort(name = realPrefix + "mask", polarity = ActiveHigh)) case _ => None }, - maskGran=maskGran, + maskGran = maskGran, - width=width, depth=depth // These numbers don't matter here. + width = width, depth = depth // These numbers don't matter here. ) } // Generate a read port for testing. - def generateReadPort(prefix: String, width: Int, depth: Int, readEnable: Boolean = false): MacroPort = { - generateTestPort(prefix, width, depth, write=false, read=true, readEnable=readEnable) + def generateReadPort(prefix: String, width: Option[Int], depth: Option[Int], readEnable: Boolean = false): MacroPort = { + generateTestPort(prefix, width, depth, write = false, read = true, readEnable = readEnable) } // Generate a write port for testing. - def generateWritePort(prefix: String, width: Int, depth: Int, maskGran: Option[Int] = None, writeEnable: Boolean = true): MacroPort = { - generateTestPort(prefix, width, depth, maskGran=maskGran, write=true, read=false, writeEnable=writeEnable) + def generateWritePort(prefix: String, width: Option[Int], depth: Option[Int], maskGran: Option[Int] = None, writeEnable: Boolean = true): MacroPort = { + generateTestPort(prefix, width, depth, maskGran = maskGran, write = true, read = false, writeEnable = writeEnable) } // Generate a simple read-write port for testing. 
- def generateReadWritePort(prefix: String, width: Int, depth: Int, maskGran: Option[Int] = None): MacroPort = { + def generateReadWritePort(prefix: String, width: Option[Int], depth: Option[Int], maskGran: Option[Int] = None): MacroPort = { generateTestPort( - prefix, width, depth, maskGran=maskGran, - write=true, writeEnable=true, - read=true, readEnable=false + prefix, width, depth, maskGran = maskGran, + write = true, writeEnable = true, + read = true, readEnable = false ) } // Generate a "simple" SRAM (active high/positive edge, 1 read-write port). def generateSRAM(name: String, prefix: String, width: Int, depth: Int, maskGran: Option[Int] = None, extraPorts: Seq[MacroExtraPort] = List()): SRAMMacro = { SRAMMacro( - name=name, - width=width, - depth=depth, - family="1rw", - ports=Seq(generateReadWritePort(prefix, width, depth, maskGran)), - extraPorts=extraPorts + name = name, + width = width, + depth = depth, + family = "1rw", + ports = Seq(generateReadWritePort(prefix, width, depth, maskGran)), + extraPorts = extraPorts ) } + + // Generate a "simple" SRAM group (active high/positive edge, 1 read-write port). + def generateSimpleSRAMGroup(prefix: String, mux: Int, depth: Range, width: Range, maskGran: Option[Int] = None, extraPorts: Seq[MacroExtraPort] = List()): SRAMGroup = { + SRAMGroup(Seq("mygroup_", "width", "x", "depth", "_", "VT"), "1rw", Seq("svt", "lvt", "ulvt"), mux, depth, width, Seq(generateReadWritePort(prefix, None, None, maskGran))) + } + + // 'vt': ('svt','lvt','ulvt'), 'mux': 2, 'depth': range(16,513,8), 'width': range(8,289,2), 'ports': 1 + // 'vt': ('svt','lvt','ulvt'), 'mux': 4, 'depth': range(32,1025,16), 'width': range(4,145), 'ports': 1} + def generateSRAMCompiler(name: String, prefix: String): mdf.macrolib.SRAMCompiler = { + SRAMCompiler(name, Seq( + generateSimpleSRAMGroup(prefix, 2, Range(16, 512, 8), Range(8, 288, 2)), + generateSimpleSRAMGroup(prefix, 4, Range(32, 1024, 16), Range(4, 144, 1)) + )) + } } // Generic "simple" test generator. @@ -192,6 +210,7 @@ trait HasSimpleTestGenerator { // Override these with "override lazy val". // Why lazy? These are used in the constructor here so overriding non-lazily // would be too late. + def useCompiler: Boolean = false def memWidth: Int def libWidth: Int def memDepth: Int @@ -224,10 +243,10 @@ trait HasSimpleTestGenerator { val lib = s"lib-${generatorType}${extraTagPrefixed}.json" val v = s"${generatorType}${extraTagPrefixed}.v" - val mem_name = "target_memory" + lazy val mem_name = "target_memory" val mem_addr_width = ceilLog2(memDepth) - val lib_name = "awesome_lib_mem" + lazy val lib_name = "awesome_lib_mem" val lib_addr_width = ceilLog2(libDepth) // Override these to change the port prefixes if needed. @@ -258,8 +277,8 @@ trait HasSimpleTestGenerator { // Number of width bits in the last width-direction memory. // e.g. if memWidth = 16 and libWidth = 8, this would be 8 since the last memory 0_1 has 8 bits of input width. // e.g. if memWidth = 9 and libWidth = 8, this would be 1 since the last memory 0_1 has 1 bit of input width. - val lastWidthBits = if (memWidth % usableLibWidth == 0) usableLibWidth else (memWidth % usableLibWidth) - val selectBits = mem_addr_width - lib_addr_width + lazy val lastWidthBits = if (memWidth % usableLibWidth == 0) usableLibWidth else (memWidth % usableLibWidth) + lazy val selectBits = mem_addr_width - lib_addr_width /** * Convenience function to generate a mask statement. 
@@ -410,3 +429,4 @@ trait HasNoLibTestGenerator extends HasSimpleTestGenerator { // If there is no lib, don't generate a body. override def generateBody = "" } + diff --git a/macros/src/test/scala/SRAMCompiler.scala b/macros/src/test/scala/SRAMCompiler.scala new file mode 100644 index 000000000..ea6667e9f --- /dev/null +++ b/macros/src/test/scala/SRAMCompiler.scala @@ -0,0 +1,22 @@ +package barstools.macros + +import mdf.macrolib._ + +class SRAMCompiler extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { + val compiler = generateSRAMCompiler("awesome", "A") + val verilog = s"v-SRAMCompiler.v" + override lazy val depth = 16 + override lazy val memWidth = 8 + override lazy val libWidth = 8 + override lazy val mem_name = "mymem" + override lazy val memPortPrefix = "X" + override lazy val lib_name = "mygroup_8x16_SVT" + override lazy val libPortPrefix = "A" + + writeToLib(lib, Seq(compiler)) + + + writeToMem(mem, Seq(generateSRAM("mymem", "X", 8, 16))) + + compileExecuteAndTest(mem, Some(lib), verilog, output=output, false, true) +} diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index 9d1c6dcd9..6a623447f 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -20,27 +20,27 @@ trait HasSimpleDepthTestGenerator extends HasSimpleTestGenerator { if (selectBits > 0) { output.append ( s""" - node outer_addr_sel = bits(outer_addr, ${mem_addr_width - 1}, $lib_addr_width) - reg outer_addr_sel_reg : UInt<${selectBits}>, outer_clk with : - reset => (UInt<1>("h0"), outer_addr_sel_reg) - outer_addr_sel_reg <= mux(UInt<1>("h1"), outer_addr_sel, outer_addr_sel_reg) + node ${memPortPrefix}_addr_sel = bits(${memPortPrefix}_addr, ${mem_addr_width - 1}, $lib_addr_width) + reg ${memPortPrefix}_addr_sel_reg : UInt<${selectBits}>, ${memPortPrefix}_clk with : + reset => (UInt<1>("h0"), ${memPortPrefix}_addr_sel_reg) + ${memPortPrefix}_addr_sel_reg <= mux(UInt<1>("h1"), ${memPortPrefix}_addr_sel, ${memPortPrefix}_addr_sel_reg) """ ) } for (i <- 0 to depthInstances - 1) { val maskStatement = generateMaskStatement(0, i) - val enableIdentifier = if (selectBits > 0) s"""eq(outer_addr_sel, UInt<${selectBits}>("h${i.toHexString}"))""" else "UInt<1>(\"h1\")" + val enableIdentifier = if (selectBits > 0) s"""eq(${memPortPrefix}_addr_sel, UInt<${selectBits}>("h${i.toHexString}"))""" else "UInt<1>(\"h1\")" output.append( s""" - inst mem_${i}_0 of awesome_lib_mem - mem_${i}_0.lib_clk <= outer_clk - mem_${i}_0.lib_addr <= outer_addr - node outer_dout_${i}_0 = bits(mem_${i}_0.lib_dout, ${width - 1}, 0) - mem_${i}_0.lib_din <= bits(outer_din, ${width - 1}, 0) + inst mem_${i}_0 of ${lib_name} + mem_${i}_0.${libPortPrefix}_clk <= ${memPortPrefix}_clk + mem_${i}_0.${libPortPrefix}_addr <= ${memPortPrefix}_addr + node ${memPortPrefix}_dout_${i}_0 = bits(mem_${i}_0.${libPortPrefix}_dout, ${width - 1}, 0) + mem_${i}_0.${libPortPrefix}_din <= bits(${memPortPrefix}_din, ${width - 1}, 0) ${maskStatement} - mem_${i}_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), ${enableIdentifier}) - node outer_dout_${i} = outer_dout_${i}_0 + mem_${i}_0.${libPortPrefix}_write_en <= and(and(${memPortPrefix}_write_en, UInt<1>("h1")), ${enableIdentifier}) + node ${memPortPrefix}_dout_${i} = ${memPortPrefix}_dout_${i}_0 """ ) } @@ -48,16 +48,16 @@ s""" if (i > depthInstances - 1) { "UInt<1>(\"h0\")" } else { - "mux(eq(outer_addr_sel_reg, UInt<%d>(\"h%s\")), outer_dout_%d, %s)".format( + 
s"""mux(eq(${memPortPrefix}_addr_sel_reg, UInt<%d>("h%s")), ${memPortPrefix}_dout_%d, %s)""".format( selectBits, i.toHexString, i, generate_outer_dout_tree(i + 1, depthInstances) ) } } - output append " outer_dout <= " + output append s" ${memPortPrefix}_dout <= " if (selectBits > 0) { output append generate_outer_dout_tree(0, depthInstances) } else { - output append """mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0"))""" + output append s"""mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<1>("h0"))""" } output.toString diff --git a/macros/src/test/scala/SimpleSplitWidth.scala b/macros/src/test/scala/SimpleSplitWidth.scala index f9835bd4a..1096e4178 100644 --- a/macros/src/test/scala/SimpleSplitWidth.scala +++ b/macros/src/test/scala/SimpleSplitWidth.scala @@ -40,28 +40,28 @@ trait HasSimpleWidthTestGenerator extends HasSimpleTestGenerator { } else """UInt<1>("h1")""" s""" - mem_0_${i}.lib_clk <= outer_clk - mem_0_${i}.lib_addr <= outer_addr - node outer_dout_0_${i} = bits(mem_0_${i}.lib_dout, ${myMemWidth - 1}, 0) - mem_0_${i}.lib_din <= bits(outer_din, ${myBaseBit + myMemWidth - 1}, ${myBaseBit}) + mem_0_${i}.${libPortPrefix}_clk <= ${memPortPrefix}_clk + mem_0_${i}.${libPortPrefix}_addr <= ${memPortPrefix}_addr + node ${memPortPrefix}_dout_0_${i} = bits(mem_0_${i}.${libPortPrefix}_dout, ${myMemWidth - 1}, 0) + mem_0_${i}.${libPortPrefix}_din <= bits(${memPortPrefix}_din, ${myBaseBit + myMemWidth - 1}, ${myBaseBit}) ${maskStatement} - mem_0_${i}.lib_write_en <= and(and(outer_write_en, ${writeEnableBit}), UInt<1>("h1")) + mem_0_${i}.${libPortPrefix}_write_en <= and(and(${memPortPrefix}_write_en, ${writeEnableBit}), UInt<1>("h1")) """ }).reduceLeft(_ + _) // Generate final output that concats together the sub-memories. // e.g. cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0)) output append { - val doutStatements = ((widthInstances - 1 to 0 by -1) map (i => s"outer_dout_0_${i}")) + val doutStatements = ((widthInstances - 1 to 0 by -1) map (i => s"${memPortPrefix}_dout_0_${i}")) val catStmt = doutStatements.init.foldRight(doutStatements.last)((l: String, r: String) => s"cat($l, $r)") s""" - node outer_dout_0 = ${catStmt} + node ${memPortPrefix}_dout_0 = ${catStmt} """ } output append -""" - outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0")) +s""" + ${memPortPrefix}_dout <= mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<1>("h0")) """ output.toString } @@ -398,7 +398,7 @@ class SplitWidth1024x32_readEnable_Lib extends MacroCompilerSpec with HasSRAMGen depth=libDepth, family="1rw", ports=Seq(generateTestPort( - "lib", libWidth, libDepth, maskGran=libMaskGran, + "lib", Some(libWidth), Some(libDepth), maskGran=libMaskGran, write=true, writeEnable=true, read=true, readEnable=true )) @@ -456,7 +456,7 @@ class SplitWidth1024x32_readEnable_Mem extends MacroCompilerSpec with HasSRAMGen depth=memDepth, family="1rw", ports=Seq(generateTestPort( - "outer", memWidth, memDepth, maskGran=memMaskGran, + "outer", Some(memWidth), Some(memDepth), maskGran=memMaskGran, write=true, writeEnable=true, read=true, readEnable=true )) @@ -482,7 +482,7 @@ class SplitWidth1024x32_readEnable_LibMem extends MacroCompilerSpec with HasSRAM depth=libDepth, family="1rw", ports=Seq(generateTestPort( - "lib", libWidth, libDepth, maskGran=libMaskGran, + "lib", Some(libWidth), Some(libDepth), maskGran=libMaskGran, write=true, writeEnable=true, read=true, readEnable=true )) @@ -496,7 +496,7 @@ class SplitWidth1024x32_readEnable_LibMem extends MacroCompilerSpec with HasSRAM depth=memDepth, family="1rw", 
ports=Seq(generateTestPort( - "outer", memWidth, memDepth, maskGran=memMaskGran, + "outer", Some(memWidth), Some(memDepth), maskGran=memMaskGran, write=true, writeEnable=true, read=true, readEnable=true )) diff --git a/mdf b/mdf index 2b5f3c16d..2bc5a363e 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit 2b5f3c16daac6cd6eb9ed6aa2b9d836cd6e0648c +Subproject commit 2bc5a363e23e0276e8e33115e7d1a06c62e774ad From 1ccd8f6dbc8115b45a1d932f8ebfe014cf5a5334 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Fri, 16 Feb 2018 16:03:05 -0800 Subject: [PATCH 105/273] Bump mdf to match master --- mdf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mdf b/mdf index 2bc5a363e..ee50cc2b0 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit 2bc5a363e23e0276e8e33115e7d1a06c62e774ad +Subproject commit ee50cc2b096c5d7f64afdd9a54db40a9cc2ca484 From f7634b82cddad5f02e8e3f79bdd6ad72763f808b Mon Sep 17 00:00:00 2001 From: edwardcwang Date: Wed, 21 Mar 2018 14:50:18 -0700 Subject: [PATCH 106/273] Include macro compiler JAR compilation instructions --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index bc6f3d9b9..935dc6ad1 100644 --- a/README.md +++ b/README.md @@ -15,3 +15,17 @@ sbt > project tapeout > run-main barstools.tapeout.transforms.GenerateTop -i .fir -o .v --syn-top --harness-top ``` +Building the macro compiler JAR: +``` +$ sbt +[...] +[info] Set current project to root (in build file:/mnt/data/dev/barstools_pcad/) +> project macros +[info] Set current project to macros (in build file:/mnt/data/dev/barstools_pcad/) +> assembly +[...] +[info] SHA-1: 77d4c759c825fd0ea93dfec26dbbb649f6cd5c89 +[info] Packaging [...]/macros/target/scala-2.11/macros-assembly-0.1-SNAPSHOT.jar ... +[info] Done packaging. +[success] Total time: 28 s, completed Mar 21, 2018 2:28:34 PM +``` From 93bf7895bee4fe866ede244e91da9514bb321087 Mon Sep 17 00:00:00 2001 From: edwardcwang Date: Thu, 26 Apr 2018 10:33:55 -0700 Subject: [PATCH 107/273] Fix corner case in compiling a small mem using a large lib (#32) * Refactor bit pairs calculation into a separate function * Minor clarifications * Clarify MacroCompilerSpec helpers * Add SmallTagArrayTest test * Fix corner case in compiling a small mem using a large lib --- macros/src/main/scala/MacroCompiler.scala | 99 +++++++++++++------ macros/src/test/scala/MacroCompilerSpec.scala | 89 ++++++++++------- macros/src/test/scala/SpecificExamples.scala | 34 +++++++ 3 files changed, 155 insertions(+), 67 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index fac8e309a..ad38d3449 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -102,17 +102,20 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], }) } - def compile(mem: Macro, lib: Macro): Option[(Module, ExtModule)] = { + /** + * Calculate bit pairs. + * This is a list of submemories by width. + * The tuples are (lsb, msb) inclusive. + * Example: (0, 7) and (8, 15) might be a split for a width=16 memory into two width=8 target memories. + * Another example: (0, 3), (4, 7), (8, 11) may be a split for a width-12 memory into 3 width-4 target memories. + * + * @param mem Memory to compile + * @param lib Lib to compile with + * @return Bit pairs or empty list if there was an error. 
+ */ + private def calculateBitPairs(mem: Macro, lib: Macro): Seq[(BigInt, BigInt)] = { val pairedPorts = mem.sortedPorts zip lib.sortedPorts - // Width mapping - - /** - * This is a list of submemories by width. - * The tuples are (lsb, msb) inclusive. - * e.g. (0, 7) and (8, 15) might be a split for a width=16 memory into two - * width=8 memories. - */ val bitPairs = ArrayBuffer[(BigInt, BigInt)]() var currentLSB: BigInt = 0 @@ -133,7 +136,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Helper function to check if it's time to split memories. // @param effectiveLibWidth Split memory when we have this many bits. def splitMemory(effectiveLibWidth: Int): Unit = { - assert (!alreadySplit) + assert(!alreadySplit) if (bitsInCurrentMem == effectiveLibWidth) { bitPairCandidates += ((currentLSB, memBit - 1)) @@ -142,8 +145,8 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } // Make sure we don't have a maskGran larger than the width of the memory. - assert (memPort.src.effectiveMaskGran <= memPort.src.width.get) - assert (libPort.src.effectiveMaskGran <= libPort.src.width.get) + assert(memPort.src.effectiveMaskGran <= memPort.src.width.get) + assert(libPort.src.effectiveMaskGran <= libPort.src.width.get) val libWidth = libPort.src.width.get @@ -182,8 +185,8 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], splitMemory(memMask.get) } else { // e.g. mem mask = 13, lib width = 8 - System.err.println(s"Unmasked target memory: unaligned mem maskGran ${p} with lib (${lib.src.name}) width ${libPort.src.width.get} not supported") - return None + System.err.println(s"Unmasked target memory: unaligned mem maskGran $p with lib (${lib.src.name}) width ${libPort.src.width.get} not supported") + return Seq() } } } @@ -199,8 +202,8 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Mem maskGran is a multiple of lib maskGran, carry on as normal. splitMemory(libWidth) } else { - System.err.println(s"Mem maskGran ${m} is not a multiple of lib maskGran ${l}: currently not supported") - return None + System.err.println(s"Mem maskGran $m is not a multiple of lib maskGran $l: currently not supported") + return Seq() } } else { // m < l // Lib maskGran > mem maskGran. @@ -218,8 +221,8 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // of treating it as simply a width 4 (!!!) memory. // This would require a major refactor though. } else { - System.err.println(s"Lib maskGran ${m} is not a multiple of mem maskGran ${l}: currently not supported") - return None + System.err.println(s"Lib maskGran $m is not a multiple of mem maskGran $l: currently not supported") + return Seq() } } } @@ -228,7 +231,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Choose an actual bit pair to add. // We'll have to choose the smallest one (e.g. unmasked read port might be more tolerant of a bigger split than the masked write port). 
- if (bitPairCandidates.length == 0) { + if (bitPairCandidates.isEmpty) { // No pair needed to split, just continue } else { val bestPair = bitPairCandidates.reduceLeft((leftPair, rightPair) => { @@ -240,7 +243,22 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } // Add in the last chunk if there are any leftovers bitPairs += ((currentLSB, mem.src.width.toInt - 1)) - // Check bit pairs + + bitPairs.toSeq + } + + def compile(mem: Macro, lib: Macro): Option[(Module, ExtModule)] = { + assert(mem.sortedPorts.lengthCompare(lib.sortedPorts.length) == 0, + "mem and lib should have an equal number of ports") + val pairedPorts = mem.sortedPorts zip lib.sortedPorts + + // Width mapping. See calculateBitPairs. + val bitPairs: Seq[(BigInt, BigInt)] = calculateBitPairs(mem, lib) + if (bitPairs.isEmpty) { + System.err.println("Error occurred during bitPairs calculations (bitPairs is empty).") + return None + } + // Check bit pairs. checkBitPairs(bitPairs) // Depth mapping @@ -278,8 +296,9 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], for ((off, i) <- (0 until mem.src.depth by lib.src.depth).zipWithIndex) { for (j <- bitPairs.indices) { val name = s"mem_${i}_${j}" + // Create the instance. stmts += WDefInstance(NoInfo, name, lib.src.name, lib.tpe) - // connect extra ports + // Connect extra ports of the lib. stmts ++= lib.extraPorts map { case (portName, portValue) => Connect(NoInfo, WSubField(WRef(name), portName), portValue) } @@ -383,14 +402,29 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } else { require(isPowerOfTwo(libPort.src.effectiveMaskGran), "only powers of two masks supported for now") - val effectiveLibWidth = if (memPort.src.maskGran.get < libPort.src.effectiveMaskGran) memPort.src.maskGran.get else libPort.src.width.get + // How much of this lib's width we are effectively using. + // If we have a mem maskGran less than the lib's maskGran, we'll have to take the smaller maskGran. + // Example: if we have a lib whose maskGran is 8 but our mem's maskGran is 4. + // The other case is if we're using a larger lib than mem. + val usingLessThanLibMaskGran = (memPort.src.maskGran.get < libPort.src.effectiveMaskGran) + val effectiveLibWidth = if (usingLessThanLibMaskGran) + memPort.src.maskGran.get + else + libPort.src.width.get + cat(((0 until libPort.src.width.get by libPort.src.effectiveMaskGran) map (i => { - if (memPort.src.maskGran.get < libPort.src.effectiveMaskGran && i >= effectiveLibWidth) { + if (usingLessThanLibMaskGran && i >= effectiveLibWidth) { // If the memMaskGran is smaller than the lib's gran, then // zero out the upper bits. zero } else { - bits(WRef(mem), (low + i) / memPort.src.effectiveMaskGran) + if (i >= memPort.src.width.get) { + // If our bit is larger than the whole width of the mem, just zero out the upper bits. + zero + } else { + // Pick the appropriate bit from the mem mask. 
+ bits(WRef(mem), (low + i) / memPort.src.effectiveMaskGran) + } } })).reverse) } @@ -589,9 +623,11 @@ class MacroCompilerTransform extends Transform { // FIXME: Use firrtl.LowerFirrtlOptimizations class MacroCompilerOptimizations extends SeqTransform { - def inputForm = LowForm - def outputForm = LowForm - def transforms = Seq( + def inputForm: CircuitForm = LowForm + + def outputForm: CircuitForm = LowForm + + def transforms: Seq[Transform] = Seq( passes.RemoveValidIf, new firrtl.transforms.ConstantPropagation, passes.memlib.VerilogMemDelays, @@ -602,11 +638,12 @@ class MacroCompilerOptimizations extends SeqTransform { } class MacroCompiler extends Compiler { - def emitter = new VerilogEmitter - def transforms = + def emitter: Emitter = new VerilogEmitter + + def transforms: Seq[Transform] = Seq(new MacroCompilerTransform) ++ - getLoweringTransforms(firrtl.HighForm, firrtl.LowForm) ++ - Seq(new MacroCompilerOptimizations) + getLoweringTransforms(firrtl.HighForm, firrtl.LowForm) ++ + Seq(new MacroCompilerOptimizations) } object MacroCompiler extends App { diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index ade8d6ae3..40c613ed8 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -6,6 +6,8 @@ import firrtl.Parser.parse import firrtl.Utils.ceilLog2 import java.io.{File, StringWriter} +import mdf.macrolib.SRAMMacro + abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalatest.Matchers { import scala.language.implicitConversions implicit def String2SomeString(i: String): Option[String] = Some(i) @@ -228,7 +230,7 @@ trait HasSimpleTestGenerator { // generator. def generatorType: String = this.getClass.getSimpleName - require (memDepth >= libDepth) + //require (memDepth >= libDepth) // Convenience variables to check if a mask exists. val memHasMask = memMaskGran != None @@ -258,11 +260,14 @@ trait HasSimpleTestGenerator { def generateLibSRAM() = generateSRAM(lib_name, libPortPrefix, libWidth, libDepth, libMaskGran, extraPorts) def generateMemSRAM() = generateSRAM(mem_name, memPortPrefix, memWidth, memDepth, memMaskGran) - val libSRAM = generateLibSRAM - val memSRAM = generateMemSRAM + def libSRAM = generateLibSRAM + def memSRAM = generateMemSRAM + + def libSRAMs: Seq[SRAMMacro] = Seq(libSRAM) + def memSRAMs: Seq[SRAMMacro] = Seq(memSRAM) - writeToLib(lib, Seq(libSRAM)) - writeToMem(mem, Seq(memSRAM)) + writeToLib(lib, libSRAMs) + writeToMem(mem, memSRAMs) // For masks, width it's a bit tricky since we have to consider cases like // memMaskGran = 4 and libMaskGran = 8. @@ -321,41 +326,52 @@ trait HasSimpleTestGenerator { } /** Helper function to generate a port. - * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") - * @param addrWidth Address port width - * @param width data width - * @param write Has a write port? - * @param writeEnable Has a write enable port? - * @param read Has a read port? - * @param readEnable Has a read enable port? - * @param mask Mask granularity (# bits) of the port or None. 
*/ - def generatePort(prefix: String, addrWidth: Int, width: Int, write: Boolean, writeEnable: Boolean, read: Boolean, readEnable: Boolean, mask: Option[Int]): String = { - val readStr = if (read) s"output ${prefix}_dout : UInt<$width>" else "" - val writeStr = if (write) s"input ${prefix}_din : UInt<$width>" else "" - val readEnableStr = if (readEnable) s"input ${prefix}_read_en : UInt<1>" else "" - val writeEnableStr = if (writeEnable) s"input ${prefix}_write_en : UInt<1>" else "" + * + * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") + * @param addrWidth Address port width + * @param width data width + * @param write Has a write port? + * @param writeEnable Has a write enable port? + * @param read Has a read port? + * @param readEnable Has a read enable port? + * @param mask Mask granularity (# bits) of the port or None. + * @param extraPorts Extra ports (name, # bits) + */ + def generatePort(prefix: String, addrWidth: Int, width: Int, write: Boolean, writeEnable: Boolean, read: Boolean, readEnable: Boolean, mask: Option[Int], extraPorts: Seq[(String, Int)] = Seq()): String = { + val realPrefix = if (prefix == "") "" else prefix + "_" + + val readStr = if (read) s"output ${realPrefix}dout : UInt<$width>" else "" + val writeStr = if (write) s"input ${realPrefix}din : UInt<$width>" else "" + val readEnableStr = if (readEnable) s"input ${realPrefix}read_en : UInt<1>" else "" + val writeEnableStr = if (writeEnable) s"input ${realPrefix}write_en : UInt<1>" else "" val maskStr = mask match { - case Some(maskBits: Int) => s"input ${prefix}_mask : UInt<${maskBits}>" + case Some(maskBits: Int) => s"input ${realPrefix}mask : UInt<$maskBits>" case _ => "" } -s""" - input ${prefix}_clk : Clock - input ${prefix}_addr : UInt<$addrWidth> - ${writeStr} - ${readStr} - ${readEnableStr} - ${writeEnableStr} - ${maskStr} -""" + val extraPortsStr = extraPorts.map { case (name, bits) => s" input $name : UInt<$bits>" }.mkString("\n") + s""" + input ${realPrefix}clk : Clock + input ${realPrefix}addr : UInt<$addrWidth> + $writeStr + $readStr + $readEnableStr + $writeEnableStr + $maskStr +$extraPortsStr + """ } - /** Helper function to generate a RW footer port. - * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") - * @param readEnable Has a read enable port? - * @param mask Mask granularity (# bits) of the port or None. */ - def generateReadWriteFooterPort(prefix: String, readEnable: Boolean, mask: Option[Int]): String = { + /** + * Helper function to generate a RW footer port. + * + * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") + * @param readEnable Has a read enable port? + * @param mask Mask granularity (# bits) of the port or None. + * @param extraPorts Extra ports (name, # bits) + */ + def generateReadWriteFooterPort(prefix: String, readEnable: Boolean, mask: Option[Int], extraPorts: Seq[(String, Int)] = Seq()): String = { generatePort(prefix, lib_addr_width, libWidth, - write=true, writeEnable=true, read=true, readEnable=readEnable, mask) + write = true, writeEnable = true, read = true, readEnable = readEnable, mask = mask, extraPorts = extraPorts) } /** Helper function to generate a RW header port. @@ -385,8 +401,9 @@ ${generateHeaderPorts} // Generate the target memory ports. 
def generateFooterPorts(): String = { - require (libSRAM.ports.size == 1, "Footer generator only supports single RW port mem") - generateReadWriteFooterPort(libPortPrefix, libSRAM.ports(0).readEnable.isDefined, if (libHasMask) Some(libMaskBits) else None) + require(libSRAM.ports.size == 1, "Footer generator only supports single RW port mem") + generateReadWriteFooterPort(libPortPrefix, libSRAM.ports(0).readEnable.isDefined, + if (libHasMask) Some(libMaskBits) else None, extraPorts.map(p => (p.name, p.width))) } // Generate the footer (contains the target memory extmodule declaration by default). diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala index 338569d66..2ca1ddf03 100644 --- a/macros/src/test/scala/SpecificExamples.scala +++ b/macros/src/test/scala/SpecificExamples.scala @@ -1,3 +1,4 @@ +// See LICENSE for license details. package barstools.macros import mdf.macrolib._ @@ -1232,6 +1233,39 @@ circuit smem_0_ext : compileExecuteAndTest(mem, lib, v, output) } +class SmallTagArrayTest extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleTestGenerator { + // Test that mapping a smaller memory using a larger lib can still work. + override def memWidth: Int = 26 + override def memDepth: Int = 2 + override def memMaskGran: Option[Int] = Some(26) + override def memPortPrefix: String = "" + + override def libWidth: Int = 32 + override def libDepth: Int = 64 + override def libMaskGran: Option[Int] = Some(1) + override def libPortPrefix: String = "" + + override def extraPorts: Seq[MacroExtraPort] = Seq( + MacroExtraPort(name = "must_be_one", portType = Constant, width = 1, value = 1) + ) + + override def generateBody(): String = + s""" + | inst mem_0_0 of $lib_name + | mem_0_0.must_be_one <= UInt<1>("h1") + | mem_0_0.clk <= clk + | mem_0_0.addr <= addr + | node dout_0_0 = bits(mem_0_0.dout, 25, 0) + | mem_0_0.din <= bits(din, 25, 0) + | mem_0_0.mask <= cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), bits(mask, 0, 0)))))))))))))))))))))))))))))))) + | mem_0_0.write_en <= and(and(write_en, UInt<1>("h1")), UInt<1>("h1")) + | node dout_0 = dout_0_0 + | dout <= mux(UInt<1>("h1"), dout_0, UInt<1>("h0")) + """.stripMargin + + compileExecuteAndTest(mem, lib, v, output) +} + class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { val mem = s"mem-RocketChipTest.json" val lib = s"lib-RocketChipTest.json" From 74ca2bc491b62ba96b771405f46416278216664b Mon Sep 17 00:00:00 2001 From: edwardcwang Date: Wed, 31 Oct 2018 13:47:28 -0700 Subject: [PATCH 108/273] Remove deprecated run-main --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 935dc6ad1..ae5f04823 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Example Usage: sbt > compile > project tapeout -> run-main barstools.tapeout.transforms.GenerateTop -i .fir -o .v --syn-top --harness-top +> runMain 
barstools.tapeout.transforms.GenerateTop -i .fir -o .v --syn-top --harness-top ``` Building the macro compiler JAR: ``` From d1c1b3fba6b98616fcf07ff2644fc13404bf1e15 Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Wed, 25 Apr 2018 23:28:42 -0700 Subject: [PATCH 109/273] Overhaul CompilerMode parsing --- macros/src/main/scala/MacroCompiler.scala | 43 ++++++++++++++--------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index ad38d3449..df97e611d 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -39,6 +39,8 @@ object MacroCompilerAnnotation { case object Strict extends CompilerMode /** Synflops mode - compile all memories with synflops (do not map to lib at all). */ case object Synflops extends CompilerMode + /** CompileAndSynflops mode - compile all memories and create mock versions of the target libs with synflops. */ + case object CompileAndSynflops extends CompilerMode /** FallbackSynflops - compile all memories to SRAM when possible and fall back to synflops if a memory fails. **/ case object FallbackSynflops extends CompilerMode /** CompileAvailable - compile what is possible and do nothing with uncompiled memories. **/ @@ -51,14 +53,19 @@ object MacroCompilerAnnotation { */ val Default = CompileAvailable + // Options as list of (CompilerMode, command-line name, description) + val options: Seq[(CompilerMode, String, String)] = Seq( + (Strict, "strict", "Compile all memories to library or return an error."), + (Synflops, "synflops", "Produces synthesizable flop-based memories for all memories (do not map to lib at all); likely useful for simulation purposes."), + (CompileAndSynflops, "compileandsynflops", "Compile all memories and create mock versions of the target libs with synflops; likely also useful for simulation purposes."), + (FallbackSynflops, "fallbacksynflops", "Compile all memories to library when possible and fall back to synthesizable flop-based memories when library synth is not possible."), + (CompileAvailable, "compileavailable", "Compile all memories to library when possible and do nothing in case of errors. (default)") + ) + /** Helper function to select a compiler mode. 
*/ - def stringToCompilerMode(str: String): CompilerMode = (str: @unchecked) match { - case "strict" => Strict - case "synflops" => Synflops - case "fallbacksynflops" => FallbackSynflops - case "compileavailable" => CompileAvailable - case "default" => Default - case _ => throw new IllegalArgumentException("No such compiler mode " + str) + def stringToCompilerMode(str: String): CompilerMode = options.collectFirst { case (mode, cmd, _) if cmd == str => mode } match { + case Some(x) => x + case None => throw new IllegalArgumentException("No such compiler mode " + str) } /** @@ -614,8 +621,13 @@ class MacroCompilerTransform extends Transform { case _ => None } val transforms = Seq( - new MacroCompilerPass(mems, libs, costMetric, mode), - new SynFlopsPass(mode == MacroCompilerAnnotation.Synflops, libs getOrElse mems.get)) + new MacroCompilerPass(mems, if (mode != MacroCompilerAnnotation.Synflops) libs else None, costMetric, mode), + new SynFlopsPass(mode == MacroCompilerAnnotation.Synflops || mode == MacroCompilerAnnotation.CompileAndSynflops, + if (mode == MacroCompilerAnnotation.CompileAndSynflops) { + libs.get + } else { + mems.get + })) (transforms foldLeft state)((s, xform) => xform runTransform s).copy(form=outputForm) case _ => state } @@ -655,9 +667,12 @@ object MacroCompiler extends App { case object CostFunc extends MacroParam case object Mode extends MacroParam case object UseCompiler extends MacroParam + type MacroParamMap = Map[MacroParam, String] type CostParamMap = Map[String, String] - val usage = Seq( + val modeOptions: Seq[String] = MacroCompilerAnnotation.options + .map { case (_, cmd, description) => s" $cmd: $description" } + val usage: String = (Seq( "Options:", " -m, --macro-list: The set of macros to compile", " -l, --library: The set of macros that have blackbox instances", @@ -666,12 +681,8 @@ object MacroCompiler extends App { " -f, --firrtl: FIRRTL output (optional)", " -c, --cost-func: Cost function to use. Optional (default: \"default\")", " -cp, --cost-param: Cost function parameter. (Optional depending on the cost function.). e.g. -c ExternalMetric -cp path /path/to/my/cost/script", - """ --mode: - | strict: Compile all memories to library or return an error. - | synflops: Produces synthesizable flop-based memories (for all memories and library memory macros); likely useful for simulation purposes. - | fallbacksynflops: Compile all memories to library when possible and fall back to synthesizable flop-based memories when library synth is not possible. - | compileavailable: Compile all memories to library when possible and do nothing in case of errors. 
(default) - """.stripMargin) mkString "\n" + " --mode:" + ) ++ modeOptions) mkString "\n" def parseArgs(map: MacroParamMap, costMap: CostParamMap, args: List[String]): (MacroParamMap, CostParamMap) = args match { From 4727d475c762d0b5eb93f3128b2e4097f7dc501f Mon Sep 17 00:00:00 2001 From: Edward Wang Date: Thu, 26 Apr 2018 10:33:05 -0700 Subject: [PATCH 110/273] Add options to force certain memories to lib or synflops --- macros/src/main/scala/MacroCompiler.scala | 91 ++++++++++++++++------- 1 file changed, 63 insertions(+), 28 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index df97e611d..62bf518f2 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -55,6 +55,7 @@ object MacroCompilerAnnotation { // Options as list of (CompilerMode, command-line name, description) val options: Seq[(CompilerMode, String, String)] = Seq( + (Default, "default", "Select the default option from below."), (Strict, "strict", "Compile all memories to library or return an error."), (Synflops, "synflops", "Produces synthesizable flop-based memories for all memories (do not map to lib at all); likely useful for simulation purposes."), (CompileAndSynflops, "compileandsynflops", "Compile all memories and create mock versions of the target libs with synflops; likely also useful for simulation purposes."), @@ -69,13 +70,17 @@ object MacroCompilerAnnotation { } /** - * Parameters associated to this MacroCompilerAnnotation. - * @param mem Path to memory lib - * @param lib Path to library lib or None if no libraries - * @param costMetric Cost metric to use - * @param mode Compiler mode (see CompilerMode) - */ - case class Params(mem: String, lib: Option[String], costMetric: CostMetric, mode: CompilerMode, useCompiler: Boolean) + * Parameters associated to this MacroCompilerAnnotation. + * + * @param mem Path to memory lib + * @param lib Path to library lib or None if no libraries + * @param costMetric Cost metric to use + * @param mode Compiler mode (see CompilerMode) + * @param forceCompile Set of memories to force compiling to lib regardless of the mode + * @param forceSynflops Set of memories to force compiling as flops regardless of the mode + */ + case class Params(mem: String, lib: Option[String], costMetric: CostMetric, mode: CompilerMode, useCompiler: Boolean, + forceCompile: Set[String], forceSynflops: Set[String]) /** * Create a MacroCompilerAnnotation. @@ -601,11 +606,17 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], class MacroCompilerTransform extends Transform { def inputForm = MidForm def outputForm = MidForm + def execute(state: CircuitState) = getMyAnnotations(state) match { - case Seq(MacroCompilerAnnotation(state.circuit.main, MacroCompilerAnnotation.Params(memFile, libFile, costMetric, mode, useCompiler))) => + case Seq(MacroCompilerAnnotation(state.circuit.main, + MacroCompilerAnnotation.Params(memFile, libFile, costMetric, mode, useCompiler, forceCompile, forceSynflops))) => if (mode == MacroCompilerAnnotation.FallbackSynflops) { throw new UnsupportedOperationException("Not implemented yet") } + + // Check that we don't have any modules both forced to compile and synflops. 
+ assert((forceCompile intersect forceSynflops).isEmpty, "Cannot have modules both forced to compile and synflops") + // Read, eliminate None, get only SRAM, make firrtl macro val mems: Option[Seq[Macro]] = mdf.macrolib.Utils.readMDFFromPath(Some(memFile)) match { case Some(x:Seq[mdf.macrolib.Macro]) => @@ -620,15 +631,30 @@ class MacroCompilerTransform extends Transform { else Some(Utils.filterForSRAM(Some(x)) getOrElse(List()) map {new Macro(_)}) case _ => None } + + // Helper function to turn a set of mem names into a Seq[Macro]. + def setToSeqMacro(names: Set[String]): Seq[Macro] = { + names.toSeq.map(memName => mems.get.collectFirst { case m if m.src.name == memName => m }.get) + } + + // Build lists of memories for compilation and synflops. + val memCompile = mems.map { actualMems => + val memsAdjustedForMode = if (mode == MacroCompilerAnnotation.Synflops) Seq.empty else actualMems + memsAdjustedForMode.filterNot(m => forceSynflops.contains(m.src.name)) ++ setToSeqMacro(forceCompile) + } + val memSynflops: Seq[Macro] = mems.map { actualMems => + val memsAdjustedForMode = if (mode == MacroCompilerAnnotation.Synflops) actualMems else Seq.empty + memsAdjustedForMode.filterNot(m => forceCompile.contains(m.src.name)) ++ setToSeqMacro(forceSynflops) + }.getOrElse(Seq.empty) + val transforms = Seq( - new MacroCompilerPass(mems, if (mode != MacroCompilerAnnotation.Synflops) libs else None, costMetric, mode), - new SynFlopsPass(mode == MacroCompilerAnnotation.Synflops || mode == MacroCompilerAnnotation.CompileAndSynflops, - if (mode == MacroCompilerAnnotation.CompileAndSynflops) { - libs.get - } else { - mems.get - })) - (transforms foldLeft state)((s, xform) => xform runTransform s).copy(form=outputForm) + new MacroCompilerPass(memCompile, libs, costMetric, mode), + new SynFlopsPass(true, memSynflops ++ (if (mode == MacroCompilerAnnotation.CompileAndSynflops) { + libs.get + } else { + Seq.empty + }))) + (transforms foldLeft state) ((s, xform) => xform runTransform s).copy(form = outputForm) case _ => state } } @@ -670,6 +696,7 @@ object MacroCompiler extends App { type MacroParamMap = Map[MacroParam, String] type CostParamMap = Map[String, String] + type ForcedMemories = (Set[String], Set[String]) val modeOptions: Seq[String] = MacroCompilerAnnotation.options .map { case (_, cmd, description) => s" $cmd: $description" } val usage: String = (Seq( @@ -681,28 +708,35 @@ object MacroCompiler extends App { " -f, --firrtl: FIRRTL output (optional)", " -c, --cost-func: Cost function to use. Optional (default: \"default\")", " -cp, --cost-param: Cost function parameter. (Optional depending on the cost function.). e.g. 
-c ExternalMetric -cp path /path/to/my/cost/script", + " --force-compile [mem]: Force the given memory to be compiled to target libs regardless of the mode", + " --force-synflops [mem]: Force the given memory to be compiled via synflops regardless of the mode", " --mode:" ) ++ modeOptions) mkString "\n" - def parseArgs(map: MacroParamMap, costMap: CostParamMap, args: List[String]): (MacroParamMap, CostParamMap) = + def parseArgs(map: MacroParamMap, costMap: CostParamMap, forcedMemories: ForcedMemories, + args: List[String]): (MacroParamMap, CostParamMap, ForcedMemories) = args match { - case Nil => (map, costMap) + case Nil => (map, costMap, forcedMemories) case ("-m" | "--macro-list") :: value :: tail => - parseArgs(map + (Macros -> value), costMap, tail) + parseArgs(map + (Macros -> value), costMap, forcedMemories, tail) case ("-l" | "--library") :: value :: tail => - parseArgs(map + (Library -> value), costMap, tail) + parseArgs(map + (Library -> value), costMap, forcedMemories, tail) case ("-u" | "--use-compiler") :: tail => - parseArgs(map + (UseCompiler -> ""), costMap, tail) + parseArgs(map + (UseCompiler -> ""), costMap, forcedMemories, tail) case ("-v" | "--verilog") :: value :: tail => - parseArgs(map + (Verilog -> value), costMap, tail) + parseArgs(map + (Verilog -> value), costMap, forcedMemories, tail) case ("-f" | "--firrtl") :: value :: tail => - parseArgs(map + (Firrtl -> value), costMap, tail) + parseArgs(map + (Firrtl -> value), costMap, forcedMemories, tail) case ("-c" | "--cost-func") :: value :: tail => - parseArgs(map + (CostFunc -> value), costMap, tail) + parseArgs(map + (CostFunc -> value), costMap, forcedMemories, tail) case ("-cp" | "--cost-param") :: value1 :: value2 :: tail => - parseArgs(map, costMap + (value1 -> value2), tail) + parseArgs(map, costMap + (value1 -> value2), forcedMemories, tail) + case "--force-compile" :: value :: tail => + parseArgs(map, costMap, forcedMemories.copy(_1 = forcedMemories._1 + value), tail) + case "--force-synflops" :: value :: tail => + parseArgs(map, costMap, forcedMemories.copy(_2 = forcedMemories._2 + value), tail) case "--mode" :: value :: tail => - parseArgs(map + (Mode -> value), costMap, tail) + parseArgs(map + (Mode -> value), costMap, forcedMemories, tail) case arg :: tail => println(s"Unknown field $arg\n") println(usage) @@ -710,7 +744,7 @@ object MacroCompiler extends App { } def run(args: List[String]) { - val (params, costParams) = parseArgs(Map[MacroParam, String](), Map[String, String](), args) + val (params, costParams, forcedMemories) = parseArgs(Map[MacroParam, String](), Map[String, String](), (Set.empty, Set.empty), args) try { val macros = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) @@ -725,7 +759,8 @@ object MacroCompiler extends App { params.get(Macros).get, params.get(Library), CostMetric.getCostMetric(params.getOrElse(CostFunc, "default"), costParams), MacroCompilerAnnotation.stringToCompilerMode(params.getOrElse(Mode, "default")), - params.contains(UseCompiler) + params.contains(UseCompiler), + forceCompile = forcedMemories._1, forceSynflops = forcedMemories._2 ) )) ) From f310d453813be59445344d05e7152443565fc308 Mon Sep 17 00:00:00 2001 From: Paul Rigge Date: Wed, 19 Dec 2018 22:54:46 +0000 Subject: [PATCH 111/273] Refactor barstools for new versions of things. 
- No handlebars (not being published for Scala 2.12) - Update for new annotations APIs Bump sbt-dependency-graph to 0.9.2 for this scala version --- build.sbt | 17 +- macros/src/main/scala/MacroCompiler.scala | 4 +- project/Dependencies.scala | 22 --- project/assembly.sbt | 2 +- project/build.properties | 2 +- .../src/main/scala/transforms/Generate.scala | 184 ++++++++++-------- .../scala/transforms/utils/FileUtils.scala | 13 +- .../scala/transforms/ResetInverterSpec.scala | 4 +- .../scala/transforms/retime/RetimeSpec.scala | 6 +- 9 files changed, 123 insertions(+), 131 deletions(-) delete mode 100644 project/Dependencies.scala diff --git a/build.sbt b/build.sbt index f18110c7f..d254ca248 100644 --- a/build.sbt +++ b/build.sbt @@ -1,21 +1,22 @@ // See LICENSE for license details. -import Dependencies._ - val defaultVersions = Map( - "chisel3" -> "3.1-SNAPSHOT", - "chisel-iotesters" -> "1.2-SNAPSHOT" + "chisel3" -> "3.2-SNAPSHOT", + "chisel-iotesters" -> "1.3-SNAPSHOT" ) lazy val commonSettings = Seq( organization := "edu.berkeley.cs", version := "0.1-SNAPSHOT", - scalaVersion := "2.11.8", - scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls"), - libraryDependencies ++= commonDependencies, + scalaVersion := "2.12.8", + scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls", "-Xsource:2.11"), libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) }, + libraryDependencies in Test ++= Seq( + "org.scalatest" %% "scalatest" % "2.2.5" % "test", + "org.scalacheck" %% "scalacheck" % "1.12.4" % "test" + ), resolvers ++= Seq( Resolver.sonatypeRepo("snapshots"), Resolver.sonatypeRepo("releases") @@ -30,7 +31,7 @@ lazy val macros = (project in file("macros")) .settings(commonSettings) .settings(Seq( libraryDependencies ++= Seq( - "edu.berkeley.cs" %% "firrtl-interpreter" % "0.1-SNAPSHOT" % Test + "edu.berkeley.cs" %% "firrtl-interpreter" % "1.2-SNAPSHOT" % Test ), mainClass := Some("barstools.macros.MacroCompiler") )) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 62bf518f2..410372dd7 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -752,7 +752,7 @@ object MacroCompiler extends App { // Note: the last macro in the input list is (seemingly arbitrarily) // determined as the firrtl "top-level module". val circuit = Circuit(NoInfo, macros, macros.last.name) - val annotations = AnnotationMap( + val annotations = AnnotationSeq( Seq(MacroCompilerAnnotation( circuit.main, MacroCompilerAnnotation.Params( @@ -764,7 +764,7 @@ object MacroCompiler extends App { ) )) ) - val state = CircuitState(circuit, HighForm, Some(annotations)) + val state = CircuitState(circuit, HighForm, annotations) // Run the compiler. val result = new MacroCompiler().compileAndEmit(state) diff --git a/project/Dependencies.scala b/project/Dependencies.scala deleted file mode 100644 index 5c327ec4a..000000000 --- a/project/Dependencies.scala +++ /dev/null @@ -1,22 +0,0 @@ -import sbt._ -import Keys._ - -object Dependencies { - val scalatestVersion = "2.2.5" - val scalatest = "org.scalatest" %% "scalatest" % scalatestVersion % "test" - val scalacheckVersion = "1.12.4" - val scalacheck = "org.scalacheck" %% "scalacheck" % scalacheckVersion % "test" - - // Templating! 
- val handlebarsVersion = "2.1.1" - val handlebars = "com.gilt" %% "handlebars-scala" % handlebarsVersion exclude("org.slf4j", "slf4j-simple") - // org.slf4j.slf4j-simple's StaticLoggerBinder (from handlebars) conflicts with - // ch.qos.logback.logback-classic's StaticLoggerBinder (from firrtl). - - val commonDependencies: Seq[ModuleID] = Seq( - scalatest, - scalacheck, - handlebars - ) - -} diff --git a/project/assembly.sbt b/project/assembly.sbt index 8956d3325..17de943f3 100644 --- a/project/assembly.sbt +++ b/project/assembly.sbt @@ -1,2 +1,2 @@ addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.5") -addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.8.2") +addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.2") diff --git a/project/build.properties b/project/build.properties index 7d789d45d..72f902892 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=0.13.12 \ No newline at end of file +sbt.version=1.2.7 diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 79bbd3b0f..0d976cdd9 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -23,70 +23,78 @@ object AllModules { } } -case class ParsedInput(args: Seq[String]) extends LazyLogging { - var input: Option[String] = None - var output: Option[String] = None - var topOutput: Option[String] = None - var harnessOutput: Option[String] = None - var annoFile: Option[String] = None - var synTop: Option[String] = None - var harnessTop: Option[String] = None - var seqMemFlags: Option[String] = Some("-o:unused.confg") - var listClocks: Option[String] = Some("-o:unused.clocks") - - var usedOptions = Set.empty[Integer] - args.zipWithIndex.foreach{ case (arg, i) => - arg match { - case "-i" => { - input = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "-o" => { - output = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--top-o" => { - topOutput = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--harness-o" => { - harnessOutput = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--anno-file" => { - annoFile = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--syn-top" => { - synTop = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--harness-top" => { - harnessTop = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--seq-mem-flags" => { - seqMemFlags = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case "--list-clocks" => { - listClocks = Some(args(i+1)) - usedOptions = usedOptions | Set(i+1) - } - case _ => { - if (! 
(usedOptions contains i)) { - logger.error("Unknown option " + arg) - } - } +trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => + var tapeoutOptions = TapeoutOptions() + + parser.note("tapeout options") + + parser.opt[String]("harness-o") + .abbr("tho") + .valueName("") + .foreach { x => + tapeoutOptions = tapeoutOptions.copy( + harnessOutput = Some(x) + ) + }.text { + "use this to generate a harness at " + } + + parser.opt[String]("syn-top") + .abbr("tst") + .valueName(" + tapeoutOptions = tapeoutOptions.copy( + synTop = Some(x) + ) + }.text { + "use this to set synTop" } - } + parser.opt[String]("harness-top") + .abbr("tht") + .valueName("") + .foreach { x => + tapeoutOptions = tapeoutOptions.copy( + harnessTop = Some(x) + ) + }.text { + "use this to set harnessTop" + } + + parser.opt[String]("list-clocks") + .abbr("tlc") + .valueName("") + .foreach { x => + tapeoutOptions = tapeoutOptions.copy( + listClocks = Some(x) + ) + }.text { + "use this to list " + } + + parser.note("") } +case class TapeoutOptions( + input: Option[String] = None, + output: Option[String] = None, + topOutput: Option[String] = None, + harnessOutput: Option[String] = None, + annoFile: Option[String] = None, + synTop: Option[String] = None, + harnessTop: Option[String] = None, + seqMemFlags: Option[String] = Some("-o:unused.confg"), + listClocks: Option[String] = Some("-o:unused.clocks") +) extends LazyLogging + // Requires two phases, one to collect modules below synTop in the hierarchy // and a second to remove those modules to generate the test harness sealed trait GenerateTopAndHarnessApp extends App with LazyLogging { - lazy val options: ParsedInput = ParsedInput(args) + val optionsManager = new ExecutionOptionsManager("tapeout") with HasFirrtlOptions with HasTapeoutOptions + if (!optionsManager.parse(args)) { + throw new Exception("Error parsing options!") + } + lazy val options = optionsManager.tapeoutOptions lazy val input = options.input lazy val output = options.output lazy val topOutput = options.topOutput @@ -116,34 +124,32 @@ sealed trait GenerateTopAndHarnessApp extends App with LazyLogging { pre ++ enumerate ++ post } - private def getFirstPhaseAnnotations(top: Boolean): AnnotationMap = { - if (top) { + private def getFirstPhaseAnnotations(top: Boolean): AnnotationSeq = { + if (top) { //Load annotations from file - val annotationArray = annoFile match { + val annotationArray: Seq[Annotation] = annoFile match { case None => Array[Annotation]() case Some(fileName) => { val annotations = new File(fileName) if(annotations.exists) { - val annotationsYaml = io.Source.fromFile(annotations).getLines().mkString("\n").parseYaml - annotationsYaml.convertTo[Array[Annotation]] + val annotationsYaml = io.Source.fromFile(annotations).getLines().mkString("\n") + Seq(AnnotationUtils.fromYaml(annotationsYaml)) // TODO } else { - Array[Annotation]() + Seq[Annotation]() } } } // add new annotations - AnnotationMap(Seq( - passes.memlib.InferReadWriteAnnotation( - s"${synTop.get}" - ), - passes.clocklist.ClockListAnnotation( + AnnotationSeq(Seq( + passes.memlib.InferReadWriteAnnotation, + passes.clocklist.ClockListAnnotation.parse( s"-c:${synTop.get}:-m:${synTop.get}:${listClocks.get}" ), - passes.memlib.ReplSeqMemAnnotation( + passes.memlib.ReplSeqMemAnnotation.parse( s"-c:${synTop.get}:${seqMemFlags.get}" ) ) ++ annotationArray) - } else { AnnotationMap(Seq.empty) } + } else { AnnotationSeq(Seq.empty) } } private def getSecondPhasePasses: Seq[Transform] = { @@ -156,31 +162,36 @@ sealed trait 
GenerateTopAndHarnessApp extends App with LazyLogging { } // always the same for now - private def getSecondPhaseAnnotations: AnnotationMap = AnnotationMap(Seq.empty) + private def getSecondPhaseAnnotations: AnnotationSeq = AnnotationSeq(Seq.empty) // Top Generation protected def firstPhase(top: Boolean, harness: Boolean): Unit = { require(top || harness, "Must specify either top or harness") - firrtl.Driver.compile( - input.get, - topOutput.getOrElse(output.get), - new VerilogCompiler(), - Parser.UseInfo, - getFirstPhasePasses(top, harness), - getFirstPhaseAnnotations(top) + + val firrtlOptions = optionsManager.firrtlOptions + optionsManager.firrtlOptions = firrtlOptions.copy( + annotations = firrtlOptions.annotations ++ getFirstPhaseAnnotations(top) + ) + + optionsManager.firrtlOptions = firrtlOptions.copy( + customTransforms = firrtlOptions.customTransforms ++ getFirstPhasePasses(top, harness) ) } // Harness Generation protected def secondPhase: Unit = { - firrtl.Driver.compile( - input.get, - harnessOutput.getOrElse(output.get), - new VerilogCompiler(), - Parser.UseInfo, - getSecondPhasePasses, - getSecondPhaseAnnotations + val firrtlOptions = optionsManager.firrtlOptions + optionsManager.firrtlOptions = firrtlOptions.copy( + annotations = firrtlOptions.annotations ++ getSecondPhaseAnnotations ) + + optionsManager.firrtlOptions = firrtlOptions.copy( + customTransforms = firrtlOptions.customTransforms ++ getSecondPhasePasses + ) + } + + protected def execute: Unit = { + firrtl.Driver.execute(optionsManager) } } @@ -191,6 +202,7 @@ object GenerateTop extends GenerateTopAndHarnessApp { n => logger.warn(s"Not using generic output filename $n since you asked for just a top-level output and also specified a generic output.")}) // Only need a single phase to generate the top module firstPhase(top = true, harness = false) + execute } object GenerateHarness extends GenerateTopAndHarnessApp { @@ -206,6 +218,7 @@ object GenerateHarness extends GenerateTopAndHarnessApp { // Do minimal work for the first phase to generate test harness firstPhase(top = false, harness = true) secondPhase + execute } object GenerateTopAndHarness extends GenerateTopAndHarnessApp { @@ -214,4 +227,5 @@ object GenerateTopAndHarness extends GenerateTopAndHarnessApp { // Do everything, top and harness generation firstPhase(top = true, harness = true) secondPhase + execute } diff --git a/tapeout/src/main/scala/transforms/utils/FileUtils.scala b/tapeout/src/main/scala/transforms/utils/FileUtils.scala index 5fc358420..7baf3e146 100644 --- a/tapeout/src/main/scala/transforms/utils/FileUtils.scala +++ b/tapeout/src/main/scala/transforms/utils/FileUtils.scala @@ -15,13 +15,10 @@ object WriteConfig { object GetTargetDir { def apply(state: CircuitState): String = { - val annos = state.annotations.getOrElse(AnnotationMap(Seq.empty)).annotations + val annos = state.annotations val destDir = annos.map { - case Annotation(f, t, s) if t == classOf[transforms.BlackBoxSourceHelper] => - transforms.BlackBoxSource.parse(s) match { - case Some(transforms.BlackBoxTargetDir(dest)) => Some(dest) - case _ => None - } + case Annotation(f, t, s) if t == classOf[firrtl.transforms.BlackBoxTargetDirAnno] => + Some(s) case _ => None }.flatten val loc = { @@ -45,14 +42,14 @@ class TechnologyLocation extends Transform { def outputForm: CircuitForm = LowForm def execute(state: CircuitState) = throw new Exception("Technology Location transform execution doesn't work!") def get(state: CircuitState): String = { - val annos = 
state.annotations.getOrElse(AnnotationMap(Seq.empty)).annotations + val annos = state.annotations val dir = annos.map { case Annotation(f, t, s) if t == classOf[TechnologyLocation] => Some(s) case _ => None }.flatten dir.length match { case 0 => "" - case 1 => + case 1 => val targetDir = new java.io.File(dir.head) if(!targetDir.exists()) throw new Exception("Technology yaml directory doesn't exist!") dir.head diff --git a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala index 0ae41dc1e..7abcbf4c9 100644 --- a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala +++ b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala @@ -13,6 +13,8 @@ class ExampleModuleNeedsResetInverted extends Module with ResetInverter { val r = RegInit(0.U) + io.out := r + invert(this) } @@ -35,4 +37,4 @@ class ResetNSpec extends FreeSpec with Matchers { // bad } } -} \ No newline at end of file +} diff --git a/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala b/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala index bd52b5bc0..7c6348370 100644 --- a/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala +++ b/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala @@ -25,10 +25,10 @@ class RetimeSpec extends FlatSpec with Matchers { it should "pass simple retime module annotation" in { val gen = () => new RetimeModule() val dir = uniqueDirName(gen, "RetimeModule") - chisel3.Driver.execute(Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final.anno"), gen) shouldBe a [ChiselExecutionSuccess] + chisel3.Driver.execute(Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final"), gen) shouldBe a [ChiselExecutionSuccess] - val lines = io.Source.fromFile(s"test_run_dir/$dir/final.anno").getLines().map(normalized).toSeq - lines should contain ("Annotation(ModuleName(RetimeModule,CircuitName(RetimeModule)),class barstools.tapeout.transforms.retime.RetimeTransform,retime)") + val lines = io.Source.fromFile(s"test_run_dir/$dir/final.anno.json").getLines().map(normalized).mkString("\n") + lines should include("barstools.tapeout.transforms.retime.RetimeTransform") } // TODO(azidar): need to fix/add instance annotations From 801abd98bb38c4ba88096399e3adda91a56009c4 Mon Sep 17 00:00:00 2001 From: Paul Rigge Date: Wed, 6 Feb 2019 12:26:53 -0800 Subject: [PATCH 112/273] Fix null pointer exception in options parser --- project/{assembly.sbt => plugins.sbt} | 0 .../src/main/scala/transforms/Generate.scala | 38 +++++++++++++------ 2 files changed, 26 insertions(+), 12 deletions(-) rename project/{assembly.sbt => plugins.sbt} (100%) diff --git a/project/assembly.sbt b/project/plugins.sbt similarity index 100% rename from project/assembly.sbt rename to project/plugins.sbt diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 0d976cdd9..5fab8aa99 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -28,6 +28,17 @@ trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => parser.note("tapeout options") + parser.opt[String]("top-o") + .abbr("tto") + .valueName("") + .foreach { x => + tapeoutOptions = tapeoutOptions.copy( + topOutput = Some(x) + ) + }.text { + "use this to generate top at " + } + parser.opt[String]("harness-o") .abbr("tho") .valueName("") @@ -41,7 +52,7 @@ trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => 
parser.opt[String]("syn-top") .abbr("tst") - .valueName("") .foreach { x => tapeoutOptions = tapeoutOptions.copy( synTop = Some(x) @@ -89,10 +100,13 @@ case class TapeoutOptions( // Requires two phases, one to collect modules below synTop in the hierarchy // and a second to remove those modules to generate the test harness -sealed trait GenerateTopAndHarnessApp extends App with LazyLogging { - val optionsManager = new ExecutionOptionsManager("tapeout") with HasFirrtlOptions with HasTapeoutOptions - if (!optionsManager.parse(args)) { - throw new Exception("Error parsing options!") +sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => + lazy val optionsManager = { + val optionsManager = new ExecutionOptionsManager("tapeout") with HasFirrtlOptions with HasTapeoutOptions + if (!optionsManager.parse(args)) { + throw new Exception("Error parsing options!") + } + optionsManager } lazy val options = optionsManager.tapeoutOptions lazy val input = options.input @@ -195,17 +209,17 @@ sealed trait GenerateTopAndHarnessApp extends App with LazyLogging { } } -object GenerateTop extends GenerateTopAndHarnessApp { +object GenerateTop extends App with GenerateTopAndHarnessApp { // warn about unused options harnessOutput.foreach(n => logger.warn(s"Not using harness output filename $n since you asked for just a top-level output.")) - topOutput.foreach(_.foreach{ - n => logger.warn(s"Not using generic output filename $n since you asked for just a top-level output and also specified a generic output.")}) + topOutput.foreach( + n => logger.warn(s"Not using generic output filename $n since you asked for just a top-level output and also specified a generic output.")) // Only need a single phase to generate the top module firstPhase(top = true, harness = false) execute } -object GenerateHarness extends GenerateTopAndHarnessApp { +object GenerateHarness extends App with GenerateTopAndHarnessApp { // warn about unused options topOutput.foreach(n => logger.warn(s"Not using top-level output filename $n since you asked for just a test harness.")) annoFile.foreach(n => logger.warn(s"Not using annotations file $n since you asked for just a test harness.")) @@ -213,15 +227,15 @@ object GenerateHarness extends GenerateTopAndHarnessApp { n => logger.warn(s"Not using SeqMem flags $n since you asked for just a test harness.") } listClocks.filter(_ != "-o:unused.clocks").foreach { n => logger.warn(s"Not using clocks list $n since you asked for just a test harness.") } - harnessOutput.foreach(_.foreach{ - n => logger.warn(s"Not using generic output filename $n since you asked for just a test harness and also specified a generic output.")}) + harnessOutput.foreach( + n => logger.warn(s"Not using generic output filename $n since you asked for just a test harness and also specified a generic output.")) // Do minimal work for the first phase to generate test harness firstPhase(top = false, harness = true) secondPhase execute } -object GenerateTopAndHarness extends GenerateTopAndHarnessApp { +object GenerateTopAndHarness extends App with GenerateTopAndHarnessApp { // warn about unused options output.foreach(n => logger.warn(s"Not using generic output filename $n since you asked for both a top-level output and a test harness.")) // Do everything, top and harness generation From 7bbf7f00f6d363ff9dbf86aa72aaf8b94c830217 Mon Sep 17 00:00:00 2001 From: Paul Rigge Date: Wed, 6 Feb 2019 17:14:28 -0800 Subject: [PATCH 113/273] Run transforms in slightly different order Also, don't rename TestHarness. 
--- tapeout/src/main/scala/transforms/Generate.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 5fab8aa99..24472d181 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -122,14 +122,14 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => private def getFirstPhasePasses(top: Boolean, harness: Boolean): Seq[Transform] = { val pre = Seq( new ReParentCircuit(synTop.get), - new RemoveUnusedModules ) val enumerate = if (harness) { Seq( - new EnumerateModules( { m => if (m.name != options.synTop.get) { AllModules.add(m.name) } } ) + new EnumerateModules( { m => if (m.name != options.harnessTop.get && m.name != options.synTop.get) { AllModules.add(m.name) } } ), ) } else Seq() val post = if (top) { Seq( + new RemoveUnusedModules, new passes.memlib.InferReadWrite(), new passes.memlib.ReplSeqMem(), new passes.clocklist.ClockListTransform() @@ -170,8 +170,8 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => // always the same for now Seq( new ConvertToExtMod((m) => m.name == synTop.get), - new RemoveUnusedModules, - new RenameModulesAndInstances((m) => AllModules.rename(m)) + new RenameModulesAndInstances((m) => AllModules.rename(m)), + // new RemoveUnusedModules, ) } From 22e6d9c5d422e249a3a1d1088799dd77ff0dd9f0 Mon Sep 17 00:00:00 2001 From: Paul Rigge Date: Wed, 6 Feb 2019 20:28:53 -0800 Subject: [PATCH 114/273] Fix repl-seq-mem --- tapeout/src/main/scala/transforms/Generate.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 24472d181..2c68a4e8b 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -131,7 +131,6 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => val post = if (top) { Seq( new RemoveUnusedModules, new passes.memlib.InferReadWrite(), - new passes.memlib.ReplSeqMem(), new passes.clocklist.ClockListTransform() ) } else Seq() From c8efc5e88bfd25214a73723054524a21bea66433 Mon Sep 17 00:00:00 2001 From: John Wright Date: Wed, 6 Feb 2019 21:48:14 -0800 Subject: [PATCH 115/273] Refactor the harness generation; use upstream arguments and passes where appropriate --- .../src/main/scala/transforms/Generate.scala | 163 ++++-------------- 1 file changed, 33 insertions(+), 130 deletions(-) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 2c68a4e8b..f9b3e8dc2 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -15,10 +15,10 @@ object AllModules { def add(module: String) = { modules = modules | Set(module) } - def rename(module: String) = { + def rename(module: String, suffix: String = "_inTestHarness") = { var new_name = module while (modules.contains(new_name)) - new_name = new_name + "_inTestHarness" + new_name = new_name + suffix new_name } } @@ -28,17 +28,6 @@ trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => parser.note("tapeout options") - parser.opt[String]("top-o") - .abbr("tto") - .valueName("") - .foreach { x => - tapeoutOptions = tapeoutOptions.copy( - topOutput = Some(x) - ) - }.text { - "use this to generate top at " - } - parser.opt[String]("harness-o") .abbr("tho") 
.valueName("") @@ -72,173 +61,87 @@ trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => "use this to set harnessTop" } - parser.opt[String]("list-clocks") - .abbr("tlc") - .valueName("") - .foreach { x => - tapeoutOptions = tapeoutOptions.copy( - listClocks = Some(x) - ) - }.text { - "use this to list " - } - parser.note("") } case class TapeoutOptions( - input: Option[String] = None, - output: Option[String] = None, - topOutput: Option[String] = None, harnessOutput: Option[String] = None, - annoFile: Option[String] = None, synTop: Option[String] = None, - harnessTop: Option[String] = None, - seqMemFlags: Option[String] = Some("-o:unused.confg"), - listClocks: Option[String] = Some("-o:unused.clocks") + harnessTop: Option[String] = None ) extends LazyLogging // Requires two phases, one to collect modules below synTop in the hierarchy // and a second to remove those modules to generate the test harness sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => - lazy val optionsManager = { + def getOptionsManager = { val optionsManager = new ExecutionOptionsManager("tapeout") with HasFirrtlOptions with HasTapeoutOptions if (!optionsManager.parse(args)) { throw new Exception("Error parsing options!") } optionsManager } - lazy val options = optionsManager.tapeoutOptions - lazy val input = options.input - lazy val output = options.output - lazy val topOutput = options.topOutput - lazy val harnessOutput = options.harnessOutput - lazy val annoFile = options.annoFile - lazy val synTop = options.synTop - lazy val harnessTop = options.harnessTop - lazy val seqMemFlags = options.seqMemFlags - lazy val listClocks = options.listClocks - - private def getFirstPhasePasses(top: Boolean, harness: Boolean): Seq[Transform] = { - val pre = Seq( + lazy val optionsManager = getOptionsManager + lazy val tapeoutOptions = optionsManager.tapeoutOptions + // Tapeout options + lazy val harnessOutput = tapeoutOptions.harnessOutput + lazy val synTop = tapeoutOptions.synTop + lazy val harnessTop = tapeoutOptions.harnessTop + + lazy val firrtlOptions = optionsManager.firrtlOptions + // FIRRTL options + lazy val annoFiles = firrtlOptions.annotationFileNames + + private def getFirstPhasePasses: Seq[Transform] = { + Seq( new ReParentCircuit(synTop.get), + new RemoveUnusedModules ) - - val enumerate = if (harness) { Seq( - new EnumerateModules( { m => if (m.name != options.harnessTop.get && m.name != options.synTop.get) { AllModules.add(m.name) } } ), - ) } else Seq() - - val post = if (top) { Seq( - new RemoveUnusedModules, - new passes.memlib.InferReadWrite(), - new passes.clocklist.ClockListTransform() - ) } else Seq() - - pre ++ enumerate ++ post - } - - private def getFirstPhaseAnnotations(top: Boolean): AnnotationSeq = { - if (top) { - //Load annotations from file - val annotationArray: Seq[Annotation] = annoFile match { - case None => Array[Annotation]() - case Some(fileName) => { - val annotations = new File(fileName) - if(annotations.exists) { - val annotationsYaml = io.Source.fromFile(annotations).getLines().mkString("\n") - Seq(AnnotationUtils.fromYaml(annotationsYaml)) // TODO - } else { - Seq[Annotation]() - } - } - } - // add new annotations - AnnotationSeq(Seq( - passes.memlib.InferReadWriteAnnotation, - passes.clocklist.ClockListAnnotation.parse( - s"-c:${synTop.get}:-m:${synTop.get}:${listClocks.get}" - ), - passes.memlib.ReplSeqMemAnnotation.parse( - s"-c:${synTop.get}:${seqMemFlags.get}" - ) - ) ++ annotationArray) - } else { AnnotationSeq(Seq.empty) } } private 
def getSecondPhasePasses: Seq[Transform] = { - // always the same for now Seq( new ConvertToExtMod((m) => m.name == synTop.get), - new RenameModulesAndInstances((m) => AllModules.rename(m)), - // new RemoveUnusedModules, + new EnumerateModules( { m => if (m.name != tapeoutOptions.harnessTop.get && m.name != tapeoutOptions.synTop.get) { AllModules.add(m.name) } } ), + new RenameModulesAndInstances((m) => AllModules.rename(m, "_in" + harnessTop.get)), + new RemoveUnusedModules ) } - // always the same for now - private def getSecondPhaseAnnotations: AnnotationSeq = AnnotationSeq(Seq.empty) - // Top Generation - protected def firstPhase(top: Boolean, harness: Boolean): Unit = { - require(top || harness, "Must specify either top or harness") + protected def firstPhase: Unit = { - val firrtlOptions = optionsManager.firrtlOptions - optionsManager.firrtlOptions = firrtlOptions.copy( - annotations = firrtlOptions.annotations ++ getFirstPhaseAnnotations(top) + val firstPhaseOptions = getOptionsManager + firstPhaseOptions.firrtlOptions = firstPhaseOptions.firrtlOptions.copy( + customTransforms = firrtlOptions.customTransforms ++ getFirstPhasePasses ) - optionsManager.firrtlOptions = firrtlOptions.copy( - customTransforms = firrtlOptions.customTransforms ++ getFirstPhasePasses(top, harness) - ) + firrtl.Driver.execute(firstPhaseOptions) } // Harness Generation protected def secondPhase: Unit = { - val firrtlOptions = optionsManager.firrtlOptions - optionsManager.firrtlOptions = firrtlOptions.copy( - annotations = firrtlOptions.annotations ++ getSecondPhaseAnnotations + val secondPhaseOptions = getOptionsManager + secondPhaseOptions.firrtlOptions = secondPhaseOptions.firrtlOptions.copy( + outputFileNameOverride = harnessOutput.get, + customTransforms = getSecondPhasePasses ) - optionsManager.firrtlOptions = firrtlOptions.copy( - customTransforms = firrtlOptions.customTransforms ++ getSecondPhasePasses - ) - } - - protected def execute: Unit = { - firrtl.Driver.execute(optionsManager) + firrtl.Driver.execute(secondPhaseOptions) } } object GenerateTop extends App with GenerateTopAndHarnessApp { - // warn about unused options - harnessOutput.foreach(n => logger.warn(s"Not using harness output filename $n since you asked for just a top-level output.")) - topOutput.foreach( - n => logger.warn(s"Not using generic output filename $n since you asked for just a top-level output and also specified a generic output.")) // Only need a single phase to generate the top module - firstPhase(top = true, harness = false) - execute + firstPhase } object GenerateHarness extends App with GenerateTopAndHarnessApp { - // warn about unused options - topOutput.foreach(n => logger.warn(s"Not using top-level output filename $n since you asked for just a test harness.")) - annoFile.foreach(n => logger.warn(s"Not using annotations file $n since you asked for just a test harness.")) - seqMemFlags.filter(_ != "-o:unused.confg").foreach { - n => logger.warn(s"Not using SeqMem flags $n since you asked for just a test harness.") } - listClocks.filter(_ != "-o:unused.clocks").foreach { - n => logger.warn(s"Not using clocks list $n since you asked for just a test harness.") } - harnessOutput.foreach( - n => logger.warn(s"Not using generic output filename $n since you asked for just a test harness and also specified a generic output.")) // Do minimal work for the first phase to generate test harness - firstPhase(top = false, harness = true) secondPhase - execute } object GenerateTopAndHarness extends App with GenerateTopAndHarnessApp { - 
// warn about unused options - output.foreach(n => logger.warn(s"Not using generic output filename $n since you asked for both a top-level output and a test harness.")) // Do everything, top and harness generation - firstPhase(top = true, harness = true) + firstPhase secondPhase - execute } From 79b8fd324be70f6fd96ced62b733f67dd0aac1da Mon Sep 17 00:00:00 2001 From: John Wright Date: Fri, 8 Feb 2019 01:40:59 -0800 Subject: [PATCH 116/273] This compiles and works correctly, but is kind of hacky, and will break as soon as any additional external/blackbox modules are added to the test harness. The test harness should detect external modules and not rename them instead of what is happening here. --- mdf | 2 +- .../src/main/scala/transforms/Generate.scala | 61 ++++++------------- .../transforms/RemoveUnusedModules.scala | 38 ++++++------ 3 files changed, 41 insertions(+), 60 deletions(-) diff --git a/mdf b/mdf index ee50cc2b0..c13e31656 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit ee50cc2b096c5d7f64afdd9a54db40a9cc2ca484 +Subproject commit c13e31656e1ce572e03acf465beb00e157dcbc06 diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index f9b3e8dc2..8d4caccc5 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -10,19 +10,6 @@ import firrtl.annotations.AnnotationYamlProtocol._ import net.jcazevedo.moultingyaml._ import com.typesafe.scalalogging.LazyLogging -object AllModules { - private var modules = Set[String]() - def add(module: String) = { - modules = modules | Set(module) - } - def rename(module: String, suffix: String = "_inTestHarness") = { - var new_name = module - while (modules.contains(new_name)) - new_name = new_name + suffix - new_name - } -} - trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => var tapeoutOptions = TapeoutOptions() @@ -61,7 +48,6 @@ trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => "use this to set harnessTop" } - parser.note("") } case class TapeoutOptions( @@ -73,75 +59,66 @@ case class TapeoutOptions( // Requires two phases, one to collect modules below synTop in the hierarchy // and a second to remove those modules to generate the test harness sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => - def getOptionsManager = { + lazy val optionsManager = { val optionsManager = new ExecutionOptionsManager("tapeout") with HasFirrtlOptions with HasTapeoutOptions if (!optionsManager.parse(args)) { throw new Exception("Error parsing options!") } optionsManager } - lazy val optionsManager = getOptionsManager lazy val tapeoutOptions = optionsManager.tapeoutOptions // Tapeout options - lazy val harnessOutput = tapeoutOptions.harnessOutput lazy val synTop = tapeoutOptions.synTop lazy val harnessTop = tapeoutOptions.harnessTop - lazy val firrtlOptions = optionsManager.firrtlOptions // FIRRTL options lazy val annoFiles = firrtlOptions.annotationFileNames - private def getFirstPhasePasses: Seq[Transform] = { + private def topTransforms: Seq[Transform] = { Seq( new ReParentCircuit(synTop.get), new RemoveUnusedModules ) } - private def getSecondPhasePasses: Seq[Transform] = { + + private def harnessTransforms: Seq[Transform] = { + // XXX this is a hack, we really should be checking the masters to see if they are ExtModules + val externals = Set(harnessTop.get, synTop.get, "SimSerial") Seq( new ConvertToExtMod((m) => m.name == synTop.get), - new EnumerateModules( 
{ m => if (m.name != tapeoutOptions.harnessTop.get && m.name != tapeoutOptions.synTop.get) { AllModules.add(m.name) } } ), - new RenameModulesAndInstances((m) => AllModules.rename(m, "_in" + harnessTop.get)), - new RemoveUnusedModules + new RemoveUnusedModules, + new RenameModulesAndInstances((old) => if (externals contains old) old else (old + "_in" + harnessTop.get)) ) } // Top Generation - protected def firstPhase: Unit = { + protected def executeTop: Unit = { - val firstPhaseOptions = getOptionsManager - firstPhaseOptions.firrtlOptions = firstPhaseOptions.firrtlOptions.copy( - customTransforms = firrtlOptions.customTransforms ++ getFirstPhasePasses + optionsManager.firrtlOptions = optionsManager.firrtlOptions.copy( + customTransforms = firrtlOptions.customTransforms ++ topTransforms ) - firrtl.Driver.execute(firstPhaseOptions) + firrtl.Driver.execute(optionsManager) } // Harness Generation - protected def secondPhase: Unit = { - val secondPhaseOptions = getOptionsManager - secondPhaseOptions.firrtlOptions = secondPhaseOptions.firrtlOptions.copy( - outputFileNameOverride = harnessOutput.get, - customTransforms = getSecondPhasePasses + protected def executeHarness: Unit = { + + optionsManager.firrtlOptions = optionsManager.firrtlOptions.copy( + customTransforms = harnessTransforms ) - firrtl.Driver.execute(secondPhaseOptions) + firrtl.Driver.execute(optionsManager) } } object GenerateTop extends App with GenerateTopAndHarnessApp { // Only need a single phase to generate the top module - firstPhase + executeTop } object GenerateHarness extends App with GenerateTopAndHarnessApp { // Do minimal work for the first phase to generate test harness - secondPhase -} - -object GenerateTopAndHarness extends App with GenerateTopAndHarnessApp { - // Do everything, top and harness generation - firstPhase - secondPhase + executeHarness } diff --git a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala index 848b7a2c8..410c7326b 100644 --- a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala +++ b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala @@ -5,13 +5,15 @@ package barstools.tapeout.transforms import firrtl._ import firrtl.ir._ import firrtl.passes.Pass +import firrtl.annotations.{SingleTargetAnnotation, Annotation} +import firrtl.transforms.DontTouchAnnotation -// Removes all the unused modules in a circuit by recursing through every -// instance (starting at the main module) -class RemoveUnusedModulesPass extends Pass { +class RemoveUnusedModules extends Transform { + def inputForm = MidForm + def outputForm = MidForm - def run(c: Circuit): Circuit = { - val modulesByName = c.modules.map{ + def execute(state: CircuitState): CircuitState = { + val modulesByName = state.circuit.modules.map{ case m: Module => (m.name, Some(m)) case m: ExtModule => (m.name, None) }.toMap @@ -39,21 +41,23 @@ class RemoveUnusedModulesPass extends Pass { case None => Set.empty[String] } } - val usedModuleSet = getUsedModules(modulesByName(c.main)) + val usedModuleSet = getUsedModules(modulesByName(state.circuit.main)) - val usedModuleSeq = c.modules.filter { usedModuleSet contains _.name } + val usedModuleSeq = state.circuit.modules.filter { usedModuleSet contains _.name } + val usedModuleNames = usedModuleSeq.map(_.name) - Circuit(c.info, usedModuleSeq, c.main) - } -} + val renames = state.renames.getOrElse(RenameMap()) -class RemoveUnusedModules extends Transform with SeqTransformBased { - def inputForm = MidForm - def 
outputForm = MidForm - def transforms = Seq(new RemoveUnusedModulesPass) + //state.circuit.modules.filterNot { usedModuleSet contains _.name } foreach { x => renames.record(ModuleTarget(state.circuit.main, x.name), Seq()) } - def execute(state: CircuitState): CircuitState = { - val ret = runTransforms(state) - CircuitState(ret.circuit, outputForm, ret.annotations, ret.renames) + val newCircuit = Circuit(state.circuit.info, usedModuleSeq, state.circuit.main) + val newAnnos = AnnotationSeq(state.annotations.toSeq.filter { _ match { + // XXX This is wrong, but it works for now + case x: DontTouchAnnotation => false + //case x: DontTouchAnnotation => usedModuleNames contains x.target.module + case _ => true + }}) + + CircuitState(newCircuit, outputForm, newAnnos, Some(renames)) } } From 12842cb3a79bb6d59d61ebb2af17c4a67fd42d6e Mon Sep 17 00:00:00 2001 From: John Wright Date: Mon, 11 Feb 2019 22:54:01 -0800 Subject: [PATCH 117/273] Add MemConf and change MacroCompiler to use a conf file instead of MDF JSON --- macros/src/main/scala/MacroCompiler.scala | 8 +-- macros/src/main/scala/MemConf.scala | 55 ++++++++++++++++ macros/src/main/scala/Utils.scala | 77 ++++++++++++++++++++++- 3 files changed, 135 insertions(+), 5 deletions(-) create mode 100644 macros/src/main/scala/MemConf.scala diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 410372dd7..9e0c72ed0 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -618,7 +618,7 @@ class MacroCompilerTransform extends Transform { assert((forceCompile intersect forceSynflops).isEmpty, "Cannot have modules both forced to compile and synflops") // Read, eliminate None, get only SRAM, make firrtl macro - val mems: Option[Seq[Macro]] = mdf.macrolib.Utils.readMDFFromPath(Some(memFile)) match { + val mems: Option[Seq[Macro]] = Utils.readConfFromPath(Some(memFile)) match { case Some(x:Seq[mdf.macrolib.Macro]) => Some(Utils.filterForSRAM(Some(x)) getOrElse(List()) map {new Macro(_)}) case _ => None @@ -701,7 +701,7 @@ object MacroCompiler extends App { .map { case (_, cmd, description) => s" $cmd: $description" } val usage: String = (Seq( "Options:", - " -m, --macro-list: The set of macros to compile", + " -m, --macro-conf: The set of macros to compile in firrtl-generated conf format", " -l, --library: The set of macros that have blackbox instances", " -u, --use-compiler: Flag, whether to use the memory compiler defined in library", " -v, --verilog: Verilog output", @@ -717,7 +717,7 @@ object MacroCompiler extends App { args: List[String]): (MacroParamMap, CostParamMap, ForcedMemories) = args match { case Nil => (map, costMap, forcedMemories) - case ("-m" | "--macro-list") :: value :: tail => + case ("-m" | "--macro-conf") :: value :: tail => parseArgs(map + (Macros -> value), costMap, forcedMemories, tail) case ("-l" | "--library") :: value :: tail => parseArgs(map + (Library -> value), costMap, forcedMemories, tail) @@ -746,7 +746,7 @@ object MacroCompiler extends App { def run(args: List[String]) { val (params, costParams, forcedMemories) = parseArgs(Map[MacroParam, String](), Map[String, String](), (Set.empty, Set.empty), args) try { - val macros = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) + val macros = Utils.filterForSRAM(Utils.readConfFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) if (macros.nonEmpty) { // Note: the last macro in the input list is (seemingly 
arbitrarily) diff --git a/macros/src/main/scala/MemConf.scala b/macros/src/main/scala/MemConf.scala new file mode 100644 index 000000000..d277c066d --- /dev/null +++ b/macros/src/main/scala/MemConf.scala @@ -0,0 +1,55 @@ +// See LICENSE for license details. + +package barstools.macros + +import scala.util.matching._ + +sealed abstract class MemPort(val name: String) { override def toString = name } + +case object ReadPort extends MemPort("read") +case object WritePort extends MemPort("write") +case object MaskWritePort extends MemPort("mwrite") +case object ReadWritePort extends MemPort("rw") +case object MaskReadWritePort extends MemPort("mrw") + +object MemPort { + + val all = Set(ReadPort, WritePort, MaskWritePort, ReadWritePort, MaskReadWritePort) + + def apply(s: String): Option[MemPort] = MemPort.all.find(_.name == s) + + def fromString(s: String): Seq[MemPort] = { + s.split(",").toSeq.map(MemPort.apply).map(_ match { + case Some(x) => x + case _ => throw new Exception(s"Error parsing MemPort string : ${s}") + }) + } +} + +// This is based on firrtl.passes.memlib.ConfWriter +// TODO standardize this in FIRRTL +case class MemConf( + name: String, + depth: Int, + width: Int, + ports: Seq[MemPort], + maskGranularity: Option[Int] +) { + + private def portsStr = ports.map(_.name).mkString(",") + private def maskGranStr = maskGranularity.map((p) => s"mask_gran $p").getOrElse("") + + override def toString() = s"name ${name} depth ${depth} width ${width} ports ${portsStr} ${maskGranStr} " +} + +object MemConf { + + val regex = raw"\s*name\s+(\w+)\s+depth\s+(\d+)\s+width\s+(\d+)\s+ports\s+([^\s]+)\s+(?:mask_gran\s+(\d+))?\s*".r + + def fromString(s: String): Seq[MemConf] = { + s.split("\n").toSeq.map(_ match { + case MemConf.regex(name, depth, width, ports, maskGran) => MemConf(name, depth.toInt, width.toInt, MemPort.fromString(ports), Option(maskGran).map(_.toInt)) + case _ => throw new Exception(s"Error parsing MemConf string : ${s}") + }) + } +} diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index ded5c53f8..6a420961f 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -7,7 +7,7 @@ import firrtl.ir._ import firrtl.PrimOps import firrtl.Utils.{ceilLog2, BoolType} import mdf.macrolib.{Constant, MacroPort, SRAMMacro} -import mdf.macrolib.{PolarizedPort, PortPolarity, ActiveLow, ActiveHigh, NegativeEdge, PositiveEdge} +import mdf.macrolib.{PolarizedPort, PortPolarity, ActiveLow, ActiveHigh, NegativeEdge, PositiveEdge, MacroExtraPort} import java.io.File import scala.language.implicitConversions @@ -72,6 +72,81 @@ object Utils { case _ => None } } + // This utility reads a conf in and returns MDF like mdf.macrolib.Utils.readMDFFromPath + def readConfFromPath(path: Option[String]): Option[Seq[mdf.macrolib.Macro]] = { + path.map((p) => Utils.readConfFromString(scala.io.Source.fromFile(p).mkString)) + } + def readConfFromString(str: String): Seq[mdf.macrolib.Macro] = { + MemConf.fromString(str).map { m:MemConf => + SRAMMacro(m.name, m.width, m.depth, "", Utils.portSpecToMacroPort(m.width, m.depth, m.maskGranularity, m.ports), Seq.empty[MacroExtraPort]) + } + } + // This translates between two represenations of ports + def portSpecToMacroPort(width: Int, depth: Int, maskGran: Option[Int], ports: Seq[MemPort]): Seq[MacroPort] = { + var numR = 0 + var numW = 0 + var numRW = 0 + ports.map { _ match { + case ReadPort => { + val portName = s"R${numR}" + numR += 1 + MacroPort( + width=Some(width), depth=Some(depth), + 
address=PolarizedPort(s"${portName}_address", ActiveHigh), + clock=PolarizedPort(s"${portName}_clock", PositiveEdge), + readEnable=Some(PolarizedPort(s"${portName}_ren", ActiveHigh)), + output=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) + ) } + case WritePort => { + val portName = s"W${numW}" + numW += 1 + MacroPort( + width=Some(width), depth=Some(depth), + address=PolarizedPort(s"${portName}_address", ActiveHigh), + clock=PolarizedPort(s"${portName}_clock", PositiveEdge), + writeEnable=Some(PolarizedPort(s"${portName}_wen", ActiveHigh)), + input=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) + ) } + case MaskWritePort => { + val portName = s"W${numW}" + numW += 1 + MacroPort( + width=Some(width), depth=Some(depth), + address=PolarizedPort(s"${portName}_address", ActiveHigh), + clock=PolarizedPort(s"${portName}_clock", PositiveEdge), + writeEnable=Some(PolarizedPort(s"${portName}_wen", ActiveHigh)), + maskPort=Some(PolarizedPort(s"${portName}_mask", ActiveHigh)), + maskGran=maskGran, + input=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) + ) } + case ReadWritePort => { + val portName = s"RW${numRW}" + numRW += 1 + MacroPort( + width=Some(width), depth=Some(depth), + address=PolarizedPort(s"${portName}_address", ActiveHigh), + clock=PolarizedPort(s"${portName}_clock", PositiveEdge), + writeEnable=Some(PolarizedPort(s"${portName}_wen", ActiveHigh)), + readEnable=Some(PolarizedPort(s"${portName}_ren", ActiveHigh)), + input=Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), + output=Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) + ) } + case MaskReadWritePort => { + val portName = s"RW${numRW}" + numRW += 1 + MacroPort( + width=Some(width), depth=Some(depth), + address=PolarizedPort(s"${portName}_address", ActiveHigh), + clock=PolarizedPort(s"${portName}_clock", PositiveEdge), + writeEnable=Some(PolarizedPort(s"${portName}_wen", ActiveHigh)), + readEnable=Some(PolarizedPort(s"${portName}_ren", ActiveHigh)), + maskPort=Some(PolarizedPort(s"${portName}_mask", ActiveHigh)), + maskGran=maskGran, + input=Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), + output=Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) + ) } + }} + } def findSRAMCompiler(s: Option[Seq[mdf.macrolib.Macro]]): Option[mdf.macrolib.SRAMCompiler] = { s match { case Some(l:Seq[mdf.macrolib.Macro]) => From d861fdd95c5d81e01de2ff9ed07929633a82e785 Mon Sep 17 00:00:00 2001 From: John Wright Date: Tue, 12 Feb 2019 10:55:34 -0800 Subject: [PATCH 118/273] Don't run DCE && Profit --- macros/src/main/scala/MacroCompiler.scala | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 9e0c72ed0..256c55fe8 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -13,6 +13,7 @@ import firrtl.ir._ import firrtl.PrimOps import firrtl.Utils._ import firrtl.annotations._ +import firrtl.transforms.{NoDCEAnnotation} import firrtl.CompilerUtils.getLoweringTransforms import mdf.macrolib.{PolarizedPort, PortPolarity} import scala.collection.mutable.{ArrayBuffer, HashMap} @@ -764,7 +765,8 @@ object MacroCompiler extends App { ) )) ) - val state = CircuitState(circuit, HighForm, annotations) + // Append a NoDCEAnnotation to avoid dead code elimination removing the non-parent SRAMs + val state = CircuitState(circuit, HighForm, annotations :+ NoDCEAnnotation) // Run the compiler. 
val result = new MacroCompiler().compileAndEmit(state) From f0c7bab3eaaa6977b4971e380c9f1cde49c21744 Mon Sep 17 00:00:00 2001 From: John Wright Date: Tue, 12 Feb 2019 11:05:06 -0800 Subject: [PATCH 119/273] Use the correct 'magic values' for the port names Ensure backwards compatiblity by using -m for MDF input and -n for conf input. Also fix the naming scheme for memory ports. --- macros/src/main/scala/MacroCompiler.scala | 28 +++++++++++++----- macros/src/main/scala/Utils.scala | 36 +++++++++++------------ 2 files changed, 38 insertions(+), 26 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 256c55fe8..f04825375 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -74,13 +74,14 @@ object MacroCompilerAnnotation { * Parameters associated to this MacroCompilerAnnotation. * * @param mem Path to memory lib + * @param memMode Type of memory lib (Some("conf"), Some("mdf"), or None (defaults to mdf)) * @param lib Path to library lib or None if no libraries * @param costMetric Cost metric to use * @param mode Compiler mode (see CompilerMode) * @param forceCompile Set of memories to force compiling to lib regardless of the mode * @param forceSynflops Set of memories to force compiling as flops regardless of the mode */ - case class Params(mem: String, lib: Option[String], costMetric: CostMetric, mode: CompilerMode, useCompiler: Boolean, + case class Params(mem: String, memMode: Option[String], lib: Option[String], costMetric: CostMetric, mode: CompilerMode, useCompiler: Boolean, forceCompile: Set[String], forceSynflops: Set[String]) /** @@ -610,7 +611,7 @@ class MacroCompilerTransform extends Transform { def execute(state: CircuitState) = getMyAnnotations(state) match { case Seq(MacroCompilerAnnotation(state.circuit.main, - MacroCompilerAnnotation.Params(memFile, libFile, costMetric, mode, useCompiler, forceCompile, forceSynflops))) => + MacroCompilerAnnotation.Params(memFile, memFileFormat, libFile, costMetric, mode, useCompiler, forceCompile, forceSynflops))) => if (mode == MacroCompilerAnnotation.FallbackSynflops) { throw new UnsupportedOperationException("Not implemented yet") } @@ -619,7 +620,10 @@ class MacroCompilerTransform extends Transform { assert((forceCompile intersect forceSynflops).isEmpty, "Cannot have modules both forced to compile and synflops") // Read, eliminate None, get only SRAM, make firrtl macro - val mems: Option[Seq[Macro]] = Utils.readConfFromPath(Some(memFile)) match { + val mems: Option[Seq[Macro]] = (memFileFormat match { + case Some("conf") => Utils.readConfFromPath(Some(memFile)) + case _ => mdf.macrolib.Utils.readMDFFromPath(Some(memFile)) + }) match { case Some(x:Seq[mdf.macrolib.Macro]) => Some(Utils.filterForSRAM(Some(x)) getOrElse(List()) map {new Macro(_)}) case _ => None @@ -688,6 +692,7 @@ class MacroCompiler extends Compiler { object MacroCompiler extends App { sealed trait MacroParam case object Macros extends MacroParam + case object MacrosFormat extends MacroParam case object Library extends MacroParam case object Verilog extends MacroParam case object Firrtl extends MacroParam @@ -702,7 +707,8 @@ object MacroCompiler extends App { .map { case (_, cmd, description) => s" $cmd: $description" } val usage: String = (Seq( "Options:", - " -m, --macro-conf: The set of macros to compile in firrtl-generated conf format", + " -n, --macro-conf: The set of macros to compile in firrtl-generated conf format (exclusive with -m)", + " -m, 
--macro-mdf: The set of macros to compile in MDF JSON format (exclusive with -n)", " -l, --library: The set of macros that have blackbox instances", " -u, --use-compiler: Flag, whether to use the memory compiler defined in library", " -v, --verilog: Verilog output", @@ -718,8 +724,10 @@ object MacroCompiler extends App { args: List[String]): (MacroParamMap, CostParamMap, ForcedMemories) = args match { case Nil => (map, costMap, forcedMemories) - case ("-m" | "--macro-conf") :: value :: tail => - parseArgs(map + (Macros -> value), costMap, forcedMemories, tail) + case ("-n" | "--macro-conf") :: value :: tail => + parseArgs(map + (Macros -> value) + (MacrosFormat -> "conf"), costMap, forcedMemories, tail) + case ("-m" | "--macro-mdf") :: value :: tail => + parseArgs(map + (Macros -> value) + (MacrosFormat -> "mdf"), costMap, forcedMemories, tail) case ("-l" | "--library") :: value :: tail => parseArgs(map + (Library -> value), costMap, forcedMemories, tail) case ("-u" | "--use-compiler") :: tail => @@ -747,7 +755,11 @@ object MacroCompiler extends App { def run(args: List[String]) { val (params, costParams, forcedMemories) = parseArgs(Map[MacroParam, String](), Map[String, String](), (Set.empty, Set.empty), args) try { - val macros = Utils.filterForSRAM(Utils.readConfFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) + val macros = if (params.get(MacrosFormat) == Some("conf")) { + Utils.filterForSRAM(Utils.readConfFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) + } else { + Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) + } if (macros.nonEmpty) { // Note: the last macro in the input list is (seemingly arbitrarily) @@ -757,7 +769,7 @@ object MacroCompiler extends App { Seq(MacroCompilerAnnotation( circuit.main, MacroCompilerAnnotation.Params( - params.get(Macros).get, params.get(Library), + params.get(Macros).get, params.get(MacrosFormat), params.get(Library), CostMetric.getCostMetric(params.getOrElse(CostFunc, "default"), costParams), MacroCompilerAnnotation.stringToCompilerMode(params.getOrElse(Mode, "default")), params.contains(UseCompiler), diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index 6a420961f..ee9ef3b11 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -92,9 +92,9 @@ object Utils { numR += 1 MacroPort( width=Some(width), depth=Some(depth), - address=PolarizedPort(s"${portName}_address", ActiveHigh), - clock=PolarizedPort(s"${portName}_clock", PositiveEdge), - readEnable=Some(PolarizedPort(s"${portName}_ren", ActiveHigh)), + address=PolarizedPort(s"${portName}_addr", ActiveHigh), + clock=PolarizedPort(s"${portName}_clk", PositiveEdge), + chipEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), output=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) ) } case WritePort => { @@ -102,9 +102,9 @@ object Utils { numW += 1 MacroPort( width=Some(width), depth=Some(depth), - address=PolarizedPort(s"${portName}_address", ActiveHigh), - clock=PolarizedPort(s"${portName}_clock", PositiveEdge), - writeEnable=Some(PolarizedPort(s"${portName}_wen", ActiveHigh)), + address=PolarizedPort(s"${portName}_addr", ActiveHigh), + clock=PolarizedPort(s"${portName}_clk", PositiveEdge), + writeEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), input=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) ) } case MaskWritePort => { @@ -112,9 +112,9 @@ object Utils { numW += 1 MacroPort( width=Some(width), 
depth=Some(depth), - address=PolarizedPort(s"${portName}_address", ActiveHigh), - clock=PolarizedPort(s"${portName}_clock", PositiveEdge), - writeEnable=Some(PolarizedPort(s"${portName}_wen", ActiveHigh)), + address=PolarizedPort(s"${portName}_addr", ActiveHigh), + clock=PolarizedPort(s"${portName}_clk", PositiveEdge), + writeEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), maskPort=Some(PolarizedPort(s"${portName}_mask", ActiveHigh)), maskGran=maskGran, input=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) @@ -124,10 +124,10 @@ object Utils { numRW += 1 MacroPort( width=Some(width), depth=Some(depth), - address=PolarizedPort(s"${portName}_address", ActiveHigh), - clock=PolarizedPort(s"${portName}_clock", PositiveEdge), - writeEnable=Some(PolarizedPort(s"${portName}_wen", ActiveHigh)), - readEnable=Some(PolarizedPort(s"${portName}_ren", ActiveHigh)), + address=PolarizedPort(s"${portName}_addr", ActiveHigh), + clock=PolarizedPort(s"${portName}_clk", PositiveEdge), + chipEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + writeEnable=Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), input=Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), output=Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) ) } @@ -136,11 +136,11 @@ object Utils { numRW += 1 MacroPort( width=Some(width), depth=Some(depth), - address=PolarizedPort(s"${portName}_address", ActiveHigh), - clock=PolarizedPort(s"${portName}_clock", PositiveEdge), - writeEnable=Some(PolarizedPort(s"${portName}_wen", ActiveHigh)), - readEnable=Some(PolarizedPort(s"${portName}_ren", ActiveHigh)), - maskPort=Some(PolarizedPort(s"${portName}_mask", ActiveHigh)), + address=PolarizedPort(s"${portName}_addr", ActiveHigh), + clock=PolarizedPort(s"${portName}_clk", PositiveEdge), + chipEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + writeEnable=Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), + maskPort=Some(PolarizedPort(s"${portName}_wmask", ActiveHigh)), maskGran=maskGran, input=Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), output=Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) From efd2f09b21fdd5b217cf05e44b5f320f9757639c Mon Sep 17 00:00:00 2001 From: John Wright Date: Tue, 12 Feb 2019 16:00:54 -0800 Subject: [PATCH 120/273] Naming consistency (memMode -> memFormat) --- macros/src/main/scala/MacroCompiler.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index f04825375..118094173 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -74,14 +74,14 @@ object MacroCompilerAnnotation { * Parameters associated to this MacroCompilerAnnotation. 
* * @param mem Path to memory lib - * @param memMode Type of memory lib (Some("conf"), Some("mdf"), or None (defaults to mdf)) + * @param memFormat Type of memory lib (Some("conf"), Some("mdf"), or None (defaults to mdf)) * @param lib Path to library lib or None if no libraries * @param costMetric Cost metric to use * @param mode Compiler mode (see CompilerMode) * @param forceCompile Set of memories to force compiling to lib regardless of the mode * @param forceSynflops Set of memories to force compiling as flops regardless of the mode */ - case class Params(mem: String, memMode: Option[String], lib: Option[String], costMetric: CostMetric, mode: CompilerMode, useCompiler: Boolean, + case class Params(mem: String, memFormat: Option[String], lib: Option[String], costMetric: CostMetric, mode: CompilerMode, useCompiler: Boolean, forceCompile: Set[String], forceSynflops: Set[String]) /** From 1f58ea1e141c69276ae1680a9ece6c42a31653e5 Mon Sep 17 00:00:00 2001 From: John Wright Date: Wed, 13 Feb 2019 10:02:34 -0800 Subject: [PATCH 121/273] Style/Comments from review of #35 --- macros/src/main/scala/MacroCompiler.scala | 7 +++---- macros/src/main/scala/MemConf.scala | 6 +++--- macros/src/main/scala/Utils.scala | 4 ++-- .../src/main/scala/transforms/RemoveUnusedModules.scala | 4 ++++ 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 118094173..a51ac629d 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -755,10 +755,9 @@ object MacroCompiler extends App { def run(args: List[String]) { val (params, costParams, forcedMemories) = parseArgs(Map[MacroParam, String](), Map[String, String](), (Set.empty, Set.empty), args) try { - val macros = if (params.get(MacrosFormat) == Some("conf")) { - Utils.filterForSRAM(Utils.readConfFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) - } else { - Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) + val macros = params.get(MacrosFormat) match { + case Some("conf") => Utils.filterForSRAM(Utils.readConfFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) + case _ => Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) } if (macros.nonEmpty) { diff --git a/macros/src/main/scala/MemConf.scala b/macros/src/main/scala/MemConf.scala index d277c066d..ded4a8896 100644 --- a/macros/src/main/scala/MemConf.scala +++ b/macros/src/main/scala/MemConf.scala @@ -8,13 +8,13 @@ sealed abstract class MemPort(val name: String) { override def toString = name } case object ReadPort extends MemPort("read") case object WritePort extends MemPort("write") -case object MaskWritePort extends MemPort("mwrite") +case object MaskedWritePort extends MemPort("mwrite") case object ReadWritePort extends MemPort("rw") -case object MaskReadWritePort extends MemPort("mrw") +case object MaskedReadWritePort extends MemPort("mrw") object MemPort { - val all = Set(ReadPort, WritePort, MaskWritePort, ReadWritePort, MaskReadWritePort) + val all = Set(ReadPort, WritePort, MaskedWritePort, ReadWritePort, MaskedReadWritePort) def apply(s: String): Option[MemPort] = MemPort.all.find(_.name == s) diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index ee9ef3b11..7e6455933 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -107,7 
+107,7 @@ object Utils { writeEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), input=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) ) } - case MaskWritePort => { + case MaskedWritePort => { val portName = s"W${numW}" numW += 1 MacroPort( @@ -131,7 +131,7 @@ object Utils { input=Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), output=Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) ) } - case MaskReadWritePort => { + case MaskedReadWritePort => { val portName = s"RW${numRW}" numRW += 1 MacroPort( diff --git a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala index 410c7326b..470817a06 100644 --- a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala +++ b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala @@ -8,6 +8,8 @@ import firrtl.passes.Pass import firrtl.annotations.{SingleTargetAnnotation, Annotation} import firrtl.transforms.DontTouchAnnotation +// Removes all the unused modules in a circuit by recursing through every +// instance (starting at the main module) class RemoveUnusedModules extends Transform { def inputForm = MidForm def outputForm = MidForm @@ -48,11 +50,13 @@ class RemoveUnusedModules extends Transform { val renames = state.renames.getOrElse(RenameMap()) + // This is what the annotation filter should look like, but for some reason it doesn't work. //state.circuit.modules.filterNot { usedModuleSet contains _.name } foreach { x => renames.record(ModuleTarget(state.circuit.main, x.name), Seq()) } val newCircuit = Circuit(state.circuit.info, usedModuleSeq, state.circuit.main) val newAnnos = AnnotationSeq(state.annotations.toSeq.filter { _ match { // XXX This is wrong, but it works for now + // Tracked by https://github.com/ucb-bar/barstools/issues/36 case x: DontTouchAnnotation => false //case x: DontTouchAnnotation => usedModuleNames contains x.target.module case _ => true From 9d505d6063f07f7750686f67d2cda49b17f6d898 Mon Sep 17 00:00:00 2001 From: James Dunn Date: Wed, 13 Feb 2019 15:17:12 -0800 Subject: [PATCH 122/273] Fixed index offset in mask port mapping. (#38) Fixed index offset in mask port mapping. --- macros/src/main/scala/MacroCompiler.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index a51ac629d..8604b439b 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -432,7 +432,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // zero out the upper bits. zero } else { - if (i >= memPort.src.width.get) { + if ((low + i) >= memPort.src.width.get) { // If our bit is larger than the whole width of the mem, just zero out the upper bits. 
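                // (For instance, with a lib slice placed at low = 32 over a 40-bit mem,
                // indices i = 8..15 refer to overall bits 40..47, which do not exist;
                // the old test on i alone could never catch that, hence checking low + i.)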
zero } else { From a10a6cca357e2fab647da0026095e87c4eaddfd6 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Fri, 1 Mar 2019 18:52:41 -0800 Subject: [PATCH 123/273] Add SimDTM to list of extmodules --- tapeout/src/main/scala/transforms/Generate.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 8d4caccc5..629de58c3 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -84,7 +84,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => private def harnessTransforms: Seq[Transform] = { // XXX this is a hack, we really should be checking the masters to see if they are ExtModules - val externals = Set(harnessTop.get, synTop.get, "SimSerial") + val externals = Set(harnessTop.get, synTop.get, "SimSerial", "SimDTM") Seq( new ConvertToExtMod((m) => m.name == synTop.get), new RemoveUnusedModules, From 45278a6de0e0af3995cbdb8b38813b5397d9e633 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Mon, 11 Mar 2019 07:54:18 -0700 Subject: [PATCH 124/273] Make SRAM per port clocks optional Connects to whatever clock ports are available --- macros/src/main/scala/MacroCompiler.scala | 10 ++++++---- macros/src/main/scala/SynFlops.scala | 6 +++--- macros/src/main/scala/Utils.scala | 12 ++++++------ mdf | 2 +- 4 files changed, 16 insertions(+), 14 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 8604b439b..c496a45df 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -302,7 +302,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], case (None, None) => one } selectRegs(ref.name) = WRef(regName, tpe) - stmts += DefRegister(NoInfo, regName, tpe, WRef(port.clock.name), zero, WRef(regName)) + stmts += DefRegister(NoInfo, regName, tpe, WRef(port.clock.get.name), zero, WRef(regName)) stmts += Connect(NoInfo, WRef(regName), Mux(enable, WRef(nodeName), WRef(regName), tpe)) } } @@ -348,9 +348,11 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Clock port mapping /* Palmer: FIXME: I don't handle memories with read/write clocks yet. 
*/ - stmts += connectPorts(WRef(memPort.src.clock.name), - libPort.src.clock.name, - libPort.src.clock.polarity) + /* Colin not all libPorts have clocks but all memPorts do*/ + libPort.src.clock.foreach { cPort => + stmts += connectPorts(WRef(memPort.src.clock.get.name), + cPort.name, + cPort.polarity) } // Adress port mapping /* Palmer: The address port to a memory is just the low-order bits of diff --git a/macros/src/main/scala/SynFlops.scala b/macros/src/main/scala/SynFlops.scala index 48ee368ca..1e7a4d7c1 100644 --- a/macros/src/main/scala/SynFlops.scala +++ b/macros/src/main/scala/SynFlops.scala @@ -38,7 +38,7 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa ) val readConnects = lib.readers.zipWithIndex flatMap { case (r, i) => - val clock = portToExpression(r.src.clock) + val clock = portToExpression(r.src.clock.get) val address = portToExpression(r.src.address) val enable = (r.src chipEnable, r.src readEnable) match { case (Some(en_port), Some(re_port)) => @@ -63,7 +63,7 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa } val writeConnects = lib.writers.zipWithIndex flatMap { case (w, i) => - val clock = portToExpression(w.src.clock) + val clock = portToExpression(w.src.clock.get) val address = portToExpression(w.src.address) val enable = (w.src.chipEnable, w.src.writeEnable) match { case (Some(en), Some(we)) => @@ -95,7 +95,7 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa } val readwriteConnects = lib.readwriters.zipWithIndex flatMap { case (rw, i) => - val clock = portToExpression(rw.src.clock) + val clock = portToExpression(rw.src.clock.get) val address = portToExpression(rw.src.address) val wmode = rw.src.writeEnable match { case Some(we) => portToExpression(we) diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index 7e6455933..b188ac273 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -24,8 +24,8 @@ class FirrtlMacroPort(port: MacroPort) { // Bundle representing this macro port. 
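// Aside -- a minimal, self-contained sketch of the Option-based clock hookup used in
// this patch, with invented stand-in types (PinSketch/PortSketch are not the real
// mdf/firrtl classes): the lib side may lack a clock pin, the mem side always has one.
object ClockHookupSketch {
  case class PinSketch(name: String)
  case class PortSketch(name: String, clock: Option[PinSketch])
  // Emit a connection only when the lib port actually has a clock, mirroring the
  // libPort.src.clock.foreach above; the mem-side .get mirrors "all memPorts do".
  def clockConnects(memPort: PortSketch, libPort: PortSketch): Seq[String] =
    libPort.clock.toSeq.map(c => s"${c.name} <= ${memPort.clock.get.name}")
  // clockConnects(PortSketch("R0", Some(PinSketch("R0_clk"))), PortSketch("A", None)) == Nil
}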
val tpe = BundleType(Seq( - Field(port.clock.name, Flip, ClockType), Field(port.address.name, Flip, addrType)) ++ + (port.clock map (p => Field(p.name, Flip, ClockType))) ++ (port.input map (p => Field(p.name, Flip, dataType))) ++ (port.output map (p => Field(p.name, Default, dataType))) ++ (port.chipEnable map (p => Field(p.name, Flip, BoolType))) ++ @@ -93,7 +93,7 @@ object Utils { MacroPort( width=Some(width), depth=Some(depth), address=PolarizedPort(s"${portName}_addr", ActiveHigh), - clock=PolarizedPort(s"${portName}_clk", PositiveEdge), + clock=Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), chipEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), output=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) ) } @@ -103,7 +103,7 @@ object Utils { MacroPort( width=Some(width), depth=Some(depth), address=PolarizedPort(s"${portName}_addr", ActiveHigh), - clock=PolarizedPort(s"${portName}_clk", PositiveEdge), + clock=Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), writeEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), input=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) ) } @@ -113,7 +113,7 @@ object Utils { MacroPort( width=Some(width), depth=Some(depth), address=PolarizedPort(s"${portName}_addr", ActiveHigh), - clock=PolarizedPort(s"${portName}_clk", PositiveEdge), + clock=Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), writeEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), maskPort=Some(PolarizedPort(s"${portName}_mask", ActiveHigh)), maskGran=maskGran, @@ -125,7 +125,7 @@ object Utils { MacroPort( width=Some(width), depth=Some(depth), address=PolarizedPort(s"${portName}_addr", ActiveHigh), - clock=PolarizedPort(s"${portName}_clk", PositiveEdge), + clock=Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), chipEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), writeEnable=Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), input=Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), @@ -137,7 +137,7 @@ object Utils { MacroPort( width=Some(width), depth=Some(depth), address=PolarizedPort(s"${portName}_addr", ActiveHigh), - clock=PolarizedPort(s"${portName}_clk", PositiveEdge), + clock=Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), chipEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), writeEnable=Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), maskPort=Some(PolarizedPort(s"${portName}_wmask", ActiveHigh)), diff --git a/mdf b/mdf index c13e31656..88478cd2a 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit c13e31656e1ce572e03acf465beb00e157dcbc06 +Subproject commit 88478cd2adf3fa9de12be3d066af4fc8b304a23a From a0510e66647ac4a5bda43b96b352d976273aec1c Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Fri, 15 Mar 2019 16:08:55 -0700 Subject: [PATCH 125/273] Change cost to double from BigInt and fix default metric I don't think it was adding anything and now we can get rid of the weird +1/-1 --- macros/src/main/scala/CostMetric.scala | 24 +++++++++++------------ macros/src/main/scala/MacroCompiler.scala | 2 +- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/macros/src/main/scala/CostMetric.scala b/macros/src/main/scala/CostMetric.scala index afcb26606..ce95a8614 100644 --- a/macros/src/main/scala/CostMetric.scala +++ b/macros/src/main/scala/CostMetric.scala @@ -18,7 +18,7 @@ trait CostMetric extends Serializable { * @return The cost of this compile, defined by this cost metric, or None if * it cannot be compiled. 
*/ - def cost(mem: Macro, lib: Macro): Option[BigInt] + def cost(mem: Macro, lib: Macro): Option[Double] /** * Helper function to return the map of arguments (or an empty map if there are none). @@ -43,7 +43,7 @@ trait CostMetricCompanion { * TODO: figure out what is the difference between this metric and the current * default metric and either revive or delete this metric. */ object OldMetric extends CostMetric with CostMetricCompanion { - override def cost(mem: Macro, lib: Macro): Option[BigInt] = { + override def cost(mem: Macro, lib: Macro): Option[Double] = { /* Palmer: A quick cost function (that must be kept in sync with * memory_cost()) that attempts to avoid compiling unncessary * memories. This is a lower bound on the cost of compiling a @@ -61,9 +61,9 @@ object OldMetric extends CostMetric with CostMetricCompanion { /** * An external cost function. * Calls the specified path with paths to the JSON MDF representation of the mem - * and lib macros. The external executable should print a BigInt. + * and lib macros. The external executable should print a Double. * None will be returned if the external executable does not print a valid - * BigInt. + * Double. */ class ExternalMetric(path: String) extends CostMetric { import mdf.macrolib.Utils.writeMacroToPath @@ -71,7 +71,7 @@ class ExternalMetric(path: String) extends CostMetric { import scala.language.postfixOps // for !! postfix op import sys.process._ - override def cost(mem: Macro, lib: Macro): Option[BigInt] = { + override def cost(mem: Macro, lib: Macro): Option[Double] = { // Create temporary files. val memFile = File.createTempFile("_macrocompiler_mem_", ".json") val libFile = File.createTempFile("_macrocompiler_lib_", ".json") @@ -87,7 +87,7 @@ class ExternalMetric(path: String) extends CostMetric { libFile.delete() try { - Some(BigInt(result)) + Some(result.toDouble) } catch { case e: NumberFormatException => None } @@ -113,18 +113,16 @@ object ExternalMetric extends CostMetricCompanion { /** The current default metric in barstools, re-defined by Donggyu. */ // TODO: write tests for this function to make sure it selects the right things object DefaultMetric extends CostMetric with CostMetricCompanion { - override def cost(mem: Macro, lib: Macro): Option[BigInt] = { + override def cost(mem: Macro, lib: Macro): Option[Double] = { val memMask = mem.src.ports map (_.maskGran) find (_.isDefined) map (_.get) val libMask = lib.src.ports map (_.maskGran) find (_.isDefined) map (_.get) val memWidth = (memMask, libMask) match { - case (Some(1), Some(1)) | (None, _) => mem.src.width + case (Some(_), Some(1)) | (None, _) => mem.src.width case (Some(p), _) => p // assume that the memory consists of smaller chunks } - return Some( - (((mem.src.depth - 1) / lib.src.depth) + 1) * - (((memWidth - 1) / lib.src.width) + 1) * - (lib.src.depth * lib.src.width + 1) // weights on # cells - ) + val depthCost = (mem.src.depth.toDouble / lib.src.depth.toDouble) + val widthCost = (memWidth.toDouble / lib.src.width.toDouble) + return Some(depthCost * widthCost) } override def commandLineParams = Map() diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index c496a45df..0ffd0ce53 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -565,7 +565,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Try to compile mem against each lib in libs, keeping track of the // best compiled version, external lib used, and cost. 
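// Aside -- the Double-valued default metric above boils down to a product of depth and
// width ratios; a standalone sketch with made-up dimensions (the pass keeps the candidate
// lib with the lowest cost):
object CostSketch extends App {
  // A 2048x32 (depth x width) memory mapped onto a 1024x8 lib macro, no write masks.
  val depthCost = 2048.0 / 1024.0 // 2 rows of lib instances
  val widthCost = 32.0 / 8.0      // 4 columns of lib instances
  println(depthCost * widthCost)  // 8.0
}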
- val (best, cost) = (libs foldLeft (None: Option[(Module, ExtModule)], BigInt(Long.MaxValue))){ + val (best, cost) = (fullLibs foldLeft (None: Option[(Module, ExtModule)], Double.MaxValue)){ case ((best, cost), lib) if mem.src.ports.size != lib.src.ports.size => /* Palmer: FIXME: This just assumes the Chisel and vendor ports are in the same * order, but I'm starting with what actually gets generated. */ From 98a410812c03f109b6c4d456ae5c484f9a62e643 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Sat, 16 Mar 2019 02:42:05 -0700 Subject: [PATCH 126/273] Filter compiler libraries before mapping The filter is always by family and maskability and then by any integral mappings. --- macros/src/main/scala/MacroCompiler.scala | 38 ++++++++++++++++++++--- macros/src/main/scala/Utils.scala | 14 ++++++++- 2 files changed, 47 insertions(+), 5 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 0ffd0ce53..4250ad6e8 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -15,7 +15,7 @@ import firrtl.Utils._ import firrtl.annotations._ import firrtl.transforms.{NoDCEAnnotation} import firrtl.CompilerUtils.getLoweringTransforms -import mdf.macrolib.{PolarizedPort, PortPolarity} +import mdf.macrolib.{PolarizedPort, PortPolarity, SRAMMacro, SRAMGroup, SRAMCompiler} import scala.collection.mutable.{ArrayBuffer, HashMap} import java.io.{File, FileWriter} import Utils._ @@ -103,6 +103,7 @@ object MacroCompilerAnnotation { class MacroCompilerPass(mems: Option[Seq[Macro]], libs: Option[Seq[Macro]], + compilers: Option[SRAMCompiler], costMetric: CostMetric = CostMetric.default, mode: MacroCompilerAnnotation.CompilerMode = MacroCompilerAnnotation.Default) extends firrtl.passes.Pass { // Helper function to check the legality of bitPairs. @@ -563,6 +564,29 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // in the 'circuit'. (mems foldLeft c.modules){ (modules, mem) => + val sram = mem.src + def groupMatchesMask(group: SRAMGroup, mem:SRAMMacro): Boolean = { + val memMask = mem.ports map (_.maskGran) find (_.isDefined) map (_.get) + val libMask = group.ports map (_.maskGran) find (_.isDefined) map (_.get) + (memMask, libMask) match { + case (_, Some(1)) => true + case (None, _) => true + case (Some(_), None) => false + case (Some(m), Some(l)) => l <= m //Ignore memories that don't have nice mask + } + } + // Add compiler memories that might map well to libs + val compLibs = compilers match { + case Some(SRAMCompiler(_, groups)) => { + groups.filter(g => g.family == sram.family && groupMatchesMask(g, sram)).map( g => { + for(w <- g.width; d <- g.depth if((sram.width % w == 0) && (sram.depth % d == 0))) + yield Seq(new Macro(buildSRAMMacro(g, d, w, g.vt.head))) + } ) + } + case None => Seq() + } + val fullLibs = libs ++ compLibs.flatten.flatten + // Try to compile mem against each lib in libs, keeping track of the // best compiled version, external lib used, and cost. 
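// Aside -- the compLibs expansion above is a cross product over the group's legal widths
// and depths, filtered to points that tile the requested memory an integral number of
// times; a standalone sketch with invented numbers:
object GroupExpandSketch extends App {
  val widths = Seq(8, 16, 32)           // widths a hypothetical compiler group offers
  val depths = Seq(512, 1024)           // depths it offers
  val (memWidth, memDepth) = (48, 2048) // the memory being mapped
  val candidates =
    for (w <- widths; d <- depths if memWidth % w == 0 && memDepth % d == 0)
      yield (w, d)
  println(candidates) // List((8,512), (8,1024), (16,512), (16,1024)) -- 32 does not divide 48
}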
val (best, cost) = (fullLibs foldLeft (None: Option[(Module, ExtModule)], Double.MaxValue)){ @@ -631,14 +655,20 @@ class MacroCompilerTransform extends Transform { case _ => None } val libs: Option[Seq[Macro]] = mdf.macrolib.Utils.readMDFFromPath(libFile) match { + case Some(x:Seq[mdf.macrolib.Macro]) => + Some(Utils.filterForSRAM(Some(x)) getOrElse(List()) map {new Macro(_)}) + case _ => None + } + val compilers: Option[mdf.macrolib.SRAMCompiler] = mdf.macrolib.Utils.readMDFFromPath(libFile) match { case Some(x:Seq[mdf.macrolib.Macro]) => if(useCompiler){ - findSRAMCompiler(Some(x)).map{x => buildSRAMMacros(x).map(new Macro(_)) } + findSRAMCompiler(Some(x)) } - else Some(Utils.filterForSRAM(Some(x)) getOrElse(List()) map {new Macro(_)}) + else None case _ => None } + // Helper function to turn a set of mem names into a Seq[Macro]. def setToSeqMacro(names: Set[String]): Seq[Macro] = { names.toSeq.map(memName => mems.get.collectFirst { case m if m.src.name == memName => m }.get) @@ -655,7 +685,7 @@ class MacroCompilerTransform extends Transform { }.getOrElse(Seq.empty) val transforms = Seq( - new MacroCompilerPass(memCompile, libs, costMetric, mode), + new MacroCompilerPass(memCompile, libs, compilers, costMetric, mode), new SynFlopsPass(true, memSynflops ++ (if (mode == MacroCompilerAnnotation.CompileAndSynflops) { libs.get } else { diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index b188ac273..998477d32 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -78,9 +78,18 @@ object Utils { } def readConfFromString(str: String): Seq[mdf.macrolib.Macro] = { MemConf.fromString(str).map { m:MemConf => - SRAMMacro(m.name, m.width, m.depth, "", Utils.portSpecToMacroPort(m.width, m.depth, m.maskGranularity, m.ports), Seq.empty[MacroExtraPort]) + SRAMMacro(m.name, m.width, m.depth, Utils.portSpecToFamily(m.ports), Utils.portSpecToMacroPort(m.width, m.depth, m.maskGranularity, m.ports), Seq.empty[MacroExtraPort]) } } + def portSpecToFamily(ports: Seq[MemPort]): String = { + val numR = ports.count(_ match { case ReadPort => true; case _ => false}) + val numW = ports.count(_ match { case WritePort|MaskedWritePort => true; case _ => false}) + val numRW = ports.count(_ match { case ReadWritePort|MaskedReadWritePort => true; case _ => false}) + val numRStr = if(numR > 0) s"${numR}r" else "" + val numWStr = if(numW > 0) s"${numW}w" else "" + val numRWStr = if(numRW > 0) s"${numRW}rw" else "" + return numRStr + numWStr + numRWStr + } // This translates between two represenations of ports def portSpecToMacroPort(width: Int, depth: Int, maskGran: Option[Int], ports: Seq[MemPort]): Seq[MacroPort] = { var numR = 0 @@ -160,6 +169,9 @@ object Utils { for (g <- s.groups; d <- g.depth; w <- g.width; vt <- g.vt) yield mdf.macrolib.SRAMMacro(makeName(g, d, w, vt), w, d, g.family, g.ports.map(_.copy(width=Some(w), depth=Some(d))), g.extraPorts) } + def buildSRAMMacro(g: mdf.macrolib.SRAMGroup, d: Int, w: Int, vt: String): mdf.macrolib.SRAMMacro = { + return mdf.macrolib.SRAMMacro(makeName(g, d, w, vt), w, d, g.family, g.ports.map(_.copy(width=Some(w), depth=Some(d))), g.extraPorts) + } def makeName(g: mdf.macrolib.SRAMGroup, depth: Int, width: Int, vt: String): String = { g.name.foldLeft(""){ (builder, next) => next match { From 6cdf978a6d21f5b2dec41ed5010897817a150687 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Sun, 17 Mar 2019 08:24:20 -0700 Subject: [PATCH 127/273] Fix forms of passes to happen before replseqmem This ensures the conf 
file doesn't have any testharness memories, which are too big and break downstream tools --- tapeout/src/main/scala/transforms/ConvertToExtModPass.scala | 4 ++-- tapeout/src/main/scala/transforms/RemoveUnusedModules.scala | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala index 46e12ed0d..5ef90a255 100644 --- a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala +++ b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala @@ -25,8 +25,8 @@ class ConvertToExtModPass(classify: (Module) => Boolean) extends Pass { } } class ConvertToExtMod(classify: (Module) => Boolean) extends Transform with SeqTransformBased { - def inputForm = MidForm - def outputForm = MidForm + def inputForm = HighForm + def outputForm = HighForm def transforms = Seq(new ConvertToExtModPass(classify)) def execute(state: CircuitState): CircuitState = { diff --git a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala index 470817a06..e4edbd741 100644 --- a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala +++ b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala @@ -11,8 +11,8 @@ import firrtl.transforms.DontTouchAnnotation // Removes all the unused modules in a circuit by recursing through every // instance (starting at the main module) class RemoveUnusedModules extends Transform { - def inputForm = MidForm - def outputForm = MidForm + def inputForm = HighForm + def outputForm = HighForm def execute(state: CircuitState): CircuitState = { val modulesByName = state.circuit.modules.map{ From 44e97826d4b7c3fe4b49d44ce5ac04b52b595ab7 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Sun, 17 Mar 2019 14:05:51 -0700 Subject: [PATCH 128/273] Fix cost metric for non Compiler libs Also a small fix from reviewer --- macros/src/main/scala/CostMetric.scala | 11 ++++++++++- macros/src/main/scala/MacroCompiler.scala | 1 - 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/macros/src/main/scala/CostMetric.scala b/macros/src/main/scala/CostMetric.scala index ce95a8614..e940cd8e5 100644 --- a/macros/src/main/scala/CostMetric.scala +++ b/macros/src/main/scala/CostMetric.scala @@ -122,7 +122,16 @@ object DefaultMetric extends CostMetric with CostMetricCompanion { } val depthCost = (mem.src.depth.toDouble / lib.src.depth.toDouble) val widthCost = (memWidth.toDouble / lib.src.width.toDouble) - return Some(depthCost * widthCost) + val bitsCost = (lib.src.depth * lib.src.width) + // The most complicated case occurs when you have a 1x1 lib and a 1Mx1M lib and a third sane lib. + // In this case you want to ensure that a lib slightly smaller or larger than mem as the third lib + // will always be selected over the stupid libs. 
+ if(widthCost < 1 && depthCost < 1) Some(bitsCost) // If the lib is bigger than pick the smallest lib + // If its not bigger in both dimensions pick the smallest in the dimension that the lib is larger in + else if(widthCost < 1) Some(depthCost * lib.src.width) + else if(depthCost < 1) Some(widthCost * lib.src.depth) + // If the lib is equal or smaller than source mem pick the largest lib + else Some(depthCost * widthCost) } override def commandLineParams = Map() diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 4250ad6e8..7a76e7561 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -569,7 +569,6 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], val memMask = mem.ports map (_.maskGran) find (_.isDefined) map (_.get) val libMask = group.ports map (_.maskGran) find (_.isDefined) map (_.get) (memMask, libMask) match { - case (_, Some(1)) => true case (None, _) => true case (Some(_), None) => false case (Some(m), Some(l)) => l <= m //Ignore memories that don't have nice mask From 0b9d74ada7e3271e82d665b09b3b9ff087c70f91 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Sun, 17 Mar 2019 21:26:29 -0700 Subject: [PATCH 129/273] Fix unit tests update cost function once more bump mdf to master --- macros/src/main/scala/CostMetric.scala | 27 +-- macros/src/main/scala/MacroCompiler.scala | 4 +- macros/src/test/scala/CostFunction.scala | 6 +- macros/src/test/scala/MacroCompilerSpec.scala | 6 +- macros/src/test/scala/SimpleSplitDepth.scala | 20 +-- macros/src/test/scala/SpecificExamples.scala | 155 +++++++----------- macros/src/test/scala/SynFlops.scala | 16 +- mdf | 2 +- 8 files changed, 100 insertions(+), 136 deletions(-) diff --git a/macros/src/main/scala/CostMetric.scala b/macros/src/main/scala/CostMetric.scala index e940cd8e5..16f1da5df 100644 --- a/macros/src/main/scala/CostMetric.scala +++ b/macros/src/main/scala/CostMetric.scala @@ -117,21 +117,22 @@ object DefaultMetric extends CostMetric with CostMetricCompanion { val memMask = mem.src.ports map (_.maskGran) find (_.isDefined) map (_.get) val libMask = lib.src.ports map (_.maskGran) find (_.isDefined) map (_.get) val memWidth = (memMask, libMask) match { - case (Some(_), Some(1)) | (None, _) => mem.src.width - case (Some(p), _) => p // assume that the memory consists of smaller chunks + case (None, _) => mem.src.width + case (Some(p), None) => (mem.src.width/p)*math.ceil(p.toDouble/lib.src.width)*lib.src.width //We map the mask to distinct memories + case (Some(p), Some(m)) => { + if(m <= p) (mem.src.width/p)*math.ceil(p.toDouble/m)*m //Using multiple m's to create a p (integeraly) + else (mem.src.width/p)*m //Waste the extra maskbits + } } - val depthCost = (mem.src.depth.toDouble / lib.src.depth.toDouble) - val widthCost = (memWidth.toDouble / lib.src.width.toDouble) + val depthCost = math.ceil(mem.src.depth.toDouble / lib.src.depth.toDouble) + val widthCost = math.ceil(memWidth.toDouble / lib.src.width.toDouble) val bitsCost = (lib.src.depth * lib.src.width) - // The most complicated case occurs when you have a 1x1 lib and a 1Mx1M lib and a third sane lib. - // In this case you want to ensure that a lib slightly smaller or larger than mem as the third lib - // will always be selected over the stupid libs. 
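// Aside -- a standalone sketch (invented numbers) of the masked-width accounting in the
// match above: with an unmasked lib, every maskable chunk of the mem needs its own lib
// instance, so the width charged to the mem grows accordingly.
object MaskedWidthSketch extends App {
  val (memWidth, memMaskGran, libWidth) = (64, 8, 16) // 64-bit mem, 8-bit mask chunks, 16-bit unmasked lib
  val effectiveWidth =
    (memWidth / memMaskGran) * math.ceil(memMaskGran.toDouble / libWidth) * libWidth
  println(effectiveWidth) // 128.0 -- eight 8-bit chunks, each occupying a 16-bit lib
}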
- if(widthCost < 1 && depthCost < 1) Some(bitsCost) // If the lib is bigger than pick the smallest lib - // If its not bigger in both dimensions pick the smallest in the dimension that the lib is larger in - else if(widthCost < 1) Some(depthCost * lib.src.width) - else if(depthCost < 1) Some(widthCost * lib.src.depth) - // If the lib is equal or smaller than source mem pick the largest lib - else Some(depthCost * widthCost) + // Fraction of wasted bits plus const per mem + val requestedBits = mem.src.depth * mem.src.width + val bitsWasted = depthCost*widthCost*bitsCost - requestedBits + val wastedConst = 0.05 // 0 means waste as few bits with no regard for instance count + val costPerInst = wastedConst*depthCost*widthCost + Some(1.0*bitsWasted/requestedBits+costPerInst) } override def commandLineParams = Map() diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 7a76e7561..e2d7974ff 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -393,12 +393,14 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], /* Palmer: The input port to a memory just needs to happen in parallel, * this does a part select to narrow the memory down. */ stmts += connectPorts(bits(WRef(mem), high, low), lib, lib_polarity) - case (None, Some(lib)) => + case (None, Some(lib)) => /* Palmer: If the inner memory has an input port but the other * one doesn't then it's safe to just leave the inner * port floating. This should be handled by the * default value of the write enable, so nothing should * every make it into the memory. */ + //Firrtl cares about dangling inputs now tie it off + stmts += IsInvalid(NoInfo, WSubField(inst, lib.name)) case (None, None) => /* Palmer: If there's no input ports at all (ie, read-only * port on the memory) then just don't worry about it, diff --git a/macros/src/test/scala/CostFunction.scala b/macros/src/test/scala/CostFunction.scala index b0fca093d..e2dbe03fa 100644 --- a/macros/src/test/scala/CostFunction.scala +++ b/macros/src/test/scala/CostFunction.scala @@ -10,7 +10,7 @@ import mdf.macrolib._ */ object TestMinWidthMetric extends CostMetric with CostMetricCompanion { // Smaller width = lower cost = favoured - override def cost(mem: Macro, lib: Macro): Option[BigInt] = Some(lib.src.width) + override def cost(mem: Macro, lib: Macro): Option[Double] = Some(lib.src.width) override def commandLineParams = Map() override def name = "TestMinWidthMetric" @@ -68,8 +68,8 @@ class SelectCostMetric extends MacroCompilerSpec with HasSRAMGenerator { """ circuit target_memory : module target_memory : - input clk : Clock input addr : UInt<10> + input clk : Clock input din : UInt<128> output dout : UInt<128> input write_en : UInt<1> @@ -102,8 +102,8 @@ circuit target_memory : dout <= mux(UInt<1>("h1"), dout_0, UInt<1>("h0")) extmodule SRAM_WIDTH_32 : - input clk : Clock input addr : UInt<10> + input clk : Clock input din : UInt<32> output dout : UInt<32> input write_en : UInt<1> diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 40c613ed8..ba420fdd2 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -99,7 +99,7 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate val macros = mems map (_.blackbox) val circuit = Circuit(NoInfo, macros, macros.last.name) val passes = Seq( - new MacroCompilerPass(Some(mems), libs, getCostMetric, if (synflops) 
MacroCompilerAnnotation.Synflops else MacroCompilerAnnotation.Default), + new MacroCompilerPass(Some(mems), libs, None, getCostMetric, if (synflops) MacroCompilerAnnotation.Synflops else MacroCompilerAnnotation.Default), new SynFlopsPass(synflops, libs getOrElse mems), RemoveEmpty) val result: Circuit = (passes foldLeft circuit)((c, pass) => pass run c) @@ -140,7 +140,7 @@ trait HasSRAMGenerator { MacroPort( address = PolarizedPort(name = realPrefix + "addr", polarity = ActiveHigh), - clock = PolarizedPort(name = realPrefix + "clk", polarity = PositiveEdge), + clock = Some(PolarizedPort(name = realPrefix + "clk", polarity = PositiveEdge)), readEnable = if (readEnable) Some(PolarizedPort(name = realPrefix + "read_en", polarity = ActiveHigh)) else None, writeEnable = if (writeEnable) Some(PolarizedPort(name = realPrefix + "write_en", polarity = ActiveHigh)) else None, @@ -350,8 +350,8 @@ trait HasSimpleTestGenerator { } val extraPortsStr = extraPorts.map { case (name, bits) => s" input $name : UInt<$bits>" }.mkString("\n") s""" - input ${realPrefix}clk : Clock input ${realPrefix}addr : UInt<$addrWidth> + input ${realPrefix}clk : Clock $writeStr $readStr $readEnableStr diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index 6a623447f..448dd06e2 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -255,8 +255,8 @@ class SplitDepth2048x8_extraPort extends MacroCompilerSpec with HasSRAMGenerator """ circuit target_memory : module target_memory : - input outer_clk : Clock input outer_addr : UInt<11> + input outer_clk : Clock input outer_din : UInt<8> output outer_dout : UInt<8> input outer_write_en : UInt<1> @@ -287,8 +287,8 @@ circuit target_memory : node outer_dout_1 = outer_dout_1_0 outer_dout <= mux(eq(outer_addr_sel_reg, UInt<1>("h0")), outer_dout_0, mux(eq(outer_addr_sel_reg, UInt<1>("h1")), outer_dout_1, UInt<1>("h0"))) extmodule awesome_lib_mem : - input lib_clk : Clock input lib_addr : UInt<10> + input lib_clk : Clock input lib_din : UInt<8> output lib_dout : UInt<8> input lib_write_en : UInt<1> @@ -345,11 +345,11 @@ class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGener """ circuit target_memory : module target_memory : - input outerB_clk : Clock input outerB_addr : UInt<11> + input outerB_clk : Clock output outerB_dout : UInt<8> - input outerA_clk : Clock input outerA_addr : UInt<11> + input outerA_clk : Clock input outerA_din : UInt<8> input outerA_write_en : UInt<1> @@ -379,11 +379,11 @@ circuit target_memory : outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) extmodule awesome_lib_mem : - input innerA_clk : Clock input innerA_addr : UInt<10> + input innerA_clk : Clock output innerA_dout : UInt<8> - input innerB_clk : Clock input innerB_addr : UInt<10> + input innerB_clk : Clock input innerB_din : UInt<8> input innerB_write_en : UInt<1> @@ -506,11 +506,11 @@ class SplitDepth_SplitPortsMasked extends MacroCompilerSpec with HasSRAMGenerato """ circuit target_memory : module target_memory : - input outerB_clk : Clock input outerB_addr : UInt<11> + input outerB_clk : Clock output outerB_dout : UInt<8> - input outerA_clk : Clock input outerA_addr : UInt<11> + input outerA_clk : Clock input outerA_din : UInt<8> input outerA_write_en : UInt<1> input outerA_mask : UInt<1> @@ -543,11 +543,11 @@ circuit target_memory : outerB_dout <= 
mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) extmodule awesome_lib_mem : - input innerA_clk : Clock input innerA_addr : UInt<10> + input innerA_clk : Clock output innerA_dout : UInt<8> - input innerB_clk : Clock input innerB_addr : UInt<10> + input innerB_clk : Clock input innerB_din : UInt<8> input innerB_write_en : UInt<1> input innerB_mask : UInt<8> diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala index 2ca1ddf03..694911ee3 100644 --- a/macros/src/test/scala/SpecificExamples.scala +++ b/macros/src/test/scala/SpecificExamples.scala @@ -203,12 +203,12 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { """ circuit smem_0_ext : module _T_182_ext : - input R0_clk : Clock input R0_addr : UInt<6> + input R0_clk : Clock output R0_data : UInt<88> input R0_en : UInt<1> - input W0_clk : Clock input W0_addr : UInt<6> + input W0_clk : Clock input W0_data : UInt<88> input W0_en : UInt<1> input W0_mask : UInt<4> @@ -249,24 +249,28 @@ circuit smem_0_ext : mem_0_0.CE2 <= R0_clk mem_0_0.A2 <= R0_addr node R0_data_0_0 = bits(mem_0_0.O2, 21, 0) + mem_0_0.I2 is invalid mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) mem_0_1.CE2 <= R0_clk mem_0_1.A2 <= R0_addr node R0_data_0_1 = bits(mem_0_1.O2, 21, 0) + mem_0_1.I2 is invalid mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) mem_0_2.CE2 <= R0_clk mem_0_2.A2 <= R0_addr node R0_data_0_2 = bits(mem_0_2.O2, 21, 0) + mem_0_2.I2 is invalid mem_0_2.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_2.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) mem_0_3.CE2 <= R0_clk mem_0_3.A2 <= R0_addr node R0_data_0_3 = bits(mem_0_3.O2, 21, 0) + mem_0_3.I2 is invalid mem_0_3.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_3.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) @@ -302,56 +306,42 @@ circuit smem_0_ext : mem_1_0.CE2 <= R0_clk mem_1_0.A2 <= R0_addr node R0_data_1_0 = bits(mem_1_0.O2, 21, 0) + mem_1_0.I2 is invalid mem_1_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) mem_1_1.CE2 <= R0_clk mem_1_1.A2 <= R0_addr node R0_data_1_1 = bits(mem_1_1.O2, 21, 0) + mem_1_1.I2 is invalid mem_1_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) mem_1_2.CE2 <= R0_clk mem_1_2.A2 <= R0_addr node R0_data_1_2 = bits(mem_1_2.O2, 21, 0) + mem_1_2.I2 is invalid mem_1_2.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_2.CSB2 <= not(and(R0_en, eq(R0_addr_sel, 
UInt<1>("h1")))) mem_1_3.CE2 <= R0_clk mem_1_3.A2 <= R0_addr node R0_data_1_3 = bits(mem_1_3.O2, 21, 0) + mem_1_3.I2 is invalid mem_1_3.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_3.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) node R0_data_1 = cat(R0_data_1_3, cat(R0_data_1_2, cat(R0_data_1_1, R0_data_1_0))) R0_data <= mux(eq(R0_addr_sel_reg, UInt<1>("h0")), R0_data_0, mux(eq(R0_addr_sel_reg, UInt<1>("h1")), R0_data_1, UInt<1>("h0"))) - extmodule my_sram_2rw_32x22 : - input CE1 : Clock - input A1 : UInt<5> - input I1 : UInt<22> - output O1 : UInt<22> - input CSB1 : UInt<1> - input OEB1 : UInt<1> - input WEB1 : UInt<1> - input CE2 : Clock - input A2 : UInt<5> - input I2 : UInt<22> - output O2 : UInt<22> - input CSB2 : UInt<1> - input OEB2 : UInt<1> - input WEB2 : UInt<1> - - defname = my_sram_2rw_32x22 - module _T_84_ext : - input R0_clk : Clock input R0_addr : UInt<9> + input R0_clk : Clock output R0_data : UInt<64> input R0_en : UInt<1> - input W0_clk : Clock input W0_addr : UInt<9> + input W0_clk : Clock input W0_data : UInt<64> input W0_en : UInt<1> input W0_mask : UInt<1> @@ -378,12 +368,14 @@ circuit smem_0_ext : mem_0_0.CE2 <= R0_clk mem_0_0.A2 <= R0_addr node R0_data_0_0 = bits(mem_0_0.O2, 31, 0) + mem_0_0.I2 is invalid mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h0")))) mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h0")))) mem_0_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h0")))) mem_0_1.CE2 <= R0_clk mem_0_1.A2 <= R0_addr node R0_data_0_1 = bits(mem_0_1.O2, 31, 0) + mem_0_1.I2 is invalid mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h0")))) mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h0")))) mem_0_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h0")))) @@ -405,12 +397,14 @@ circuit smem_0_ext : mem_1_0.CE2 <= R0_clk mem_1_0.A2 <= R0_addr node R0_data_1_0 = bits(mem_1_0.O2, 31, 0) + mem_1_0.I2 is invalid mem_1_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h1")))) mem_1_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h1")))) mem_1_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h1")))) mem_1_1.CE2 <= R0_clk mem_1_1.A2 <= R0_addr node R0_data_1_1 = bits(mem_1_1.O2, 31, 0) + mem_1_1.I2 is invalid mem_1_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h1")))) mem_1_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h1")))) mem_1_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h1")))) @@ -432,12 +426,14 @@ circuit smem_0_ext : mem_2_0.CE2 <= R0_clk mem_2_0.A2 <= R0_addr node R0_data_2_0 = bits(mem_2_0.O2, 31, 0) + mem_2_0.I2 is invalid mem_2_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h2")))) mem_2_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h2")))) mem_2_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h2")))) mem_2_1.CE2 <= R0_clk mem_2_1.A2 <= R0_addr node R0_data_2_1 = bits(mem_2_1.O2, 31, 0) + mem_2_1.I2 is invalid mem_2_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h2")))) mem_2_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h2")))) mem_2_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h2")))) @@ -459,12 +455,14 @@ circuit smem_0_ext : mem_3_0.CE2 <= R0_clk mem_3_0.A2 <= R0_addr node R0_data_3_0 = bits(mem_3_0.O2, 31, 0) + 
mem_3_0.I2 is invalid mem_3_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h3")))) mem_3_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h3")))) mem_3_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h3")))) mem_3_1.CE2 <= R0_clk mem_3_1.A2 <= R0_addr node R0_data_3_1 = bits(mem_3_1.O2, 31, 0) + mem_3_1.I2 is invalid mem_3_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h3")))) mem_3_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h3")))) mem_3_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h3")))) @@ -472,15 +470,15 @@ circuit smem_0_ext : R0_data <= mux(eq(R0_addr_sel_reg, UInt<2>("h0")), R0_data_0, mux(eq(R0_addr_sel_reg, UInt<2>("h1")), R0_data_1, mux(eq(R0_addr_sel_reg, UInt<2>("h2")), R0_data_2, mux(eq(R0_addr_sel_reg, UInt<2>("h3")), R0_data_3, UInt<1>("h0"))))) extmodule my_sram_2rw_128x32 : - input CE1 : Clock input A1 : UInt<7> + input CE1 : Clock input I1 : UInt<32> output O1 : UInt<32> input CSB1 : UInt<1> input OEB1 : UInt<1> input WEB1 : UInt<1> - input CE2 : Clock input A2 : UInt<7> + input CE2 : Clock input I2 : UInt<32> output O2 : UInt<32> input CSB2 : UInt<1> @@ -491,8 +489,8 @@ circuit smem_0_ext : module tag_array_ext : - input RW0_clk : Clock input RW0_addr : UInt<6> + input RW0_clk : Clock input RW0_wdata : UInt<80> output RW0_rdata : UInt<80> input RW0_en : UInt<1> @@ -535,8 +533,8 @@ circuit smem_0_ext : RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) extmodule my_sram_1rw_64x32 : - input CE : Clock input A : UInt<6> + input CE : Clock input I : UInt<32> output O : UInt<32> input CSB : UInt<1> @@ -547,8 +545,8 @@ circuit smem_0_ext : module _T_886_ext : - input RW0_clk : Clock input RW0_addr : UInt<9> + input RW0_clk : Clock input RW0_wdata : UInt<64> output RW0_rdata : UInt<64> input RW0_en : UInt<1> @@ -574,8 +572,8 @@ circuit smem_0_ext : RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) extmodule my_sram_1rw_512x32 : - input CE : Clock input A : UInt<9> + input CE : Clock input I : UInt<32> output O : UInt<32> input CSB : UInt<1> @@ -586,105 +584,68 @@ circuit smem_0_ext : module entries_info_ext : - input R0_clk : Clock input R0_addr : UInt<5> + input R0_clk : Clock output R0_data : UInt<40> input R0_en : UInt<1> - input W0_clk : Clock input W0_addr : UInt<5> + input W0_clk : Clock input W0_data : UInt<40> input W0_en : UInt<1> - inst mem_0_0 of my_sram_2rw_32x8 - inst mem_0_1 of my_sram_2rw_32x8 - inst mem_0_2 of my_sram_2rw_32x8 - inst mem_0_3 of my_sram_2rw_32x8 - inst mem_0_4 of my_sram_2rw_32x8 + inst mem_0_0 of my_sram_2rw_32x22 + inst mem_0_1 of my_sram_2rw_32x22 mem_0_0.CE1 <= W0_clk mem_0_0.A1 <= W0_addr - mem_0_0.I1 <= bits(W0_data, 7, 0) + mem_0_0.I1 <= bits(W0_data, 21, 0) mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) mem_0_0.CSB1 <= not(and(W0_en, UInt<1>("h1"))) mem_0_1.CE1 <= W0_clk mem_0_1.A1 <= W0_addr - mem_0_1.I1 <= bits(W0_data, 15, 8) + mem_0_1.I1 <= bits(W0_data, 39, 22) mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) mem_0_1.CSB1 <= not(and(W0_en, UInt<1>("h1"))) - mem_0_2.CE1 <= W0_clk - mem_0_2.A1 <= W0_addr - mem_0_2.I1 <= bits(W0_data, 23, 16) - mem_0_2.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - mem_0_2.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_2.CSB1 <= not(and(W0_en, UInt<1>("h1"))) - mem_0_3.CE1 <= 
W0_clk - mem_0_3.A1 <= W0_addr - mem_0_3.I1 <= bits(W0_data, 31, 24) - mem_0_3.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - mem_0_3.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_3.CSB1 <= not(and(W0_en, UInt<1>("h1"))) - mem_0_4.CE1 <= W0_clk - mem_0_4.A1 <= W0_addr - mem_0_4.I1 <= bits(W0_data, 39, 32) - mem_0_4.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) - mem_0_4.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_4.CSB1 <= not(and(W0_en, UInt<1>("h1"))) mem_0_0.CE2 <= R0_clk mem_0_0.A2 <= R0_addr - node R0_data_0_0 = bits(mem_0_0.O2, 7, 0) + node R0_data_0_0 = bits(mem_0_0.O2, 21, 0) + mem_0_0.I2 is invalid mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) mem_0_0.CSB2 <= not(and(R0_en, UInt<1>("h1"))) mem_0_1.CE2 <= R0_clk mem_0_1.A2 <= R0_addr - node R0_data_0_1 = bits(mem_0_1.O2, 7, 0) + node R0_data_0_1 = bits(mem_0_1.O2, 17, 0) + mem_0_1.I2 is invalid mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) mem_0_1.CSB2 <= not(and(R0_en, UInt<1>("h1"))) - mem_0_2.CE2 <= R0_clk - mem_0_2.A2 <= R0_addr - node R0_data_0_2 = bits(mem_0_2.O2, 7, 0) - mem_0_2.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - mem_0_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_2.CSB2 <= not(and(R0_en, UInt<1>("h1"))) - mem_0_3.CE2 <= R0_clk - mem_0_3.A2 <= R0_addr - node R0_data_0_3 = bits(mem_0_3.O2, 7, 0) - mem_0_3.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - mem_0_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_3.CSB2 <= not(and(R0_en, UInt<1>("h1"))) - mem_0_4.CE2 <= R0_clk - mem_0_4.A2 <= R0_addr - node R0_data_0_4 = bits(mem_0_4.O2, 7, 0) - mem_0_4.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) - mem_0_4.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_4.CSB2 <= not(and(R0_en, UInt<1>("h1"))) - node R0_data_0 = cat(R0_data_0_4, cat(R0_data_0_3, cat(R0_data_0_2, cat(R0_data_0_1, R0_data_0_0)))) + node R0_data_0 = cat(R0_data_0_1, R0_data_0_0) R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) - extmodule my_sram_2rw_32x8 : - input CE1 : Clock + extmodule my_sram_2rw_32x22 : input A1 : UInt<5> - input I1 : UInt<8> - output O1 : UInt<8> + input CE1 : Clock + input I1 : UInt<22> + output O1 : UInt<22> input CSB1 : UInt<1> input OEB1 : UInt<1> input WEB1 : UInt<1> - input CE2 : Clock input A2 : UInt<5> - input I2 : UInt<8> - output O2 : UInt<8> + input CE2 : Clock + input I2 : UInt<22> + output O2 : UInt<22> input CSB2 : UInt<1> input OEB2 : UInt<1> input WEB2 : UInt<1> - defname = my_sram_2rw_32x8 + defname = my_sram_2rw_32x22 module smem_ext : - input RW0_clk : Clock input RW0_addr : UInt<5> + input RW0_clk : Clock input RW0_wdata : UInt<32> output RW0_rdata : UInt<32> input RW0_en : UInt<1> @@ -951,8 +912,8 @@ circuit smem_0_ext : RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) module smem_0_ext : - input RW0_clk : Clock input RW0_addr : UInt<6> + input RW0_clk : Clock input RW0_wdata : UInt<32> output RW0_rdata : UInt<32> input RW0_en : UInt<1> @@ -1219,8 +1180,8 @@ circuit smem_0_ext : RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) extmodule my_sram_1rw_64x8 : - input CE : Clock input A : UInt<6> + input CE : Clock input I : UInt<8> output O : UInt<8> input CSB : UInt<1> @@ -1425,8 +1386,8 @@ class RocketChipTest extends MacroCompilerSpec 
with HasSRAMGenerator { """ circuit T_2172_ext : module tag_array_ext : - input RW0_clk : Clock input RW0_addr : UInt<6> + input RW0_clk : Clock input RW0_wdata : UInt<80> output RW0_rdata : UInt<80> input RW0_en : UInt<1> @@ -1461,8 +1422,8 @@ circuit T_2172_ext : RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) extmodule SRAM1RW64x32 : - input clk : Clock input addr : UInt<6> + input clk : Clock input din : UInt<32> output dout : UInt<32> input write_en : UInt<1> @@ -1470,8 +1431,8 @@ circuit T_2172_ext : defname = SRAM1RW64x32 module T_1090_ext : - input RW0_clk : Clock input RW0_addr : UInt<9> + input RW0_clk : Clock input RW0_wdata : UInt<64> output RW0_rdata : UInt<64> input RW0_en : UInt<1> @@ -1493,8 +1454,8 @@ circuit T_2172_ext : RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) extmodule SRAM1RW512x32 : - input clk : Clock input addr : UInt<9> + input clk : Clock input din : UInt<32> output dout : UInt<32> input write_en : UInt<1> @@ -1503,8 +1464,8 @@ circuit T_2172_ext : module T_406_ext : - input RW0_clk : Clock input RW0_addr : UInt<9> + input RW0_clk : Clock input RW0_wdata : UInt<64> output RW0_rdata : UInt<64> input RW0_en : UInt<1> @@ -1563,8 +1524,8 @@ circuit T_2172_ext : RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) extmodule SRAM1RW512x8 : - input clk : Clock input addr : UInt<9> + input clk : Clock input din : UInt<8> output dout : UInt<8> input write_en : UInt<1> @@ -1573,13 +1534,13 @@ circuit T_2172_ext : module T_2172_ext : - input W0_clk : Clock input W0_addr : UInt<6> + input W0_clk : Clock input W0_data : UInt<88> input W0_en : UInt<1> input W0_mask : UInt<4> - input R0_clk : Clock input R0_addr : UInt<6> + input R0_clk : Clock output R0_data : UInt<88> input R0_en : UInt<1> @@ -1619,11 +1580,11 @@ circuit T_2172_ext : R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) extmodule SRAM2RW64x32 : - input portA_clk : Clock input portA_addr : UInt<6> + input portA_clk : Clock output portA_dout : UInt<32> - input portB_clk : Clock input portB_addr : UInt<6> + input portB_clk : Clock input portB_din : UInt<32> input portB_write_en : UInt<1> diff --git a/macros/src/test/scala/SynFlops.scala b/macros/src/test/scala/SynFlops.scala index eeac4c8ca..d2ca39d59 100644 --- a/macros/src/test/scala/SynFlops.scala +++ b/macros/src/test/scala/SynFlops.scala @@ -112,11 +112,11 @@ class Synflops_SplitPorts_Read_Write extends MacroCompilerSpec with HasSRAMGener """ circuit target_memory : module target_memory : - input outerB_clk : Clock input outerB_addr : UInt<11> + input outerB_clk : Clock output outerB_dout : UInt<8> - input outerA_clk : Clock input outerA_addr : UInt<11> + input outerA_clk : Clock input outerA_din : UInt<8> input outerA_write_en : UInt<1> """ @@ -151,11 +151,11 @@ circuit target_memory : override def generateFooterPorts = """ - input innerA_clk : Clock input innerA_addr : UInt<10> + input innerA_clk : Clock output innerA_dout : UInt<8> - input innerB_clk : Clock input innerB_addr : UInt<10> + input innerB_clk : Clock input innerB_din : UInt<8> input innerB_write_en : UInt<1> """ @@ -221,11 +221,11 @@ class Synflops_SplitPorts_MaskedMem_Read_MaskedWrite extends MacroCompilerSpec w """ circuit target_memory : module target_memory : - input outerB_clk : Clock input outerB_addr : UInt<11> + input outerB_clk : Clock output outerB_dout : UInt<8> - input outerA_clk : Clock input outerA_addr : UInt<11> + input outerA_clk : Clock input outerA_din : UInt<8> input outerA_write_en : UInt<1> input outerA_mask : UInt<1> @@ -263,11 
+263,11 @@ circuit target_memory : override def generateFooterPorts = """ - input innerA_clk : Clock input innerA_addr : UInt<10> + input innerA_clk : Clock output innerA_dout : UInt<8> - input innerB_clk : Clock input innerB_addr : UInt<10> + input innerB_clk : Clock input innerB_din : UInt<8> input innerB_write_en : UInt<1> input innerB_mask : UInt<8> diff --git a/mdf b/mdf index 88478cd2a..e9befe89e 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit 88478cd2adf3fa9de12be3d066af4fc8b304a23a +Subproject commit e9befe89eb33fbfec3af4fc294201c0e9d58c837 From f5b452229a7e9aff887e03adb8fd6e2004a1e1d5 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Mon, 18 Mar 2019 08:29:35 -0700 Subject: [PATCH 130/273] Avoid using the github redirect for mdf --- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index 13bb74a0c..733ebcf64 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ [submodule "mdf"] path = mdf - url = https://github.com/edwardcwang/plsi-mdf.git + url = https://github.com/ucb-bar/plsi-mdf.git From de94c2376a724147e9f4ed7cd757adbd5bc7bcab Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Mon, 18 Mar 2019 10:07:10 -0700 Subject: [PATCH 131/273] Add Travis (#48) --- .travis.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..0e6fa8113 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,12 @@ +git: + submodules: true +language: scala +# run on new infrastructure +sudo: false + +cache: + directories: + $HOME/.ivy2 + +script: + - sbt test From 817726ff1f779dd79eb2c6fcb905ea1bdb5178f1 Mon Sep 17 00:00:00 2001 From: Abraham Gonzalez Date: Mon, 18 Mar 2019 10:15:50 -0700 Subject: [PATCH 132/273] stop exceptions on empty conf files (#43) * stop exceptions on empty conf files * emit empty verilog file | warn users * put else's on same line as closing bracket --- macros/src/main/scala/MacroCompiler.scala | 13 +++++++++++++ macros/src/main/scala/MemConf.scala | 12 ++++++++---- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index e2d7974ff..b27a5e65a 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -839,6 +839,19 @@ object MacroCompiler extends App { } case None => } + } else { + // Warn user + System.err println "WARNING: Empty *.mems.conf file. No memories generated." 
+ + // Emit empty verilog file if no macros found + params.get(Verilog) match { + case Some(verilogFile: String) => { + // Create an empty verilog file + val verilogWriter = new FileWriter(new File(verilogFile)) + verilogWriter.close() + } + case None => + } } } catch { case e: java.util.NoSuchElementException => diff --git a/macros/src/main/scala/MemConf.scala b/macros/src/main/scala/MemConf.scala index ded4a8896..72342a179 100644 --- a/macros/src/main/scala/MemConf.scala +++ b/macros/src/main/scala/MemConf.scala @@ -47,9 +47,13 @@ object MemConf { val regex = raw"\s*name\s+(\w+)\s+depth\s+(\d+)\s+width\s+(\d+)\s+ports\s+([^\s]+)\s+(?:mask_gran\s+(\d+))?\s*".r def fromString(s: String): Seq[MemConf] = { - s.split("\n").toSeq.map(_ match { - case MemConf.regex(name, depth, width, ports, maskGran) => MemConf(name, depth.toInt, width.toInt, MemPort.fromString(ports), Option(maskGran).map(_.toInt)) - case _ => throw new Exception(s"Error parsing MemConf string : ${s}") - }) + if (s.isEmpty) { + Seq[MemConf]() + } else { + s.split("\n").toSeq.map(_ match { + case MemConf.regex(name, depth, width, ports, maskGran) => MemConf(name, depth.toInt, width.toInt, MemPort.fromString(ports), Option(maskGran).map(_.toInt)) + case _ => throw new Exception(s"Error parsing MemConf string : ${s}") + }) + } } } From fdad525007e0099227d906dbdc8b27e32f7861be Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Mon, 18 Mar 2019 11:25:58 -0700 Subject: [PATCH 133/273] HighForm has whens so we need to check for instances there (#49) Fixes a bug --- tapeout/src/main/scala/transforms/RemoveUnusedModules.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala index e4edbd741..35d0a8936 100644 --- a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala +++ b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala @@ -28,6 +28,8 @@ class RemoveUnusedModules extends Transform { case b: Block => b.stmts.map{ someStatements(_) } .foldLeft(Seq[Statement]())(_ ++ _) + case when: Conditionally => + someStatements(when.conseq) ++ someStatements(when.alt) case i: DefInstance => Seq(i) case w: WDefInstance => Seq(w) case _ => Seq() From affd033f0a8fb6bd19a3bdd0cade71fa4cb44973 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Mon, 25 Mar 2019 22:52:39 -0700 Subject: [PATCH 134/273] Emit hammer IR from MacroCompiler (#50) --- macros/src/main/scala/MacroCompiler.scala | 46 ++++++++++++++++--- macros/src/main/scala/Utils.scala | 6 +-- macros/src/test/scala/MacroCompilerSpec.scala | 2 +- mdf | 2 +- 4 files changed, 44 insertions(+), 12 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index b27a5e65a..08720c7f3 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -18,6 +18,7 @@ import firrtl.CompilerUtils.getLoweringTransforms import mdf.macrolib.{PolarizedPort, PortPolarity, SRAMMacro, SRAMGroup, SRAMCompiler} import scala.collection.mutable.{ArrayBuffer, HashMap} import java.io.{File, FileWriter} +import scala.io.{Source} import Utils._ case class MacroCompilerException(msg: String) extends Exception(msg) @@ -76,12 +77,14 @@ object MacroCompilerAnnotation { * @param mem Path to memory lib * @param memFormat Type of memory lib (Some("conf"), Some("mdf"), or None (defaults to mdf)) * @param lib Path to library lib or None if no libraries + * @param hammerIR Path to HammerIR output or None 
(not generated in this case) * @param costMetric Cost metric to use * @param mode Compiler mode (see CompilerMode) * @param forceCompile Set of memories to force compiling to lib regardless of the mode * @param forceSynflops Set of memories to force compiling as flops regardless of the mode */ - case class Params(mem: String, memFormat: Option[String], lib: Option[String], costMetric: CostMetric, mode: CompilerMode, useCompiler: Boolean, + case class Params(mem: String, memFormat: Option[String], lib: Option[String], hammerIR: Option[String], + costMetric: CostMetric, mode: CompilerMode, useCompiler: Boolean, forceCompile: Set[String], forceSynflops: Set[String]) /** @@ -104,6 +107,7 @@ object MacroCompilerAnnotation { class MacroCompilerPass(mems: Option[Seq[Macro]], libs: Option[Seq[Macro]], compilers: Option[SRAMCompiler], + hammerIR: Option[String], costMetric: CostMetric = CostMetric.default, mode: MacroCompilerAnnotation.CompilerMode = MacroCompilerAnnotation.Default) extends firrtl.passes.Pass { // Helper function to check the legality of bitPairs. @@ -262,7 +266,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], bitPairs.toSeq } - def compile(mem: Macro, lib: Macro): Option[(Module, ExtModule)] = { + def compile(mem: Macro, lib: Macro): Option[(Module, Macro)] = { assert(mem.sortedPorts.lengthCompare(lib.sortedPorts.length) == 0, "mem and lib should have an equal number of ports") val pairedPorts = mem.sortedPorts zip lib.sortedPorts @@ -555,10 +559,11 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } } - Some((mem.module(Block(stmts.toSeq)), lib.blackbox)) + Some((mem.module(Block(stmts.toSeq)), lib)) } def run(c: Circuit): Circuit = { + var firstLib = true val modules = (mems, libs) match { case (Some(mems), Some(libs)) => // Try to compile each of the memories in mems. @@ -590,7 +595,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Try to compile mem against each lib in libs, keeping track of the // best compiled version, external lib used, and cost. - val (best, cost) = (fullLibs foldLeft (None: Option[(Module, ExtModule)], Double.MaxValue)){ + val (best, cost) = (fullLibs foldLeft (None: Option[(Module, Macro)], Double.MaxValue)){ case ((best, cost), lib) if mem.src.ports.size != lib.src.ports.size => /* Palmer: FIXME: This just assumes the Chisel and vendor ports are in the same * order, but I'm starting with what actually gets generated. 
*/ @@ -623,7 +628,18 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], modules } case Some((mod, bb)) => - (modules filterNot (m => m.name == mod.name || m.name == bb.name)) ++ Seq(mod, bb) + hammerIR match { + case Some(f) => { + val hammerIRWriter = new FileWriter(new File(f), !firstLib) + if(firstLib) hammerIRWriter.write("[\n") + hammerIRWriter.write(bb.src.toJSON().toString()) + hammerIRWriter.write("\n,\n") + hammerIRWriter.close() + firstLib = false + } + case None => + } + (modules filterNot (m => m.name == mod.name || m.name == bb.blackbox.name)) ++ Seq(mod, bb.blackbox) } } case _ => c.modules @@ -638,7 +654,7 @@ class MacroCompilerTransform extends Transform { def execute(state: CircuitState) = getMyAnnotations(state) match { case Seq(MacroCompilerAnnotation(state.circuit.main, - MacroCompilerAnnotation.Params(memFile, memFileFormat, libFile, costMetric, mode, useCompiler, forceCompile, forceSynflops))) => + MacroCompilerAnnotation.Params(memFile, memFileFormat, libFile, hammerIR, costMetric, mode, useCompiler, forceCompile, forceSynflops))) => if (mode == MacroCompilerAnnotation.FallbackSynflops) { throw new UnsupportedOperationException("Not implemented yet") } @@ -686,7 +702,7 @@ class MacroCompilerTransform extends Transform { }.getOrElse(Seq.empty) val transforms = Seq( - new MacroCompilerPass(memCompile, libs, compilers, costMetric, mode), + new MacroCompilerPass(memCompile, libs, compilers, hammerIR, costMetric, mode), new SynFlopsPass(true, memSynflops ++ (if (mode == MacroCompilerAnnotation.CompileAndSynflops) { libs.get } else { @@ -729,6 +745,7 @@ object MacroCompiler extends App { case object Library extends MacroParam case object Verilog extends MacroParam case object Firrtl extends MacroParam + case object HammerIR extends MacroParam case object CostFunc extends MacroParam case object Mode extends MacroParam case object UseCompiler extends MacroParam @@ -746,6 +763,7 @@ object MacroCompiler extends App { " -u, --use-compiler: Flag, whether to use the memory compiler defined in library", " -v, --verilog: Verilog output", " -f, --firrtl: FIRRTL output (optional)", + " -hir, --hammer-ir: Hammer-IR output currently only needed for IP compilers", " -c, --cost-func: Cost function to use. Optional (default: \"default\")", " -cp, --cost-param: Cost function parameter. (Optional depending on the cost function.). e.g. 
-c ExternalMetric -cp path /path/to/my/cost/script", " --force-compile [mem]: Force the given memory to be compiled to target libs regardless of the mode", @@ -769,6 +787,8 @@ object MacroCompiler extends App { parseArgs(map + (Verilog -> value), costMap, forcedMemories, tail) case ("-f" | "--firrtl") :: value :: tail => parseArgs(map + (Firrtl -> value), costMap, forcedMemories, tail) + case ("-hir" | "--hammer-ir") :: value :: tail => + parseArgs(map + (HammerIR -> value), costMap, forcedMemories, tail) case ("-c" | "--cost-func") :: value :: tail => parseArgs(map + (CostFunc -> value), costMap, forcedMemories, tail) case ("-cp" | "--cost-param") :: value1 :: value2 :: tail => @@ -802,6 +822,7 @@ object MacroCompiler extends App { circuit.main, MacroCompilerAnnotation.Params( params.get(Macros).get, params.get(MacrosFormat), params.get(Library), + params.get(HammerIR), CostMetric.getCostMetric(params.getOrElse(CostFunc, "default"), costParams), MacroCompilerAnnotation.stringToCompilerMode(params.getOrElse(Mode, "default")), params.contains(UseCompiler), @@ -839,6 +860,17 @@ object MacroCompiler extends App { } case None => } + params.get(HammerIR) match { + case Some(hammerIRFile: String) => { + val lines = Source.fromFile(hammerIRFile).getLines().toList + val hammerIRWriter = new FileWriter(new File(hammerIRFile)) + // JSON means we need to destroy the last comma :( + lines.dropRight(1).foreach(l => hammerIRWriter.write(l + "\n")) + hammerIRWriter.write("]\n") + hammerIRWriter.close() + } + case None => + } } else { // Warn user System.err println "WARNING: Empty *.mems.conf file. No memories generated." diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index 998477d32..13c39bb26 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -78,7 +78,7 @@ object Utils { } def readConfFromString(str: String): Seq[mdf.macrolib.Macro] = { MemConf.fromString(str).map { m:MemConf => - SRAMMacro(m.name, m.width, m.depth, Utils.portSpecToFamily(m.ports), Utils.portSpecToMacroPort(m.width, m.depth, m.maskGranularity, m.ports), Seq.empty[MacroExtraPort]) + SRAMMacro(m.name, m.width, m.depth, Utils.portSpecToFamily(m.ports), Utils.portSpecToMacroPort(m.width, m.depth, m.maskGranularity, m.ports)) } } def portSpecToFamily(ports: Seq[MemPort]): String = { @@ -167,10 +167,10 @@ object Utils { } def buildSRAMMacros(s: mdf.macrolib.SRAMCompiler): Seq[mdf.macrolib.SRAMMacro] = { for (g <- s.groups; d <- g.depth; w <- g.width; vt <- g.vt) - yield mdf.macrolib.SRAMMacro(makeName(g, d, w, vt), w, d, g.family, g.ports.map(_.copy(width=Some(w), depth=Some(d))), g.extraPorts) + yield mdf.macrolib.SRAMMacro(makeName(g, d, w, vt), w, d, g.family, g.ports.map(_.copy(width=Some(w), depth=Some(d))), vt, g.mux, g.extraPorts) } def buildSRAMMacro(g: mdf.macrolib.SRAMGroup, d: Int, w: Int, vt: String): mdf.macrolib.SRAMMacro = { - return mdf.macrolib.SRAMMacro(makeName(g, d, w, vt), w, d, g.family, g.ports.map(_.copy(width=Some(w), depth=Some(d))), g.extraPorts) + return mdf.macrolib.SRAMMacro(makeName(g, d, w, vt), w, d, g.family, g.ports.map(_.copy(width=Some(w), depth=Some(d))), vt, g.mux, g.extraPorts) } def makeName(g: mdf.macrolib.SRAMGroup, depth: Int, width: Int, vt: String): String = { g.name.foldLeft(""){ (builder, next) => diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index ba420fdd2..503a47a3b 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ 
b/macros/src/test/scala/MacroCompilerSpec.scala @@ -99,7 +99,7 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate val macros = mems map (_.blackbox) val circuit = Circuit(NoInfo, macros, macros.last.name) val passes = Seq( - new MacroCompilerPass(Some(mems), libs, None, getCostMetric, if (synflops) MacroCompilerAnnotation.Synflops else MacroCompilerAnnotation.Default), + new MacroCompilerPass(Some(mems), libs, None, None, getCostMetric, if (synflops) MacroCompilerAnnotation.Synflops else MacroCompilerAnnotation.Default), new SynFlopsPass(synflops, libs getOrElse mems), RemoveEmpty) val result: Circuit = (passes foldLeft circuit)((c, pass) => pass run c) diff --git a/mdf b/mdf index e9befe89e..94839b30b 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit e9befe89eb33fbfec3af4fc294201c0e9d58c837 +Subproject commit 94839b30ba2dfec8b83c665f744353f204c3d2b9 From 8f7af5b0bfe98597b68fe5414700c44036dcf899 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Wed, 27 Mar 2019 17:20:41 -0700 Subject: [PATCH 135/273] Fix annos (#53) * Fixes #36 by using the renamemap * Also fix harness passes annotation handling h/t azidar * Remove old comment --- .../transforms/ConvertToExtModPass.scala | 36 +++++++++++++------ .../scala/transforms/ReParentCircuit.scala | 21 ++++++----- .../transforms/RemoveUnusedModules.scala | 15 ++------ 3 files changed, 38 insertions(+), 34 deletions(-) diff --git a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala index 5ef90a255..6f12e9b31 100644 --- a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala +++ b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala @@ -3,34 +3,48 @@ package barstools.tapeout.transforms import firrtl._ -import firrtl.annotations.CircuitName +import firrtl.annotations._ import firrtl.ir._ import firrtl.passes.Pass // Converts some modules to external modules, based on a given function. If // that function returns "true" then the module is converted into an ExtModule, // otherwise it's left alone. 
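As a minimal usage sketch (not part of this patch), the classify hook described in the comment above can be any (Module) => Boolean predicate; the SRAM names below are purely illustrative:

    import firrtl.ir.Module
    import barstools.tapeout.transforms.ConvertToExtMod

    // Hypothetical set of modules to turn into ExtModules; any predicate over
    // firrtl.ir.Module works here.
    val blackBoxNames = Set("SRAM1RW64x32", "SRAM1RW512x32")
    val convertSrams = new ConvertToExtMod((m: Module) => blackBoxNames.contains(m.name))
    // convertSrams would then be appended to the compiler's customTransforms sequence.

The rewrite below keeps this constructor signature but collapses the pass into a single Transform that also records renames for the contents of each black-boxed module.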
-class ConvertToExtModPass(classify: (Module) => Boolean) extends Pass { - def run(c: Circuit): Circuit = { +class ConvertToExtMod(classify: (Module) => Boolean) extends Transform { + def inputForm = HighForm + def outputForm = HighForm + + + def run(state: CircuitState): (Circuit, RenameMap) = { + + val renames = RenameMap() + val c = state.circuit + renames.setCircuit(c.main) val modulesx = c.modules.map { case m: ExtModule => m case m: Module => + val removing = collection.mutable.HashSet[String]() + def findDeadNames(statement: Statement): Unit = { + statement match { + case hn: IsDeclaration => removing += hn.name + case x => x.foreachStmt(findDeadNames) + } + } if (classify(m)) { + m.foreachStmt(findDeadNames) + removing.foreach { name => + renames.record(ReferenceTarget(c.main, m.name, Nil, name, Nil), Nil) + } new ExtModule(m.info, m.name, m.ports, m.name, Seq.empty) } else { m } } - Circuit(c.info, modulesx, c.main) + (Circuit(c.info, modulesx, c.main), renames) } -} -class ConvertToExtMod(classify: (Module) => Boolean) extends Transform with SeqTransformBased { - def inputForm = HighForm - def outputForm = HighForm - def transforms = Seq(new ConvertToExtModPass(classify)) def execute(state: CircuitState): CircuitState = { - val ret = runTransforms(state) - CircuitState(ret.circuit, outputForm, ret.annotations, ret.renames) + val (ret, renames) = run(state) + state.copy(circuit = ret, renames = Some(renames)) } } diff --git a/tapeout/src/main/scala/transforms/ReParentCircuit.scala b/tapeout/src/main/scala/transforms/ReParentCircuit.scala index bee7a6b35..e1a426640 100644 --- a/tapeout/src/main/scala/transforms/ReParentCircuit.scala +++ b/tapeout/src/main/scala/transforms/ReParentCircuit.scala @@ -5,21 +5,20 @@ package barstools.tapeout.transforms import firrtl._ import firrtl.ir._ import firrtl.passes.Pass +import firrtl.annotations._ -// "Re-Parents" a circuit, which changes the top module to something else. 
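The RenameMap bookkeeping added to ConvertToExtMod above is the heart of the #36 fix: every declaration inside a module that becomes an ExtModule is recorded as renamed to nothing, so stale annotations that still point at it (the DontTouchAnnotation problem the old filter worked around) can be cleaned up by the annotation machinery rather than left dangling. A rough sketch of what a single record amounts to, with made-up circuit and component names (the API calls mirror the ones in the diff):

    import firrtl.RenameMap
    import firrtl.annotations.ReferenceTarget

    val renames = RenameMap()
    renames.setCircuit("Top")  // "Top" stands in for c.main
    // "mem_0_0" inside "tag_array_ext" disappears once the module is black-boxed,
    // so its target maps to an empty list of replacements:
    renames.record(ReferenceTarget("Top", "tag_array_ext", Nil, "mem_0_0", Nil), Nil)

ReParentCircuit below uses the same mechanism at circuit granularity, recording CircuitTarget(c.main) -> CircuitTarget(newTopName).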
-class ReParentCircuitPass(newTopName: String) extends Pass { - def run(c: Circuit): Circuit = { - Circuit(c.info, c.modules, newTopName) - } -} - -class ReParentCircuit(newTopName: String) extends Transform with SeqTransformBased { +class ReParentCircuit(newTopName: String) extends Transform { def inputForm = HighForm def outputForm = HighForm - def transforms = Seq(new ReParentCircuitPass(newTopName)) + + def run(c: Circuit, newTopName: String): (Circuit, RenameMap) = { + val myRenames = RenameMap() + myRenames.record(CircuitTarget(c.main), CircuitTarget(newTopName)) + (Circuit(c.info, c.modules, newTopName), myRenames) + } def execute(state: CircuitState): CircuitState = { - val ret = runTransforms(state) - CircuitState(ret.circuit, outputForm, ret.annotations, ret.renames) + val (ret, renames) = run(state.circuit, newTopName) + state.copy(circuit = ret, renames = Some(renames)) } } diff --git a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala index 35d0a8936..24eb35f64 100644 --- a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala +++ b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala @@ -5,7 +5,7 @@ package barstools.tapeout.transforms import firrtl._ import firrtl.ir._ import firrtl.passes.Pass -import firrtl.annotations.{SingleTargetAnnotation, Annotation} +import firrtl.annotations._ import firrtl.transforms.DontTouchAnnotation // Removes all the unused modules in a circuit by recursing through every @@ -52,18 +52,9 @@ class RemoveUnusedModules extends Transform { val renames = state.renames.getOrElse(RenameMap()) - // This is what the annotation filter should look like, but for some reason it doesn't work. - //state.circuit.modules.filterNot { usedModuleSet contains _.name } foreach { x => renames.record(ModuleTarget(state.circuit.main, x.name), Seq()) } + state.circuit.modules.filterNot { usedModuleSet contains _.name } foreach { x => renames.record(ModuleTarget(state.circuit.main, x.name), Nil) } val newCircuit = Circuit(state.circuit.info, usedModuleSeq, state.circuit.main) - val newAnnos = AnnotationSeq(state.annotations.toSeq.filter { _ match { - // XXX This is wrong, but it works for now - // Tracked by https://github.com/ucb-bar/barstools/issues/36 - case x: DontTouchAnnotation => false - //case x: DontTouchAnnotation => usedModuleNames contains x.target.module - case _ => true - }}) - - CircuitState(newCircuit, outputForm, newAnnos, Some(renames)) + state.copy(circuit = newCircuit, renames = Some(renames)) } } From e548210ef42e634e75cf283292685728114694c6 Mon Sep 17 00:00:00 2001 From: John Wright Date: Fri, 29 Mar 2019 13:55:18 -0700 Subject: [PATCH 136/273] Add options to emit top/harness firrtl and annotations (#54) --- .../src/main/scala/transforms/Generate.scala | 91 ++++++++++++++++++- 1 file changed, 88 insertions(+), 3 deletions(-) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 629de58c3..2878aa7aa 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -37,6 +37,28 @@ trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => "use this to set synTop" } + parser.opt[String]("top-fir") + .abbr("tsf") + .valueName("") + .foreach { x => + tapeoutOptions = tapeoutOptions.copy( + topFir = Some(x) + ) + }.text { + "use this to set topFir" + } + + parser.opt[String]("top-anno-out") + .abbr("tsaof") + .valueName("") + 
.foreach { x => + tapeoutOptions = tapeoutOptions.copy( + topAnnoOut = Some(x) + ) + }.text { + "use this to set topAnnoOut" + } + parser.opt[String]("harness-top") .abbr("tht") .valueName("") @@ -48,12 +70,38 @@ trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => "use this to set harnessTop" } + parser.opt[String]("harness-fir") + .abbr("thf") + .valueName("") + .foreach { x => + tapeoutOptions = tapeoutOptions.copy( + harnessFir = Some(x) + ) + }.text { + "use this to set harnessFir" + } + + parser.opt[String]("harness-anno-out") + .abbr("thaof") + .valueName("") + .foreach { x => + tapeoutOptions = tapeoutOptions.copy( + harnessAnnoOut = Some(x) + ) + }.text { + "use this to set harnessAnnoOut" + } + } case class TapeoutOptions( harnessOutput: Option[String] = None, synTop: Option[String] = None, - harnessTop: Option[String] = None + topFir: Option[String] = None, + topAnnoOut: Option[String] = None, + harnessTop: Option[String] = None, + harnessFir: Option[String] = None, + harnessAnnoOut: Option[String] = None ) extends LazyLogging // Requires two phases, one to collect modules below synTop in the hierarchy @@ -99,7 +147,26 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => customTransforms = firrtlOptions.customTransforms ++ topTransforms ) - firrtl.Driver.execute(optionsManager) + val result = firrtl.Driver.execute(optionsManager) + + result match { + case x: FirrtlExecutionSuccess => + tapeoutOptions.topFir.foreach { firFile => + val outputFile = new java.io.PrintWriter(firFile) + outputFile.write(x.circuitState.circuit.serialize) + outputFile.close() + } + tapeoutOptions.topAnnoOut.foreach { annoFile => + val outputFile = new java.io.PrintWriter(annoFile) + outputFile.write(JsonProtocol.serialize(x.circuitState.annotations.filter(_ match { + case EmittedVerilogCircuitAnnotation(_) => false + case _ => true + }))) + outputFile.close() + } + case _ => + } + } // Harness Generation @@ -109,7 +176,25 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => customTransforms = harnessTransforms ) - firrtl.Driver.execute(optionsManager) + val result = firrtl.Driver.execute(optionsManager) + + result match { + case x: FirrtlExecutionSuccess => + tapeoutOptions.harnessFir.foreach { firFile => + val outputFile = new java.io.PrintWriter(firFile) + outputFile.write(x.circuitState.circuit.serialize) + outputFile.close() + } + tapeoutOptions.harnessAnnoOut.foreach { annoFile => + val outputFile = new java.io.PrintWriter(annoFile) + outputFile.write(JsonProtocol.serialize(x.circuitState.annotations.filter(_ match { + case EmittedVerilogCircuitAnnotation(_) => false + case _ => true + }))) + outputFile.close() + } + case _ => + } } } From c23b2b6f841034952b9ff38346c3d913701fe478 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Thu, 2 May 2019 14:36:57 -0700 Subject: [PATCH 137/273] SRAM depth to bigint max synflop depth support Fix annotation mangling on the harness side --- macros/src/main/scala/CostMetric.scala | 4 +- macros/src/main/scala/MacroCompiler.scala | 2 +- macros/src/main/scala/MemConf.scala | 4 +- macros/src/main/scala/SynFlops.scala | 94 +++++----- macros/src/main/scala/Utils.scala | 4 +- macros/src/test/resources/lib-BOOMTest.json | 70 ++++---- macros/src/test/scala/CostFunction.scala | 14 +- macros/src/test/scala/Functional.scala | 8 +- macros/src/test/scala/MacroCompilerSpec.scala | 18 +- macros/src/test/scala/Masks.scala | 20 +-- macros/src/test/scala/MultiPort.scala | 18 +- 
macros/src/test/scala/SRAMCompiler.scala | 2 +- macros/src/test/scala/SimpleSplitDepth.scala | 84 ++++----- macros/src/test/scala/SimpleSplitWidth.scala | 78 ++++---- macros/src/test/scala/SpecificExamples.scala | 38 ++-- macros/src/test/scala/SynFlops.scala | 168 ++++++++++++++---- mdf | 2 +- .../src/main/scala/transforms/Generate.scala | 2 +- .../RenameModulesAndInstances.scala | 29 +-- 19 files changed, 383 insertions(+), 276 deletions(-) diff --git a/macros/src/main/scala/CostMetric.scala b/macros/src/main/scala/CostMetric.scala index 16f1da5df..b80324aa2 100644 --- a/macros/src/main/scala/CostMetric.scala +++ b/macros/src/main/scala/CostMetric.scala @@ -126,9 +126,9 @@ object DefaultMetric extends CostMetric with CostMetricCompanion { } val depthCost = math.ceil(mem.src.depth.toDouble / lib.src.depth.toDouble) val widthCost = math.ceil(memWidth.toDouble / lib.src.width.toDouble) - val bitsCost = (lib.src.depth * lib.src.width) + val bitsCost = (lib.src.depth * lib.src.width).toDouble // Fraction of wasted bits plus const per mem - val requestedBits = mem.src.depth * mem.src.width + val requestedBits = (mem.src.depth * mem.src.width).toDouble val bitsWasted = depthCost*widthCost*bitsCost - requestedBits val wastedConst = 0.05 // 0 means waste as few bits with no regard for instance count val costPerInst = wastedConst*depthCost*widthCost diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 08720c7f3..56820787c 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -312,7 +312,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } } } - for ((off, i) <- (0 until mem.src.depth by lib.src.depth).zipWithIndex) { + for ((off, i) <- (BigInt(0).until(mem.src.depth, lib.src.depth)).zipWithIndex) { for (j <- bitPairs.indices) { val name = s"mem_${i}_${j}" // Create the instance. 
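With depth now a BigInt, the old integer Range (0 until depth by step) in the loop above no longer type-checks; BigInt(0).until(end, step) builds an equivalent NumericRange. A small worked example with illustrative sizes (not taken from the patch):

    val memDepth = BigInt(4096)   // illustrative mem depth
    val libDepth = BigInt(1024)   // illustrative lib depth
    val offsets  = BigInt(0).until(memDepth, libDepth)  // 0, 1024, 2048, 3072
    val rows     = offsets.size                         // 4 lib instances stacked in depth
    // This agrees with the ceil(memDepth / libDepth) term used by DefaultMetric above.
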
diff --git a/macros/src/main/scala/MemConf.scala b/macros/src/main/scala/MemConf.scala index 72342a179..0d13c5a8e 100644 --- a/macros/src/main/scala/MemConf.scala +++ b/macros/src/main/scala/MemConf.scala @@ -30,7 +30,7 @@ object MemPort { // TODO standardize this in FIRRTL case class MemConf( name: String, - depth: Int, + depth: BigInt, width: Int, ports: Seq[MemPort], maskGranularity: Option[Int] @@ -51,7 +51,7 @@ object MemConf { Seq[MemConf]() } else { s.split("\n").toSeq.map(_ match { - case MemConf.regex(name, depth, width, ports, maskGran) => MemConf(name, depth.toInt, width.toInt, MemPort.fromString(ports), Option(maskGran).map(_.toInt)) + case MemConf.regex(name, depth, width, ports, maskGran) => MemConf(name, BigInt(depth), width.toInt, MemPort.fromString(ports), Option(maskGran).map(_.toInt)) case _ => throw new Exception(s"Error parsing MemConf string : ${s}") }) } diff --git a/macros/src/main/scala/SynFlops.scala b/macros/src/main/scala/SynFlops.scala index 1e7a4d7c1..f815b4cbb 100644 --- a/macros/src/main/scala/SynFlops.scala +++ b/macros/src/main/scala/SynFlops.scala @@ -9,8 +9,9 @@ import firrtl.passes.MemPortUtils.{memPortField, memType} import Utils._ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pass { + val extraMods = scala.collection.mutable.ArrayBuffer.empty[Module] lazy val libMods = (libs map { lib => lib.src.name -> { - val dataType = (lib.src.ports foldLeft (None: Option[BigInt]))((res, port) => + val (dataType, dataWidth) = (lib.src.ports foldLeft (None: Option[BigInt]))((res, port) => (res, port.maskPort) match { case (_, None) => res @@ -21,23 +22,35 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa res } ) match { - case None => UIntType(IntWidth(lib.src.width)) - case Some(gran) => VectorType(UIntType(IntWidth(gran)), (lib.src.width / gran).toInt) + case None => (UIntType(IntWidth(lib.src.width)), lib.src.width) + case Some(gran) => (UIntType(IntWidth(gran)), gran.intValue) } + val maxDepth = min(lib.src.depth, 1<<26) + val numMems = lib.src.depth / maxDepth + + // Change macro to be mapped onto to look like the below mem + // by changing its depth, and width + val lib_macro = new Macro(lib.src.copy(name="split_"+lib.src.name, + depth = maxDepth, width = dataWidth, ports = lib.src.ports.map(p => + p.copy(width = p.width.map(_ => dataWidth), depth = p.depth.map(_ => maxDepth), + maskGran = p.maskGran.map(_ => dataWidth))))) + val mod_macro = (new MacroCompilerPass(None,None,None,None)).compile(lib, lib_macro) + val (real_mod, real_macro) = mod_macro.get + val mem = DefMemory( NoInfo, "ram", dataType, - lib.src.depth, + maxDepth, 1, // writeLatency 1, // readLatency. 
This is possible because of VerilogMemDelays - lib.readers.indices map (i => s"R_$i"), - lib.writers.indices map (i => s"W_$i"), - lib.readwriters.indices map (i => s"RW_$i") + real_macro.readers.indices map (i => s"R_$i"), + real_macro.writers.indices map (i => s"W_$i"), + real_macro.readwriters.indices map (i => s"RW_$i") ) - val readConnects = lib.readers.zipWithIndex flatMap { case (r, i) => + val readConnects = real_macro.readers.zipWithIndex flatMap { case (r, i) => val clock = portToExpression(r.src.clock.get) val address = portToExpression(r.src.address) val enable = (r.src chipEnable, r.src readEnable) match { @@ -49,11 +62,7 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa case (None, None) => one } val data = memPortField(mem, s"R_$i", "data") - val read = (dataType: @unchecked) match { - case VectorType(tpe, size) => cat(((0 until size) map (k => - WSubIndex(data, k, tpe, UNKNOWNGENDER))).reverse) - case _: UIntType => data - } + val read = data Seq( Connect(NoInfo, memPortField(mem, s"R_$i", "clk"), clock), Connect(NoInfo, memPortField(mem, s"R_$i", "addr"), address), @@ -62,7 +71,7 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa ) } - val writeConnects = lib.writers.zipWithIndex flatMap { case (w, i) => + val writeConnects = real_macro.writers.zipWithIndex flatMap { case (w, i) => val clock = portToExpression(w.src.clock.get) val address = portToExpression(w.src.address) val enable = (w.src.chipEnable, w.src.writeEnable) match { @@ -73,34 +82,32 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa case (None, Some(we)) => portToExpression(we) case (None, None) => zero // is it possible? } - val mask = memPortField(mem, s"W_$i", "mask") + val mask = w.src.maskPort match { + case Some(m) => portToExpression(m) + case None => one + } val data = memPortField(mem, s"W_$i", "data") val write = portToExpression(w.src.input.get) Seq( Connect(NoInfo, memPortField(mem, s"W_$i", "clk"), clock), Connect(NoInfo, memPortField(mem, s"W_$i", "addr"), address), - Connect(NoInfo, memPortField(mem, s"W_$i", "en"), enable) - ) ++ (dataType match { - case VectorType(tpe, size) => - val width = bitWidth(tpe).toInt - ((0 until size) map (k => - Connect(NoInfo, WSubIndex(data, k, tpe, UNKNOWNGENDER), - bits(write, (k + 1) * width - 1, k * width)))) ++ - ((0 until size) map (k => - Connect(NoInfo, WSubIndex(mask, k, BoolType, UNKNOWNGENDER), - bits(WRef(w.src.maskPort.get.name), k)))) - case _: UIntType => - Seq(Connect(NoInfo, data, write), Connect(NoInfo, mask, one)) - }) + Connect(NoInfo, memPortField(mem, s"W_$i", "en"), enable), + Connect(NoInfo, memPortField(mem, s"W_$i", "mask"), mask), + Connect(NoInfo, data, write) + ) } - val readwriteConnects = lib.readwriters.zipWithIndex flatMap { case (rw, i) => + val readwriteConnects = real_macro.readwriters.zipWithIndex flatMap { case (rw, i) => val clock = portToExpression(rw.src.clock.get) val address = portToExpression(rw.src.address) val wmode = rw.src.writeEnable match { case Some(we) => portToExpression(we) case None => zero // is it possible? 
} + val wmask = rw.src.maskPort match { + case Some(wm) => portToExpression(wm) + case None => one + } val enable = (rw.src.chipEnable, rw.src.readEnable) match { case (Some(en), Some(re)) => and(portToExpression(en), or(portToExpression(re), wmode)) @@ -108,40 +115,27 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa case (None, Some(re)) => or(portToExpression(re), wmode) case (None, None) => one } - val wmask = memPortField(mem, s"RW_$i", "wmask") val wdata = memPortField(mem, s"RW_$i", "wdata") val rdata = memPortField(mem, s"RW_$i", "rdata") val write = portToExpression(rw.src.input.get) - val read = (dataType: @unchecked) match { - case VectorType(tpe, size) => cat(((0 until size) map (k => - WSubIndex(rdata, k, tpe, UNKNOWNGENDER))).reverse) - case _: UIntType => rdata - } + val read = rdata Seq( Connect(NoInfo, memPortField(mem, s"RW_$i", "clk"), clock), Connect(NoInfo, memPortField(mem, s"RW_$i", "addr"), address), Connect(NoInfo, memPortField(mem, s"RW_$i", "en"), enable), Connect(NoInfo, memPortField(mem, s"RW_$i", "wmode"), wmode), - Connect(NoInfo, WRef(rw.src.output.get.name), read) - ) ++ (dataType match { - case VectorType(tpe, size) => - val width = bitWidth(tpe).toInt - ((0 until size) map (k => - Connect(NoInfo, WSubIndex(wdata, k, tpe, UNKNOWNGENDER), - bits(write, (k + 1) * width - 1, k * width)))) ++ - ((0 until size) map (k => - Connect(NoInfo, WSubIndex(wmask, k, BoolType, UNKNOWNGENDER), - bits(WRef(rw.src.maskPort.get.name), k)))) - case _: UIntType => - Seq(Connect(NoInfo, wdata, write), Connect(NoInfo, wmask, one)) - }) + Connect(NoInfo, memPortField(mem, s"RW_$i", "wmask"), wmask), + Connect(NoInfo, WRef(rw.src.output.get.name), read), + Connect(NoInfo, wdata, write) + ) } - lib.module(Block(mem +: (readConnects ++ writeConnects ++ readwriteConnects))) + extraMods.append(real_macro.module(Block(mem +: (readConnects ++ writeConnects ++ readwriteConnects)))) + real_mod }}).toMap def run(c: Circuit): Circuit = { if (!synflops) c - else c.copy(modules = (c.modules map (m => libMods getOrElse (m.name, m)))) + else c.copy(modules = (c.modules map (m => libMods.getOrElse(m.name, m))) ++ extraMods) } } diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index 13c39bb26..ad19c9171 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -91,7 +91,7 @@ object Utils { return numRStr + numWStr + numRWStr } // This translates between two represenations of ports - def portSpecToMacroPort(width: Int, depth: Int, maskGran: Option[Int], ports: Seq[MemPort]): Seq[MacroPort] = { + def portSpecToMacroPort(width: Int, depth: BigInt, maskGran: Option[Int], ports: Seq[MemPort]): Seq[MacroPort] = { var numR = 0 var numW = 0 var numRW = 0 @@ -103,7 +103,7 @@ object Utils { width=Some(width), depth=Some(depth), address=PolarizedPort(s"${portName}_addr", ActiveHigh), clock=Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), - chipEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + readEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), output=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) ) } case WritePort => { diff --git a/macros/src/test/resources/lib-BOOMTest.json b/macros/src/test/resources/lib-BOOMTest.json index 8246bc3d3..1d2e5f697 100644 --- a/macros/src/test/resources/lib-BOOMTest.json +++ b/macros/src/test/resources/lib-BOOMTest.json @@ -22,7 +22,7 @@ ], "name": "my_sram_1rw_1024x8", "type": "sram", - "depth": 1024 + "depth": "1024" }, { "family": "1rw", 
@@ -47,7 +47,7 @@ ], "name": "my_sram_1rw_128x46", "type": "sram", - "depth": 128 + "depth": "128" }, { "family": "1rw", @@ -72,7 +72,7 @@ ], "name": "my_sram_1rw_128x48", "type": "sram", - "depth": 128 + "depth": "128" }, { "family": "1rw", @@ -97,7 +97,7 @@ ], "name": "my_sram_1rw_128x8", "type": "sram", - "depth": 128 + "depth": "128" }, { "family": "1rw", @@ -122,7 +122,7 @@ ], "name": "my_sram_1rw_256x128", "type": "sram", - "depth": 256 + "depth": "256" }, { "family": "1rw", @@ -147,7 +147,7 @@ ], "name": "my_sram_1rw_256x32", "type": "sram", - "depth": 256 + "depth": "256" }, { "family": "1rw", @@ -172,7 +172,7 @@ ], "name": "my_sram_1rw_256x46", "type": "sram", - "depth": 256 + "depth": "256" }, { "family": "1rw", @@ -197,7 +197,7 @@ ], "name": "my_sram_1rw_256x48", "type": "sram", - "depth": 256 + "depth": "256" }, { "family": "1rw", @@ -222,7 +222,7 @@ ], "name": "my_sram_1rw_256x8", "type": "sram", - "depth": 256 + "depth": "256" }, { "family": "1rw", @@ -247,7 +247,7 @@ ], "name": "my_sram_1rw_32x50", "type": "sram", - "depth": 32 + "depth": "32" }, { "family": "1rw", @@ -272,7 +272,7 @@ ], "name": "my_sram_1rw_512x128", "type": "sram", - "depth": 512 + "depth": "512" }, { "family": "1rw", @@ -297,7 +297,7 @@ ], "name": "my_sram_1rw_512x32", "type": "sram", - "depth": 512 + "depth": "512" }, { "family": "1rw", @@ -322,7 +322,7 @@ ], "name": "my_sram_1rw_512x8", "type": "sram", - "depth": 512 + "depth": "512" }, { "family": "1rw", @@ -347,7 +347,7 @@ ], "name": "my_sram_1rw_64x128", "type": "sram", - "depth": 64 + "depth": "64" }, { "family": "1rw", @@ -372,7 +372,7 @@ ], "name": "my_sram_1rw_64x32", "type": "sram", - "depth": 64 + "depth": "64" }, { "family": "1rw", @@ -397,7 +397,7 @@ ], "name": "my_sram_1rw_64x34", "type": "sram", - "depth": 64 + "depth": "64" }, { "family": "1rw", @@ -422,7 +422,7 @@ ], "name": "my_sram_1rw_64x8", "type": "sram", - "depth": 64 + "depth": "64" }, { "family": "2rw", @@ -463,7 +463,7 @@ ], "name": "my_sram_2rw_128x16", "type": "sram", - "depth": 128 + "depth": "128" }, { "family": "2rw", @@ -504,7 +504,7 @@ ], "name": "my_sram_2rw_128x32", "type": "sram", - "depth": 128 + "depth": "128" }, { "family": "2rw", @@ -545,7 +545,7 @@ ], "name": "my_sram_2rw_128x4", "type": "sram", - "depth": 128 + "depth": "128" }, { "family": "2rw", @@ -586,7 +586,7 @@ ], "name": "my_sram_2rw_128x8", "type": "sram", - "depth": 128 + "depth": "128" }, { "family": "2rw", @@ -627,7 +627,7 @@ ], "name": "my_sram_2rw_16x16", "type": "sram", - "depth": 16 + "depth": "16" }, { "family": "2rw", @@ -668,7 +668,7 @@ ], "name": "my_sram_2rw_16x32", "type": "sram", - "depth": 16 + "depth": "16" }, { "family": "2rw", @@ -709,7 +709,7 @@ ], "name": "my_sram_2rw_16x4", "type": "sram", - "depth": 16 + "depth": "16" }, { "family": "2rw", @@ -750,7 +750,7 @@ ], "name": "my_sram_2rw_16x8", "type": "sram", - "depth": 16 + "depth": "16" }, { "family": "2rw", @@ -791,7 +791,7 @@ ], "name": "my_sram_2rw_32x16", "type": "sram", - "depth": 32 + "depth": "32" }, { "family": "2rw", @@ -832,7 +832,7 @@ ], "name": "my_sram_2rw_32x22", "type": "sram", - "depth": 32 + "depth": "32" }, { "family": "2rw", @@ -873,7 +873,7 @@ ], "name": "my_sram_2rw_32x32", "type": "sram", - "depth": 32 + "depth": "32" }, { "family": "2rw", @@ -914,7 +914,7 @@ ], "name": "my_sram_2rw_32x39", "type": "sram", - "depth": 32 + "depth": "32" }, { "family": "2rw", @@ -955,7 +955,7 @@ ], "name": "my_sram_2rw_32x4", "type": "sram", - "depth": 32 + "depth": "32" }, { "family": "2rw", @@ -996,7 +996,7 @@ ], "name": 
"my_sram_2rw_32x8", "type": "sram", - "depth": 32 + "depth": "32" }, { "family": "2rw", @@ -1037,7 +1037,7 @@ ], "name": "my_sram_2rw_64x16", "type": "sram", - "depth": 64 + "depth": "64" }, { "family": "2rw", @@ -1078,7 +1078,7 @@ ], "name": "my_sram_2rw_64x32", "type": "sram", - "depth": 64 + "depth": "64" }, { "family": "2rw", @@ -1119,7 +1119,7 @@ ], "name": "my_sram_2rw_64x4", "type": "sram", - "depth": 64 + "depth": "64" }, { "family": "2rw", @@ -1160,6 +1160,6 @@ ], "name": "my_sram_2rw_64x8", "type": "sram", - "depth": 64 + "depth": "64" } ] diff --git a/macros/src/test/scala/CostFunction.scala b/macros/src/test/scala/CostFunction.scala index e2dbe03fa..c82080b27 100644 --- a/macros/src/test/scala/CostFunction.scala +++ b/macros/src/test/scala/CostFunction.scala @@ -31,34 +31,34 @@ class SelectCostMetric extends MacroCompilerSpec with HasSRAMGenerator { val libSRAMs = Seq( SRAMMacro( name="SRAM_WIDTH_128", - depth=1024, + depth=BigInt(1024), width=128, family="1rw", ports=Seq( - generateReadWritePort("", 128, 1024) + generateReadWritePort("", 128, BigInt(1024)) ) ), SRAMMacro( name="SRAM_WIDTH_64", - depth=1024, + depth=BigInt(1024), width=64, family="1rw", ports=Seq( - generateReadWritePort("", 64, 1024) + generateReadWritePort("", 64, BigInt(1024)) ) ), SRAMMacro( name="SRAM_WIDTH_32", - depth=1024, + depth=BigInt(1024), width=32, family="1rw", ports=Seq( - generateReadWritePort("", 32, 1024) + generateReadWritePort("", 32, BigInt(1024)) ) ) ) - val memSRAMs = Seq(generateSRAM("target_memory", "", 128, 1024)) + val memSRAMs = Seq(generateSRAM("target_memory", "", 128, BigInt(1024))) writeToLib(lib, libSRAMs) writeToMem(mem, memSRAMs) diff --git a/macros/src/test/scala/Functional.scala b/macros/src/test/scala/Functional.scala index cb2b180f7..2b0dfbe0e 100644 --- a/macros/src/test/scala/Functional.scala +++ b/macros/src/test/scala/Functional.scala @@ -7,8 +7,8 @@ import firrtl_interpreter.InterpretiveTester // Synchronous write and read back. class SynchronousReadAndWrite extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 12 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) compile(mem, lib, v, true) val result = execute(mem, lib, true) @@ -67,8 +67,8 @@ class SynchronousReadAndWrite extends MacroCompilerSpec with HasSRAMGenerator wi // between two submemories. class DontReadCombinationally extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) compile(mem, lib, v, true) val result = execute(mem, lib, true) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 503a47a3b..dfecc0c1c 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -122,6 +122,7 @@ trait HasSRAMGenerator { import mdf.macrolib._ import scala.language.implicitConversions implicit def Int2SomeInt(i: Int): Option[Int] = Some(i) + implicit def BigInt2SomeBigInt(i: BigInt): Option[BigInt] = Some(i) // Generate a standard (read/write/combo) port for testing. 
@@ -129,7 +130,7 @@ trait HasSRAMGenerator { def generateTestPort( prefix: String, width: Option[Int], - depth: Option[Int], + depth: Option[BigInt], maskGran: Option[Int] = None, read: Boolean, readEnable: Boolean = false, @@ -159,17 +160,17 @@ trait HasSRAMGenerator { } // Generate a read port for testing. - def generateReadPort(prefix: String, width: Option[Int], depth: Option[Int], readEnable: Boolean = false): MacroPort = { + def generateReadPort(prefix: String, width: Option[Int], depth: Option[BigInt], readEnable: Boolean = false): MacroPort = { generateTestPort(prefix, width, depth, write = false, read = true, readEnable = readEnable) } // Generate a write port for testing. - def generateWritePort(prefix: String, width: Option[Int], depth: Option[Int], maskGran: Option[Int] = None, writeEnable: Boolean = true): MacroPort = { + def generateWritePort(prefix: String, width: Option[Int], depth: Option[BigInt], maskGran: Option[Int] = None, writeEnable: Boolean = true): MacroPort = { generateTestPort(prefix, width, depth, maskGran = maskGran, write = true, read = false, writeEnable = writeEnable) } // Generate a simple read-write port for testing. - def generateReadWritePort(prefix: String, width: Option[Int], depth: Option[Int], maskGran: Option[Int] = None): MacroPort = { + def generateReadWritePort(prefix: String, width: Option[Int], depth: Option[BigInt], maskGran: Option[Int] = None): MacroPort = { generateTestPort( prefix, width, depth, maskGran = maskGran, write = true, writeEnable = true, @@ -178,7 +179,7 @@ trait HasSRAMGenerator { } // Generate a "simple" SRAM (active high/positive edge, 1 read-write port). - def generateSRAM(name: String, prefix: String, width: Int, depth: Int, maskGran: Option[Int] = None, extraPorts: Seq[MacroExtraPort] = List()): SRAMMacro = { + def generateSRAM(name: String, prefix: String, width: Int, depth: BigInt, maskGran: Option[Int] = None, extraPorts: Seq[MacroExtraPort] = List()): SRAMMacro = { SRAMMacro( name = name, width = width, @@ -215,8 +216,8 @@ trait HasSimpleTestGenerator { def useCompiler: Boolean = false def memWidth: Int def libWidth: Int - def memDepth: Int - def libDepth: Int + def memDepth: BigInt + def libDepth: BigInt def memMaskGran: Option[Int] = None def libMaskGran: Option[Int] = None def extraPorts: Seq[mdf.macrolib.MacroExtraPort] = List() @@ -276,7 +277,7 @@ trait HasSimpleTestGenerator { // Number of lib instances needed to hold the mem, in both directions. // Round up (e.g. 1.5 instances = effectively 2 instances) - val depthInstances = math.ceil(memDepth.toFloat / libDepth).toInt + val depthInstances = math.ceil(memDepth.toFloat / libDepth.toFloat).toInt val widthInstances = math.ceil(memWidth.toFloat / usableLibWidth).toInt // Number of width bits in the last width-direction memory. @@ -440,6 +441,7 @@ trait HasNoLibTestGenerator extends HasSimpleTestGenerator { // Therefore, make "lib" width/depth equal to the mem. override lazy val libDepth = memDepth override lazy val libWidth = memWidth + override lazy val lib_name = mem_name // Do the same for port names. 
override lazy val libPortPrefix = memPortPrefix diff --git a/macros/src/test/scala/Masks.scala b/macros/src/test/scala/Masks.scala index 1fd802022..a091a42af 100644 --- a/macros/src/test/scala/Masks.scala +++ b/macros/src/test/scala/Masks.scala @@ -6,8 +6,8 @@ import mdf.macrolib._ trait MasksTestSettings { this: MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator => - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) } // Try all four different kinds of mask config: @@ -22,7 +22,7 @@ trait MasksTestSettings { */ class Masks_FourTypes_NonMaskedMem_NonMaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val memMaskGran = None override lazy val libWidth = 8 @@ -32,7 +32,7 @@ class Masks_FourTypes_NonMaskedMem_NonMaskedLib extends MacroCompilerSpec with H } class Masks_FourTypes_NonMaskedMem_MaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val memMaskGran = None override lazy val libWidth = 8 @@ -42,7 +42,7 @@ class Masks_FourTypes_NonMaskedMem_MaskedLib extends MacroCompilerSpec with HasS } class Masks_FourTypes_MaskedMem_NonMaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val memMaskGran = Some(8) override lazy val libWidth = 8 @@ -52,7 +52,7 @@ class Masks_FourTypes_MaskedMem_NonMaskedLib extends MacroCompilerSpec with HasS } class Masks_FourTypes_MaskedMem_NonMaskedLib_SmallerMaskGran extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val memMaskGran = Some(4) override lazy val libWidth = 8 @@ -62,7 +62,7 @@ class Masks_FourTypes_MaskedMem_NonMaskedLib_SmallerMaskGran extends MacroCompil } class Masks_FourTypes_MaskedMem_MaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val memMaskGran = Some(8) override lazy val libWidth = 16 @@ -72,7 +72,7 @@ class Masks_FourTypes_MaskedMem_MaskedLib extends MacroCompilerSpec with HasSRAM } class Masks_FourTypes_MaskedMem_MaskedLib_SameMaskGran extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val memMaskGran = Some(8) override lazy val libWidth = 16 @@ -82,7 +82,7 @@ class Masks_FourTypes_MaskedMem_MaskedLib_SameMaskGran extends MacroCompilerSpec } class Masks_FourTypes_MaskedMem_MaskedLib_SmallerMaskGran extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 64 override lazy val memMaskGran = Some(4) override lazy val libWidth = 32 @@ -94,7 +94,7 @@ class Masks_FourTypes_MaskedMem_MaskedLib_SmallerMaskGran extends MacroCompilerS // Bit-mask memories to 
non-masked libs whose width is larger than 1. class Masks_BitMaskedMem_NonMaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val memMaskGran = Some(1) override lazy val libWidth = 8 diff --git a/macros/src/test/scala/MultiPort.scala b/macros/src/test/scala/MultiPort.scala index ac1fb2f8a..470fee160 100644 --- a/macros/src/test/scala/MultiPort.scala +++ b/macros/src/test/scala/MultiPort.scala @@ -6,7 +6,7 @@ package barstools.macros class SplitWidth_2rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { import mdf.macrolib._ - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 64 override lazy val memMaskGran = Some(16) override lazy val libWidth = 16 @@ -18,11 +18,11 @@ class SplitWidth_2rw extends MacroCompilerSpec with HasSRAMGenerator with HasSim depth=memDepth, family="2rw", ports=Seq(generateTestPort( - "portA", memWidth, memDepth, maskGran=memMaskGran, + "portA", memWidth, Some(memDepth), maskGran=memMaskGran, write=true, writeEnable=true, read=true, readEnable=true ), generateTestPort( - "portB", memWidth, memDepth, maskGran=memMaskGran, + "portB", memWidth, Some(memDepth), maskGran=memMaskGran, write=true, writeEnable=true, read=true, readEnable=true )) @@ -121,7 +121,7 @@ class SplitWidth_2rw extends MacroCompilerSpec with HasSRAMGenerator with HasSim class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { import mdf.macrolib._ - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 64 override lazy val memMaskGran = Some(16) override lazy val libWidth = 16 @@ -133,11 +133,11 @@ class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasS depth=memDepth, family="1r1w", ports=Seq(generateTestPort( - "portA", memWidth, memDepth, maskGran=memMaskGran, + "portA", memWidth, Some(memDepth), maskGran=memMaskGran, write=false, writeEnable=false, read=true, readEnable=true ), generateTestPort( - "portB", memWidth, memDepth, maskGran=memMaskGran, + "portB", memWidth, Some(memDepth), maskGran=memMaskGran, write=true, writeEnable=true, read=false, readEnable=false )) @@ -224,7 +224,7 @@ class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasS class SplitWidth_2rw_differentMasks extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { import mdf.macrolib._ - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 64 override lazy val memMaskGran = Some(16) override lazy val libWidth = 16 @@ -239,11 +239,11 @@ class SplitWidth_2rw_differentMasks extends MacroCompilerSpec with HasSRAMGenera depth=memDepth, family="2rw", ports=Seq(generateTestPort( - "portA", memWidth, memDepth, maskGran=memMaskGran, + "portA", memWidth, Some(memDepth), maskGran=memMaskGran, write=true, writeEnable=true, read=true, readEnable=true ), generateTestPort( - "portB", memWidth, memDepth, maskGran=Some(memMaskGranB), + "portB", memWidth, Some(memDepth), maskGran=Some(memMaskGranB), write=true, writeEnable=true, read=true, readEnable=true )) diff --git a/macros/src/test/scala/SRAMCompiler.scala b/macros/src/test/scala/SRAMCompiler.scala index ea6667e9f..5cae4745a 100644 --- a/macros/src/test/scala/SRAMCompiler.scala +++ b/macros/src/test/scala/SRAMCompiler.scala @@ -5,7 
+5,7 @@ import mdf.macrolib._ class SRAMCompiler extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { val compiler = generateSRAMCompiler("awesome", "A") val verilog = s"v-SRAMCompiler.v" - override lazy val depth = 16 + override lazy val depth = BigInt(16) override lazy val memWidth = 8 override lazy val libWidth = 8 override lazy val mem_name = "mymem" diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index 448dd06e2..18b4a9302 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -67,48 +67,48 @@ s""" // Try different widths class SplitDepth4096x32_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val memDepth = 4096 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(4096) + override lazy val libDepth = BigInt(1024) compileExecuteAndTest(mem, lib, v, output) } class SplitDepth4096x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 16 - override lazy val memDepth = 4096 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(4096) + override lazy val libDepth = BigInt(1024) compileExecuteAndTest(mem, lib, v, output) } class SplitDepth32768x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val memDepth = 32768 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(32768) + override lazy val libDepth = BigInt(1024) compileExecuteAndTest(mem, lib, v, output) } class SplitDepth4096x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val memDepth = 4096 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(4096) + override lazy val libDepth = BigInt(1024) compileExecuteAndTest(mem, lib, v, output) } class SplitDepth2048x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) compileExecuteAndTest(mem, lib, v, output) } class SplitDepth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val memDepth = 1024 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(1024) + override lazy val libDepth = BigInt(1024) compileExecuteAndTest(mem, lib, v, output) } @@ -116,16 +116,16 @@ class SplitDepth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H // Non power of two class SplitDepth2000x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val memDepth = 2000 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2000) + override lazy val libDepth = BigInt(1024) compileExecuteAndTest(mem, lib, v, output) } class SplitDepth2049x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val memDepth = 2049 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2049) + override lazy val libDepth = BigInt(1024) compileExecuteAndTest(mem, lib, v, output) } @@ -135,8 
+135,8 @@ class SplitDepth2049x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H // Test for mem mask == lib mask (i.e. mask is a write enable bit) class SplitDepth2048x32_mrw_lib32 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(32) @@ -145,8 +145,8 @@ class SplitDepth2048x32_mrw_lib32 extends MacroCompilerSpec with HasSRAMGenerato class SplitDepth2048x8_mrw_lib8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 8 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(8) @@ -156,8 +156,8 @@ class SplitDepth2048x8_mrw_lib8 extends MacroCompilerSpec with HasSRAMGenerator // Non-bit level mask class SplitDepth2048x64_mrw_mem32_lib8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 64 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(8) @@ -167,8 +167,8 @@ class SplitDepth2048x64_mrw_mem32_lib8 extends MacroCompilerSpec with HasSRAMGen // Bit level mask class SplitDepth2048x32_mrw_mem16_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val memMaskGran = Some(16) override lazy val libMaskGran = Some(1) @@ -177,8 +177,8 @@ class SplitDepth2048x32_mrw_mem16_lib1 extends MacroCompilerSpec with HasSRAMGen class SplitDepth2048x32_mrw_mem8_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(1) @@ -187,8 +187,8 @@ class SplitDepth2048x32_mrw_mem8_lib1 extends MacroCompilerSpec with HasSRAMGene class SplitDepth2048x32_mrw_mem4_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val memMaskGran = Some(4) override lazy val libMaskGran = Some(1) @@ -197,8 +197,8 @@ class SplitDepth2048x32_mrw_mem4_lib1 extends MacroCompilerSpec with HasSRAMGene class SplitDepth2048x32_mrw_mem2_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val memMaskGran = Some(2) override lazy val libMaskGran = Some(1) @@ -208,8 +208,8 @@ class 
SplitDepth2048x32_mrw_mem2_lib1 extends MacroCompilerSpec with HasSRAMGene // Non-powers of 2 mask sizes class SplitDepth2048x32_mrw_mem3_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val memMaskGran = Some(3) override lazy val libMaskGran = Some(1) @@ -219,8 +219,8 @@ class SplitDepth2048x32_mrw_mem3_lib1 extends MacroCompilerSpec with HasSRAMGene class SplitDepth2048x32_mrw_mem7_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val memMaskGran = Some(7) override lazy val libMaskGran = Some(1) @@ -230,8 +230,8 @@ class SplitDepth2048x32_mrw_mem7_lib1 extends MacroCompilerSpec with HasSRAMGene class SplitDepth2048x32_mrw_mem9_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val memMaskGran = Some(9) override lazy val libMaskGran = Some(1) @@ -244,8 +244,8 @@ class SplitDepth2048x8_extraPort extends MacroCompilerSpec with HasSRAMGenerator import mdf.macrolib._ override lazy val width = 8 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val extraPorts = List( MacroExtraPort(name="extra_port", width=8, portType=Constant, value=0xff) ) @@ -303,8 +303,8 @@ circuit target_memory : // Split read and (non-masked) write ports (r+w). class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGenerator { lazy val width = 8 - lazy val memDepth = 2048 - lazy val libDepth = 1024 + lazy val memDepth = BigInt(2048) + lazy val libDepth = BigInt(1024) override val memPrefix = testDir override val libPrefix = testDir @@ -462,8 +462,8 @@ TODO // Split read and (masked) write ports (r+mw). class SplitDepth_SplitPortsMasked extends MacroCompilerSpec with HasSRAMGenerator { lazy val width = 8 - lazy val memDepth = 2048 - lazy val libDepth = 1024 + lazy val memDepth = BigInt(2048) + lazy val libDepth = BigInt(1024) lazy val memMaskGran = Some(8) lazy val libMaskGran = Some(1) diff --git a/macros/src/test/scala/SimpleSplitWidth.scala b/macros/src/test/scala/SimpleSplitWidth.scala index 1096e4178..3d26c18db 100644 --- a/macros/src/test/scala/SimpleSplitWidth.scala +++ b/macros/src/test/scala/SimpleSplitWidth.scala @@ -5,7 +5,7 @@ package barstools.macros trait HasSimpleWidthTestGenerator extends HasSimpleTestGenerator { this: MacroCompilerSpec with HasSRAMGenerator => - def depth: Int + def depth: BigInt override lazy val memDepth = depth override lazy val libDepth = depth @@ -69,7 +69,7 @@ s""" // Try different widths against a base memory width of 8. 
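The hunks above migrate every test depth from Int to BigInt (including `def depth: Int` becoming `def depth: BigInt` in HasSimpleWidthTestGenerator); the width tests that use the new BigInt depth continue below. A minimal standalone sketch, not taken from the patches and with illustrative names only, of the address-width arithmetic that a BigInt depth keeps safe for memories too deep to index with a 32-bit Int:

    // Sketch only: why a BigInt depth is preferable to an Int depth.
    // ceilLog2 here is a local stand-in, not the firrtl.Utils helper.
    object DepthMathSketch {
      def ceilLog2(x: BigInt): Int = (x - 1).bitLength

      def addrWidth(depth: BigInt): Int = math.max(ceilLog2(depth), 1)

      def main(args: Array[String]): Unit = {
        println(addrWidth(BigInt(1024)))    // 10, matches the 1024-deep libs in these tests
        println(addrWidth(BigInt(2048)))    // 11, matches the 2048-deep mems
        println(addrWidth(BigInt(1) << 40)) // 40, a depth that would overflow an Int
      }
    }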
class SplitWidth1024x128_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 128 override lazy val libWidth = 8 @@ -77,7 +77,7 @@ class SplitWidth1024x128_rw extends MacroCompilerSpec with HasSRAMGenerator with } class SplitWidth1024x64_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 64 override lazy val libWidth = 8 @@ -85,7 +85,7 @@ class SplitWidth1024x64_rw extends MacroCompilerSpec with HasSRAMGenerator with } class SplitWidth1024x32_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val libWidth = 8 @@ -93,7 +93,7 @@ class SplitWidth1024x32_rw extends MacroCompilerSpec with HasSRAMGenerator with } class SplitWidth1024x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 @@ -101,7 +101,7 @@ class SplitWidth1024x16_rw extends MacroCompilerSpec with HasSRAMGenerator with } class SplitWidth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 8 override lazy val libWidth = 8 @@ -110,7 +110,7 @@ class SplitWidth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with H // Try different widths against a base memory width of 16. class SplitWidth1024x128_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 128 override lazy val libWidth = 16 @@ -118,7 +118,7 @@ class SplitWidth1024x128_lib16_rw extends MacroCompilerSpec with HasSRAMGenerato } class SplitWidth1024x64_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 64 override lazy val libWidth = 16 @@ -126,7 +126,7 @@ class SplitWidth1024x64_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator } class SplitWidth1024x32_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val libWidth = 16 @@ -134,7 +134,7 @@ class SplitWidth1024x32_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator } class SplitWidth1024x16_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 16 @@ -143,7 +143,7 @@ class SplitWidth1024x16_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator // Try different widths against a base memory width of 8 but depth 512 instead of 1024. 
class SplitWidth512x128_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 512 + override lazy val depth = BigInt(512) override lazy val memWidth = 128 override lazy val libWidth = 8 @@ -151,7 +151,7 @@ class SplitWidth512x128_rw extends MacroCompilerSpec with HasSRAMGenerator with } class SplitWidth512x64_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 512 + override lazy val depth = BigInt(512) override lazy val memWidth = 64 override lazy val libWidth = 8 @@ -159,7 +159,7 @@ class SplitWidth512x64_rw extends MacroCompilerSpec with HasSRAMGenerator with H } class SplitWidth512x32_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 512 + override lazy val depth = BigInt(512) override lazy val memWidth = 32 override lazy val libWidth = 8 @@ -167,7 +167,7 @@ class SplitWidth512x32_rw extends MacroCompilerSpec with HasSRAMGenerator with H } class SplitWidth512x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 512 + override lazy val depth = BigInt(512) override lazy val memWidth = 16 override lazy val libWidth = 8 @@ -175,7 +175,7 @@ class SplitWidth512x16_rw extends MacroCompilerSpec with HasSRAMGenerator with H } class SplitWidth512x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 512 + override lazy val depth = BigInt(512) override lazy val memWidth = 8 override lazy val libWidth = 8 @@ -184,7 +184,7 @@ class SplitWidth512x8_rw extends MacroCompilerSpec with HasSRAMGenerator with Ha // Try non-power of two widths against a base memory width of 8. 
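The non-power-of-two width tests that follow (memory widths of 67, 60, 42, 20, 17, 15, and 9 bits over an 8-bit lib) all come down to the same ceiling-division bookkeeping: how many lib columns are needed, and how many bits of the last column are actually used. A small illustrative sketch of that arithmetic, not part of the test suite and not the MacroCompiler's code:

    // Sketch only: the bank-count bookkeeping the SplitWidth tests exercise.
    object WidthSplitSketch extends App {
      def banksNeeded(memWidth: Int, libWidth: Int): Int =
        (memWidth + libWidth - 1) / libWidth          // ceiling division

      def lastBankBits(memWidth: Int, libWidth: Int): Int = {
        val rem = memWidth % libWidth
        if (rem == 0) libWidth else rem
      }

      println(banksNeeded(67, 8))   // 9 lib instances per row
      println(lastBankBits(67, 8))  // only 3 bits used in the ninth instance
      println(banksNeeded(128, 8))  // 16, the SplitWidth1024x128_rw case
    }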
class SplitWidth1024x67_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 67 override lazy val libWidth = 8 @@ -192,7 +192,7 @@ class SplitWidth1024x67_rw extends MacroCompilerSpec with HasSRAMGenerator with } class SplitWidth1024x60_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 60 override lazy val libWidth = 8 @@ -200,7 +200,7 @@ class SplitWidth1024x60_rw extends MacroCompilerSpec with HasSRAMGenerator with } class SplitWidth1024x42_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 42 override lazy val libWidth = 8 @@ -208,7 +208,7 @@ class SplitWidth1024x42_rw extends MacroCompilerSpec with HasSRAMGenerator with } class SplitWidth1024x20_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 20 override lazy val libWidth = 8 @@ -216,7 +216,7 @@ class SplitWidth1024x20_rw extends MacroCompilerSpec with HasSRAMGenerator with } class SplitWidth1024x17_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 17 override lazy val libWidth = 8 @@ -224,7 +224,7 @@ class SplitWidth1024x17_rw extends MacroCompilerSpec with HasSRAMGenerator with } class SplitWidth1024x15_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 15 override lazy val libWidth = 8 @@ -232,7 +232,7 @@ class SplitWidth1024x15_rw extends MacroCompilerSpec with HasSRAMGenerator with } class SplitWidth1024x9_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 9 override lazy val libWidth = 8 @@ -241,7 +241,7 @@ class SplitWidth1024x9_rw extends MacroCompilerSpec with HasSRAMGenerator with H // Try against a non-power of two base memory width. 
class SplitWidth1024x64_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 64 override lazy val libWidth = 11 @@ -249,7 +249,7 @@ class SplitWidth1024x64_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator } class SplitWidth1024x33_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 33 override lazy val libWidth = 11 @@ -257,7 +257,7 @@ class SplitWidth1024x33_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator } class SplitWidth1024x16_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 11 @@ -267,7 +267,7 @@ class SplitWidth1024x16_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator // Masked RAM class SplitWidth1024x8_memGran_8_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 8 override lazy val libWidth = 8 override lazy val memMaskGran = Some(8) @@ -277,7 +277,7 @@ class SplitWidth1024x8_memGran_8_libGran_1_rw extends MacroCompilerSpec with Has } class SplitWidth1024x16_memGran_8_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 override lazy val memMaskGran = Some(8) @@ -287,7 +287,7 @@ class SplitWidth1024x16_memGran_8_libGran_1_rw extends MacroCompilerSpec with Ha } class SplitWidth1024x16_memGran_8_libGran_8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 override lazy val memMaskGran = Some(8) @@ -297,7 +297,7 @@ class SplitWidth1024x16_memGran_8_libGran_8_rw extends MacroCompilerSpec with Ha } class SplitWidth1024x128_memGran_8_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 128 override lazy val libWidth = 32 override lazy val memMaskGran = Some(8) @@ -307,7 +307,7 @@ class SplitWidth1024x128_memGran_8_libGran_1_rw extends MacroCompilerSpec with H } class SplitWidth1024x16_memGran_4_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 override lazy val memMaskGran = Some(4) @@ -317,7 +317,7 @@ class SplitWidth1024x16_memGran_4_libGran_1_rw extends MacroCompilerSpec with Ha } class SplitWidth1024x16_memGran_2_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 override lazy val memMaskGran = Some(2) @@ -327,7 +327,7 @@ class SplitWidth1024x16_memGran_2_libGran_1_rw extends MacroCompilerSpec with Ha } class 
SplitWidth1024x16_memGran_16_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 override lazy val memMaskGran = Some(16) @@ -339,7 +339,7 @@ class SplitWidth1024x16_memGran_16_libGran_1_rw extends MacroCompilerSpec with H // Non-masked mem, masked lib class SplitWidth1024x16_libGran_8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 override lazy val libMaskGran = Some(8) @@ -348,7 +348,7 @@ class SplitWidth1024x16_libGran_8_rw extends MacroCompilerSpec with HasSRAMGener } class SplitWidth1024x16_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 override lazy val libMaskGran = Some(1) @@ -359,7 +359,7 @@ class SplitWidth1024x16_libGran_1_rw extends MacroCompilerSpec with HasSRAMGener // Non-memMask and non-1 libMask class SplitWidth1024x16_memGran_8_libGran_2_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 override lazy val memMaskGran = Some(8) @@ -371,7 +371,7 @@ class SplitWidth1024x16_memGran_8_libGran_2_rw extends MacroCompilerSpec with Ha // Non-power of two memGran class SplitWidth1024x16_memGran_9_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 override lazy val memMaskGran = Some(9) @@ -387,7 +387,7 @@ class SplitWidth1024x16_memGran_9_libGran_1_rw extends MacroCompilerSpec with Ha class SplitWidth1024x32_readEnable_Lib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { import mdf.macrolib._ - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val libWidth = 8 @@ -445,7 +445,7 @@ class SplitWidth1024x32_readEnable_Lib extends MacroCompilerSpec with HasSRAMGen class SplitWidth1024x32_readEnable_Mem extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { import mdf.macrolib._ - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val libWidth = 8 @@ -471,7 +471,7 @@ class SplitWidth1024x32_readEnable_Mem extends MacroCompilerSpec with HasSRAMGen class SplitWidth1024x32_readEnable_LibMem extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { import mdf.macrolib._ - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val libWidth = 8 diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala index 694911ee3..a7c5a0864 100644 --- a/macros/src/test/scala/SpecificExamples.scala +++ b/macros/src/test/scala/SpecificExamples.scala @@ -9,8 +9,8 @@ import mdf.macrolib._ // TODO: check the actual verilog's correctness? 
class GenerateSomeVerilog extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { override lazy val width = 32 - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) it should "execute fine" in { compileExecuteAndTest(mem, lib, v, output) @@ -35,7 +35,7 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { "type" : "sram", "name" : "_T_182_ext", "width" : 88, - "depth" : 64, + "depth" : "64", "ports" : [ { "address port name" : "R0_addr", "address port polarity" : "active high", @@ -62,7 +62,7 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { "type" : "sram", "name" : "_T_84_ext", "width" : 64, - "depth" : 512, + "depth" : "512", "ports" : [ { "address port name" : "R0_addr", "address port polarity" : "active high", @@ -89,7 +89,7 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { "type" : "sram", "name" : "tag_array_ext", "width" : 80, - "depth" : 64, + "depth" : "64", "ports" : [ { "address port name" : "RW0_addr", "address port polarity" : "active high", @@ -111,7 +111,7 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { "type" : "sram", "name" : "_T_886_ext", "width" : 64, - "depth" : 512, + "depth" : "512", "ports" : [ { "address port name" : "RW0_addr", "address port polarity" : "active high", @@ -130,7 +130,7 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { "type" : "sram", "name" : "entries_info_ext", "width" : 40, - "depth" : 24, + "depth" : "24", "ports" : [ { "address port name" : "R0_addr", "address port polarity" : "active high", @@ -154,7 +154,7 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { "type" : "sram", "name" : "smem_ext", "width" : 32, - "depth" : 32, + "depth" : "32", "ports" : [ { "address port name" : "RW0_addr", "address port polarity" : "active high", @@ -176,7 +176,7 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { "type" : "sram", "name" : "smem_0_ext", "width" : 32, - "depth" : 64, + "depth" : "64", "ports" : [ { "address port name" : "RW0_addr", "address port polarity" : "active high", @@ -1197,12 +1197,12 @@ circuit smem_0_ext : class SmallTagArrayTest extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleTestGenerator { // Test that mapping a smaller memory using a larger lib can still work. 
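In the BOOMTest MDF JSON above, the depth values change from JSON numbers to strings ("depth" : "64", "depth" : "512", and so on), which lines up with the move to BigInt depths: a quoted depth can be read into an arbitrarily large integer without depending on JSON number precision. A standalone sketch of that parse, illustrative only and not the mdf library's API; the SmallTagArrayTest overrides continue immediately below:

    // Sketch only: reading a quoted MDF depth field into a BigInt.
    object DepthFieldSketch extends App {
      def parseDepth(raw: String): BigInt = BigInt(raw.trim)

      println(parseDepth("64"))                   // 64
      println(parseDepth("512"))                  // 512
      println(parseDepth("18446744073709551616")) // 2^64, too large for a Long
    }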
override def memWidth: Int = 26 - override def memDepth: Int = 2 + override def memDepth: BigInt = BigInt(2) override def memMaskGran: Option[Int] = Some(26) override def memPortPrefix: String = "" override def libWidth: Int = 32 - override def libDepth: Int = 64 + override def libDepth: BigInt = BigInt(64) override def libMaskGran: Option[Int] = Some(1) override def libPortPrefix: String = "" @@ -1239,7 +1239,7 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { width=8, family="1rw", ports=Seq( - generateReadWritePort("", 8, 1024) + generateReadWritePort("", 8, BigInt(1024)) ) ), SRAMMacro( @@ -1248,7 +1248,7 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { width=32, family="1rw", ports=Seq( - generateReadWritePort("", 32, 512) + generateReadWritePort("", 32, BigInt(512)) ) ), SRAMMacro( @@ -1257,7 +1257,7 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { width=128, family="1rw", ports=Seq( - generateReadWritePort("", 128, 64) + generateReadWritePort("", 128, BigInt(64)) ) ), SRAMMacro( @@ -1266,7 +1266,7 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { width=32, family="1rw", ports=Seq( - generateReadWritePort("", 32, 64) + generateReadWritePort("", 32, BigInt(64)) ) ), SRAMMacro( @@ -1275,7 +1275,7 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { width=8, family="1rw", ports=Seq( - generateReadWritePort("", 8, 64) + generateReadWritePort("", 8, BigInt(64)) ) ), SRAMMacro( @@ -1284,7 +1284,7 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { width=8, family="1rw", ports=Seq( - generateReadWritePort("", 8, 512) + generateReadWritePort("", 8, BigInt(512)) ) ), SRAMMacro( @@ -1293,8 +1293,8 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { width=32, family="1r1w", ports=Seq( - generateReadPort("portA", 32, 64), - generateWritePort("portB", 32, 64) + generateReadPort("portA", 32, BigInt(64)), + generateWritePort("portB", 32, BigInt(64)) ) ) ) diff --git a/macros/src/test/scala/SynFlops.scala b/macros/src/test/scala/SynFlops.scala index d2ca39d59..8198d8f38 100644 --- a/macros/src/test/scala/SynFlops.scala +++ b/macros/src/test/scala/SynFlops.scala @@ -6,6 +6,22 @@ trait HasSynFlopsTestGenerator extends HasSimpleTestGenerator { this: MacroCompilerSpec with HasSRAMGenerator => def generateFlops: String = { s""" + inst mem_0_0 of split_${lib_name} + mem_0_0.${libPortPrefix}_clk <= ${libPortPrefix}_clk + mem_0_0.${libPortPrefix}_addr <= ${libPortPrefix}_addr + node ${libPortPrefix}_dout_0_0 = bits(mem_0_0.${libPortPrefix}_dout, ${libWidth-1}, 0) + mem_0_0.${libPortPrefix}_din <= bits(${libPortPrefix}_din, ${libWidth-1}, 0) + mem_0_0.${libPortPrefix}_write_en <= and(and(${libPortPrefix}_write_en, UInt<1>("h1")), UInt<1>("h1")) + node ${libPortPrefix}_dout_0 = ${libPortPrefix}_dout_0_0 + ${libPortPrefix}_dout <= mux(UInt<1>("h1"), ${libPortPrefix}_dout_0, UInt<1>("h0")) + + module split_${lib_name} : + input ${libPortPrefix}_addr : UInt<${lib_addr_width}> + input ${libPortPrefix}_clk : Clock + input ${libPortPrefix}_din : UInt<${libWidth}> + output ${libPortPrefix}_dout : UInt<${libWidth}> + input ${libPortPrefix}_write_en : UInt<1> + mem ram : data-type => UInt<${libWidth}> depth => ${libDepth} @@ -17,9 +33,9 @@ s""" ram.RW_0.addr <= ${libPortPrefix}_addr ram.RW_0.en <= UInt<1>("h1") ram.RW_0.wmode <= ${libPortPrefix}_write_en + ram.RW_0.wmask <= UInt<1>("h1") ${libPortPrefix}_dout <= ram.RW_0.rdata ram.RW_0.wdata <= 
${libPortPrefix}_din - ram.RW_0.wmask <= UInt<1>("h1") """ } @@ -43,29 +59,29 @@ ${generateFlops} } class Synflops2048x8_noLib extends MacroCompilerSpec with HasSRAMGenerator with HasNoLibTestGenerator with HasSynFlopsTestGenerator { - override lazy val memDepth = 2048 + override lazy val memDepth = BigInt(2048) override lazy val memWidth = 8 compileExecuteAndTest(mem, None, v, output, true) } class Synflops2048x16_noLib extends MacroCompilerSpec with HasSRAMGenerator with HasNoLibTestGenerator with HasSynFlopsTestGenerator { - override lazy val memDepth = 2048 + override lazy val memDepth = BigInt(2048) override lazy val memWidth = 16 compileExecuteAndTest(mem, None, v, output, true) } class Synflops8192x16_noLib extends MacroCompilerSpec with HasSRAMGenerator with HasNoLibTestGenerator with HasSynFlopsTestGenerator { - override lazy val memDepth = 8192 + override lazy val memDepth = BigInt(8192) override lazy val memWidth = 16 compileExecuteAndTest(mem, None, v, output, true) } class Synflops2048x16_depth_Lib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with HasSynFlopsTestGenerator { - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val width = 16 compileExecuteAndTest(mem, lib, v, output, true) @@ -74,7 +90,7 @@ class Synflops2048x16_depth_Lib extends MacroCompilerSpec with HasSRAMGenerator class Synflops2048x64_width_Lib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator with HasSynFlopsTestGenerator { override lazy val memWidth = 64 override lazy val libWidth = 8 - override lazy val depth = 1024 + override lazy val depth = BigInt(1024) compileExecuteAndTest(mem, lib, v, output, true) } @@ -82,8 +98,8 @@ class Synflops2048x64_width_Lib extends MacroCompilerSpec with HasSRAMGenerator class Synflops_SplitPorts_Read_Write extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with HasSynFlopsTestGenerator { import mdf.macrolib._ - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val width = 8 override def generateLibSRAM = SRAMMacro( @@ -162,6 +178,26 @@ circuit target_memory : override def generateFlops = """ + inst mem_0_0 of split_awesome_lib_mem + mem_0_0.innerB_clk <= innerB_clk + mem_0_0.innerB_addr <= innerB_addr + mem_0_0.innerB_din <= bits(innerB_din, 7, 0) + mem_0_0.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_0.innerA_clk <= innerA_clk + mem_0_0.innerA_addr <= innerA_addr + node innerA_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) + node innerA_dout_0 = innerA_dout_0_0 + innerA_dout <= mux(UInt<1>("h1"), innerA_dout_0, UInt<1>("h0")) + + module split_awesome_lib_mem : + input innerA_addr : UInt<10> + input innerA_clk : Clock + output innerA_dout : UInt<8> + input innerB_addr : UInt<10> + input innerB_clk : Clock + input innerB_din : UInt<8> + input innerB_write_en : UInt<1> + mem ram : data-type => UInt<8> depth => 1024 @@ -177,8 +213,8 @@ circuit target_memory : ram.W_0.clk <= innerB_clk ram.W_0.addr <= innerB_addr ram.W_0.en <= innerB_write_en - ram.W_0.data <= innerB_din ram.W_0.mask <= UInt<1>("h1") + ram.W_0.data <= innerB_din """ "Non-masked split lib; split mem" should "syn flops fine" in { @@ -189,8 +225,8 @@ circuit target_memory : class Synflops_SplitPorts_MaskedMem_Read_MaskedWrite extends 
MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with HasSynFlopsTestGenerator { import mdf.macrolib._ - override lazy val memDepth = 2048 - override lazy val libDepth = 1024 + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) override lazy val width = 8 override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(1) @@ -275,8 +311,94 @@ circuit target_memory : override def generateFlops = """ + inst mem_0_0 of split_awesome_lib_mem + inst mem_0_1 of split_awesome_lib_mem + inst mem_0_2 of split_awesome_lib_mem + inst mem_0_3 of split_awesome_lib_mem + inst mem_0_4 of split_awesome_lib_mem + inst mem_0_5 of split_awesome_lib_mem + inst mem_0_6 of split_awesome_lib_mem + inst mem_0_7 of split_awesome_lib_mem + mem_0_0.innerB_clk <= innerB_clk + mem_0_0.innerB_addr <= innerB_addr + mem_0_0.innerB_din <= bits(innerB_din, 0, 0) + mem_0_0.innerB_mask <= bits(innerB_mask, 0, 0) + mem_0_0.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_1.innerB_clk <= innerB_clk + mem_0_1.innerB_addr <= innerB_addr + mem_0_1.innerB_din <= bits(innerB_din, 1, 1) + mem_0_1.innerB_mask <= bits(innerB_mask, 1, 1) + mem_0_1.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_2.innerB_clk <= innerB_clk + mem_0_2.innerB_addr <= innerB_addr + mem_0_2.innerB_din <= bits(innerB_din, 2, 2) + mem_0_2.innerB_mask <= bits(innerB_mask, 2, 2) + mem_0_2.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_3.innerB_clk <= innerB_clk + mem_0_3.innerB_addr <= innerB_addr + mem_0_3.innerB_din <= bits(innerB_din, 3, 3) + mem_0_3.innerB_mask <= bits(innerB_mask, 3, 3) + mem_0_3.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_4.innerB_clk <= innerB_clk + mem_0_4.innerB_addr <= innerB_addr + mem_0_4.innerB_din <= bits(innerB_din, 4, 4) + mem_0_4.innerB_mask <= bits(innerB_mask, 4, 4) + mem_0_4.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_5.innerB_clk <= innerB_clk + mem_0_5.innerB_addr <= innerB_addr + mem_0_5.innerB_din <= bits(innerB_din, 5, 5) + mem_0_5.innerB_mask <= bits(innerB_mask, 5, 5) + mem_0_5.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_6.innerB_clk <= innerB_clk + mem_0_6.innerB_addr <= innerB_addr + mem_0_6.innerB_din <= bits(innerB_din, 6, 6) + mem_0_6.innerB_mask <= bits(innerB_mask, 6, 6) + mem_0_6.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_7.innerB_clk <= innerB_clk + mem_0_7.innerB_addr <= innerB_addr + mem_0_7.innerB_din <= bits(innerB_din, 7, 7) + mem_0_7.innerB_mask <= bits(innerB_mask, 7, 7) + mem_0_7.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_0.innerA_clk <= innerA_clk + mem_0_0.innerA_addr <= innerA_addr + node innerA_dout_0_0 = bits(mem_0_0.innerA_dout, 0, 0) + mem_0_1.innerA_clk <= innerA_clk + mem_0_1.innerA_addr <= innerA_addr + node innerA_dout_0_1 = bits(mem_0_1.innerA_dout, 0, 0) + mem_0_2.innerA_clk <= innerA_clk + mem_0_2.innerA_addr <= innerA_addr + node innerA_dout_0_2 = bits(mem_0_2.innerA_dout, 0, 0) + mem_0_3.innerA_clk <= innerA_clk + mem_0_3.innerA_addr <= innerA_addr + node innerA_dout_0_3 = bits(mem_0_3.innerA_dout, 0, 0) + mem_0_4.innerA_clk <= innerA_clk + mem_0_4.innerA_addr <= innerA_addr + node innerA_dout_0_4 = bits(mem_0_4.innerA_dout, 0, 0) + mem_0_5.innerA_clk <= innerA_clk + mem_0_5.innerA_addr <= innerA_addr + node 
innerA_dout_0_5 = bits(mem_0_5.innerA_dout, 0, 0) + mem_0_6.innerA_clk <= innerA_clk + mem_0_6.innerA_addr <= innerA_addr + node innerA_dout_0_6 = bits(mem_0_6.innerA_dout, 0, 0) + mem_0_7.innerA_clk <= innerA_clk + mem_0_7.innerA_addr <= innerA_addr + node innerA_dout_0_7 = bits(mem_0_7.innerA_dout, 0, 0) + node innerA_dout_0 = cat(innerA_dout_0_7, cat(innerA_dout_0_6, cat(innerA_dout_0_5, cat(innerA_dout_0_4, cat(innerA_dout_0_3, cat(innerA_dout_0_2, cat(innerA_dout_0_1, innerA_dout_0_0))))))) + innerA_dout <= mux(UInt<1>("h1"), innerA_dout_0, UInt<1>("h0")) + + + module split_awesome_lib_mem : + input innerA_addr : UInt<10> + input innerA_clk : Clock + output innerA_dout : UInt<1> + input innerB_addr : UInt<10> + input innerB_clk : Clock + input innerB_din : UInt<1> + input innerB_write_en : UInt<1> + input innerB_mask : UInt<1> + mem ram : - data-type => UInt<1>[8] + data-type => UInt<1> depth => 1024 read-latency => 1 write-latency => 1 @@ -286,26 +408,12 @@ circuit target_memory : ram.R_0.clk <= innerA_clk ram.R_0.addr <= innerA_addr ram.R_0.en <= UInt<1>("h1") - innerA_dout <= cat(ram.R_0.data[7], cat(ram.R_0.data[6], cat(ram.R_0.data[5], cat(ram.R_0.data[4], cat(ram.R_0.data[3], cat(ram.R_0.data[2], cat(ram.R_0.data[1], ram.R_0.data[0]))))))) + innerA_dout <= ram.R_0.data ram.W_0.clk <= innerB_clk ram.W_0.addr <= innerB_addr ram.W_0.en <= innerB_write_en - ram.W_0.data[0] <= bits(innerB_din, 0, 0) - ram.W_0.data[1] <= bits(innerB_din, 1, 1) - ram.W_0.data[2] <= bits(innerB_din, 2, 2) - ram.W_0.data[3] <= bits(innerB_din, 3, 3) - ram.W_0.data[4] <= bits(innerB_din, 4, 4) - ram.W_0.data[5] <= bits(innerB_din, 5, 5) - ram.W_0.data[6] <= bits(innerB_din, 6, 6) - ram.W_0.data[7] <= bits(innerB_din, 7, 7) - ram.W_0.mask[0] <= bits(innerB_mask, 0, 0) - ram.W_0.mask[1] <= bits(innerB_mask, 1, 1) - ram.W_0.mask[2] <= bits(innerB_mask, 2, 2) - ram.W_0.mask[3] <= bits(innerB_mask, 3, 3) - ram.W_0.mask[4] <= bits(innerB_mask, 4, 4) - ram.W_0.mask[5] <= bits(innerB_mask, 5, 5) - ram.W_0.mask[6] <= bits(innerB_mask, 6, 6) - ram.W_0.mask[7] <= bits(innerB_mask, 7, 7) + ram.W_0.mask <= innerB_mask + ram.W_0.data <= innerB_din """ "masked split lib; masked split mem" should "syn flops fine" in { diff --git a/mdf b/mdf index 94839b30b..c8478e74a 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit 94839b30ba2dfec8b83c665f744353f204c3d2b9 +Subproject commit c8478e74a2a2aed66e8ac3207174d4142f1a45e1 diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 2878aa7aa..32deb203c 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -173,7 +173,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => protected def executeHarness: Unit = { optionsManager.firrtlOptions = optionsManager.firrtlOptions.copy( - customTransforms = harnessTransforms + customTransforms = firrtlOptions.customTransforms ++ harnessTransforms ) val result = firrtl.Driver.execute(optionsManager) diff --git a/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala b/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala index 83c3dd719..27388929e 100644 --- a/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala +++ b/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala @@ -3,6 +3,7 @@ package barstools.tapeout.transforms import firrtl._ +import firrtl.annotations._ import firrtl.ir._ import firrtl.passes.Pass @@ -10,7 +11,9 @@ import 
firrtl.passes.Pass // Verilog black box and therefore can't be renamed. Since the point is to // allow FIRRTL to be linked together using "cat" and ExtModules don't get // emitted, this should be safe. -class RenameModulesAndInstancesPass(rename: (String) => String) extends Pass { +class RenameModulesAndInstances(rename: (String) => String) extends Transform { + def inputForm = LowForm + def outputForm = LowForm def renameInstances(body: Statement): Statement = { body match { @@ -21,22 +24,22 @@ class RenameModulesAndInstancesPass(rename: (String) => String) extends Pass { } } - def run(c: Circuit): Circuit = { + def run(state: CircuitState): (Circuit, RenameMap) = { + val myRenames = RenameMap() + val c = state.circuit val modulesx = c.modules.map { - case m: ExtModule => m - case m: Module => new Module(m.info, rename(m.name), m.ports, renameInstances(m.body)) + case m: ExtModule => + myRenames.record(ModuleTarget(c.main, m.name), ModuleTarget(c.main, rename(m.name))) + m.copy(name = rename(m.name)) + case m: Module => + myRenames.record(ModuleTarget(c.main, m.name), ModuleTarget(c.main, rename(m.name))) + new Module(m.info, rename(m.name), m.ports, renameInstances(m.body)) } - Circuit(c.info, modulesx, c.main) + (Circuit(c.info, modulesx, c.main), myRenames) } -} - -class RenameModulesAndInstances(rename: (String) => String) extends Transform with SeqTransformBased { - def inputForm = LowForm - def outputForm = LowForm - def transforms = Seq(new RenameModulesAndInstancesPass(rename)) def execute(state: CircuitState): CircuitState = { - val ret = runTransforms(state) - CircuitState(ret.circuit, outputForm, ret.annotations, ret.renames) + val (ret, renames) = run(state) + state.copy(circuit = ret, renames = Some(renames)) } } From 82636b3ff43ecf6a0f0a7d46ebc2456b31e9703f Mon Sep 17 00:00:00 2001 From: John Wright Date: Tue, 5 Mar 2019 15:01:44 -0800 Subject: [PATCH 138/273] Upstream MemConf and use it (with some slight tweaks) --- macros/src/main/scala/MemConf.scala | 59 ----------------------------- macros/src/main/scala/Utils.scala | 4 +- mdf | 2 +- 3 files changed, 4 insertions(+), 61 deletions(-) delete mode 100644 macros/src/main/scala/MemConf.scala diff --git a/macros/src/main/scala/MemConf.scala b/macros/src/main/scala/MemConf.scala deleted file mode 100644 index 0d13c5a8e..000000000 --- a/macros/src/main/scala/MemConf.scala +++ /dev/null @@ -1,59 +0,0 @@ -// See LICENSE for license details. 
- -package barstools.macros - -import scala.util.matching._ - -sealed abstract class MemPort(val name: String) { override def toString = name } - -case object ReadPort extends MemPort("read") -case object WritePort extends MemPort("write") -case object MaskedWritePort extends MemPort("mwrite") -case object ReadWritePort extends MemPort("rw") -case object MaskedReadWritePort extends MemPort("mrw") - -object MemPort { - - val all = Set(ReadPort, WritePort, MaskedWritePort, ReadWritePort, MaskedReadWritePort) - - def apply(s: String): Option[MemPort] = MemPort.all.find(_.name == s) - - def fromString(s: String): Seq[MemPort] = { - s.split(",").toSeq.map(MemPort.apply).map(_ match { - case Some(x) => x - case _ => throw new Exception(s"Error parsing MemPort string : ${s}") - }) - } -} - -// This is based on firrtl.passes.memlib.ConfWriter -// TODO standardize this in FIRRTL -case class MemConf( - name: String, - depth: BigInt, - width: Int, - ports: Seq[MemPort], - maskGranularity: Option[Int] -) { - - private def portsStr = ports.map(_.name).mkString(",") - private def maskGranStr = maskGranularity.map((p) => s"mask_gran $p").getOrElse("") - - override def toString() = s"name ${name} depth ${depth} width ${width} ports ${portsStr} ${maskGranStr} " -} - -object MemConf { - - val regex = raw"\s*name\s+(\w+)\s+depth\s+(\d+)\s+width\s+(\d+)\s+ports\s+([^\s]+)\s+(?:mask_gran\s+(\d+))?\s*".r - - def fromString(s: String): Seq[MemConf] = { - if (s.isEmpty) { - Seq[MemConf]() - } else { - s.split("\n").toSeq.map(_ match { - case MemConf.regex(name, depth, width, ports, maskGran) => MemConf(name, BigInt(depth), width.toInt, MemPort.fromString(ports), Option(maskGran).map(_.toInt)) - case _ => throw new Exception(s"Error parsing MemConf string : ${s}") - }) - } - } -} diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index ad19c9171..ba8c664df 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -5,6 +5,7 @@ package barstools.macros import firrtl._ import firrtl.ir._ import firrtl.PrimOps +import firrtl.passes.memlib.{MemConf, MemPort, ReadPort, WritePort, ReadWritePort, MaskedWritePort, MaskedReadWritePort} import firrtl.Utils.{ceilLog2, BoolType} import mdf.macrolib.{Constant, MacroPort, SRAMMacro} import mdf.macrolib.{PolarizedPort, PortPolarity, ActiveLow, ActiveHigh, NegativeEdge, PositiveEdge, MacroExtraPort} @@ -78,7 +79,8 @@ object Utils { } def readConfFromString(str: String): Seq[mdf.macrolib.Macro] = { MemConf.fromString(str).map { m:MemConf => - SRAMMacro(m.name, m.width, m.depth, Utils.portSpecToFamily(m.ports), Utils.portSpecToMacroPort(m.width, m.depth, m.maskGranularity, m.ports)) + val ports = m.ports.map { case (port, num) => Seq.fill(num)(port) } reduce (_ ++ _) + SRAMMacro(m.name, m.width, m.depth, Utils.portSpecToFamily(ports), Utils.portSpecToMacroPort(m.width, m.depth, m.maskGranularity, ports)) } } def portSpecToFamily(ports: Seq[MemPort]): String = { diff --git a/mdf b/mdf index c8478e74a..515dda512 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit c8478e74a2a2aed66e8ac3207174d4142f1a45e1 +Subproject commit 515dda51206eb40bcbe902700abc8ca36b141c0d From e3c822709be39090ea9dad74d55239d5fc560d25 Mon Sep 17 00:00:00 2001 From: Albert Magyar Date: Mon, 29 Jul 2019 20:39:07 -0700 Subject: [PATCH 139/273] Filter all EmittedAnnotations from JSON emission (#64) * Filter all EmittedAnnotations from JSON emission * Filter more annotations --- tapeout/src/main/scala/transforms/Generate.scala | 7 +++++-- 1 file 
changed, 5 insertions(+), 2 deletions(-) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 32deb203c..ce261a319 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -3,6 +3,7 @@ package barstools.tapeout.transforms import firrtl._ import firrtl.ir._ import firrtl.annotations._ +import firrtl.stage.FirrtlCircuitAnnotation import firrtl.passes.Pass import java.io.File @@ -159,7 +160,8 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => tapeoutOptions.topAnnoOut.foreach { annoFile => val outputFile = new java.io.PrintWriter(annoFile) outputFile.write(JsonProtocol.serialize(x.circuitState.annotations.filter(_ match { - case EmittedVerilogCircuitAnnotation(_) => false + case ea: EmittedAnnotation[_] => false + case fca: FirrtlCircuitAnnotation => false case _ => true }))) outputFile.close() @@ -188,7 +190,8 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => tapeoutOptions.harnessAnnoOut.foreach { annoFile => val outputFile = new java.io.PrintWriter(annoFile) outputFile.write(JsonProtocol.serialize(x.circuitState.annotations.filter(_ match { - case EmittedVerilogCircuitAnnotation(_) => false + case ea: EmittedAnnotation[_] => false + case fca: FirrtlCircuitAnnotation => false case _ => true }))) outputFile.close() From 26096e07f6ce3e12b2114132c2859ef56fb0cfaf Mon Sep 17 00:00:00 2001 From: Albert Magyar Date: Tue, 30 Jul 2019 22:42:05 -0700 Subject: [PATCH 140/273] Coordinate Top and Harness generation (#63) * Coordinate Top and Harness generation * Update to use .f filename override annotations * Move top generation to def to help GC --- .../src/main/scala/transforms/Generate.scala | 150 ++++++++++++------ 1 file changed, 98 insertions(+), 52 deletions(-) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index ce261a319..e8a64eb7b 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -8,6 +8,8 @@ import firrtl.passes.Pass import java.io.File import firrtl.annotations.AnnotationYamlProtocol._ +import firrtl.passes.memlib.ReplSeqMemAnnotation +import firrtl.transforms.BlackBoxResourceFileNameAnno import net.jcazevedo.moultingyaml._ import com.typesafe.scalalogging.LazyLogging @@ -60,6 +62,17 @@ trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => "use this to set topAnnoOut" } + parser.opt[String]("top-dotf-out") + .abbr("tdf") + .valueName("") + .foreach { x => + tapeoutOptions = tapeoutOptions.copy( + topDotfOut = Some(x) + ) + }.text { + "use this to set the filename for the top resource .f file" + } + parser.opt[String]("harness-top") .abbr("tht") .valueName("") @@ -93,6 +106,28 @@ trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => "use this to set harnessAnnoOut" } + parser.opt[String]("harness-dotf-out") + .abbr("hdf") + .valueName("") + .foreach { x => + tapeoutOptions = tapeoutOptions.copy( + harnessDotfOut = Some(x) + ) + }.text { + "use this to set the filename for the harness resource .f file" + } + + parser.opt[String]("harness-conf") + .abbr("thconf") + .valueName ("") + .foreach { x => + tapeoutOptions = tapeoutOptions.copy( + harnessConf = Some(x) + ) + }.text { + "use this to set the harness conf file location" + } + } case class TapeoutOptions( @@ -100,9 +135,12 @@ case class TapeoutOptions( synTop: Option[String] 
= None, topFir: Option[String] = None, topAnnoOut: Option[String] = None, + topDotfOut: Option[String] = None, harnessTop: Option[String] = None, harnessFir: Option[String] = None, - harnessAnnoOut: Option[String] = None + harnessAnnoOut: Option[String] = None, + harnessDotfOut: Option[String] = None, + harnessConf: Option[String] = None ) extends LazyLogging // Requires two phases, one to collect modules below synTop in the hierarchy @@ -123,79 +161,88 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => // FIRRTL options lazy val annoFiles = firrtlOptions.annotationFileNames - private def topTransforms: Seq[Transform] = { + lazy val topTransforms: Seq[Transform] = { Seq( new ReParentCircuit(synTop.get), new RemoveUnusedModules ) } + lazy val topOptions = firrtlOptions.copy( + customTransforms = firrtlOptions.customTransforms ++ topTransforms, + annotations = firrtlOptions.annotations ++ tapeoutOptions.topDotfOut.map(BlackBoxResourceFileNameAnno(_)) + ) - private def harnessTransforms: Seq[Transform] = { + class AvoidExtModuleCollisions(mustLink: Seq[ExtModule]) extends Transform { + def inputForm = HighForm + def outputForm = HighForm + def execute(state: CircuitState): CircuitState = { + state.copy(circuit = state.circuit.copy(modules = state.circuit.modules ++ mustLink)) + } + } + + private def harnessTransforms(topExtModules: Seq[ExtModule]): Seq[Transform] = { // XXX this is a hack, we really should be checking the masters to see if they are ExtModules val externals = Set(harnessTop.get, synTop.get, "SimSerial", "SimDTM") Seq( new ConvertToExtMod((m) => m.name == synTop.get), new RemoveUnusedModules, + new AvoidExtModuleCollisions(topExtModules), new RenameModulesAndInstances((old) => if (externals contains old) old else (old + "_in" + harnessTop.get)) ) } - // Top Generation - protected def executeTop: Unit = { - - optionsManager.firrtlOptions = optionsManager.firrtlOptions.copy( - customTransforms = firrtlOptions.customTransforms ++ topTransforms - ) + // Dump firrtl and annotation files + protected def dump(res: FirrtlExecutionSuccess, firFile: Option[String], annoFile: Option[String]): Unit = { + firFile.foreach { firPath => + val outputFile = new java.io.PrintWriter(firPath) + outputFile.write(res.circuitState.circuit.serialize) + outputFile.close() + } + annoFile.foreach { annoPath => + val outputFile = new java.io.PrintWriter(annoPath) + outputFile.write(JsonProtocol.serialize(res.circuitState.annotations.filter(_ match { + case ea: EmittedAnnotation[_] => false + case fca: FirrtlCircuitAnnotation => false + case _ => true + }))) + outputFile.close() + } + } + // Top Generation + protected def executeTop(): Seq[ExtModule] = { + optionsManager.firrtlOptions = topOptions val result = firrtl.Driver.execute(optionsManager) - result match { case x: FirrtlExecutionSuccess => - tapeoutOptions.topFir.foreach { firFile => - val outputFile = new java.io.PrintWriter(firFile) - outputFile.write(x.circuitState.circuit.serialize) - outputFile.close() - } - tapeoutOptions.topAnnoOut.foreach { annoFile => - val outputFile = new java.io.PrintWriter(annoFile) - outputFile.write(JsonProtocol.serialize(x.circuitState.annotations.filter(_ match { - case ea: EmittedAnnotation[_] => false - case fca: FirrtlCircuitAnnotation => false - case _ => true - }))) - outputFile.close() - } + dump(x, tapeoutOptions.topFir, tapeoutOptions.topAnnoOut) + x.circuitState.circuit.modules.collect{ case e: ExtModule => e } case _ => + throw new Exception("executeTop failed on illegal FIRRTL 
input!") } - } - // Harness Generation - protected def executeHarness: Unit = { + // Top and harness generation + protected def executeTopAndHarness(): Unit = { + // Execute top and get list of ExtModules to avoid collisions + val topExtModules = executeTop() - optionsManager.firrtlOptions = optionsManager.firrtlOptions.copy( - customTransforms = firrtlOptions.customTransforms ++ harnessTransforms + // For harness run, change some firrtlOptions (below) for harness phase + // customTransforms: setup harness transforms, add AvoidExtModuleCollisions + // outputFileNameOverride: change to harnessOutput + // conf file must change to harnessConf by mapping annotations + optionsManager.firrtlOptions = firrtlOptions.copy( + customTransforms = firrtlOptions.customTransforms ++ harnessTransforms(topExtModules), + outputFileNameOverride = tapeoutOptions.harnessOutput.get, + annotations = firrtlOptions.annotations.map({ + case ReplSeqMemAnnotation(i, o) => ReplSeqMemAnnotation(i, tapeoutOptions.harnessConf.get) + case a => a + }) ++ tapeoutOptions.harnessDotfOut.map(BlackBoxResourceFileNameAnno(_)) ) - - val result = firrtl.Driver.execute(optionsManager) - - result match { - case x: FirrtlExecutionSuccess => - tapeoutOptions.harnessFir.foreach { firFile => - val outputFile = new java.io.PrintWriter(firFile) - outputFile.write(x.circuitState.circuit.serialize) - outputFile.close() - } - tapeoutOptions.harnessAnnoOut.foreach { annoFile => - val outputFile = new java.io.PrintWriter(annoFile) - outputFile.write(JsonProtocol.serialize(x.circuitState.annotations.filter(_ match { - case ea: EmittedAnnotation[_] => false - case fca: FirrtlCircuitAnnotation => false - case _ => true - }))) - outputFile.close() - } + val harnessResult = firrtl.Driver.execute(optionsManager) + harnessResult match { + case x: FirrtlExecutionSuccess => dump(x, tapeoutOptions.harnessFir, tapeoutOptions.harnessAnnoOut) case _ => } } @@ -203,10 +250,9 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => object GenerateTop extends App with GenerateTopAndHarnessApp { // Only need a single phase to generate the top module - executeTop + executeTop() } -object GenerateHarness extends App with GenerateTopAndHarnessApp { - // Do minimal work for the first phase to generate test harness - executeHarness +object GenerateTopAndHarness extends App with GenerateTopAndHarnessApp { + executeTopAndHarness() } From 76f6c8adb2d0113f6c9e4adb450255126089f32c Mon Sep 17 00:00:00 2001 From: Abraham Gonzalez Date: Sat, 17 Aug 2019 10:35:41 -0700 Subject: [PATCH 141/273] remove large annotations --- tapeout/src/main/scala/transforms/Generate.scala | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index e8a64eb7b..8e5954993 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -202,6 +202,14 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => annoFile.foreach { annoPath => val outputFile = new java.io.PrintWriter(annoPath) outputFile.write(JsonProtocol.serialize(res.circuitState.annotations.filter(_ match { + case DeletedAnnotation(_, anno) => + anno match { + case ec: EmittedComponent => false + case ea: EmittedAnnotation[_] => false + case fca: FirrtlCircuitAnnotation => false + case _ => true + } + case ec: EmittedComponent => false case ea: EmittedAnnotation[_] => false case fca: FirrtlCircuitAnnotation => false case _ => true From 
76ccb75b00bbf75f100f4f2184fe1891ba487775 Mon Sep 17 00:00:00 2001 From: Albert Magyar Date: Mon, 19 Aug 2019 09:08:30 -0700 Subject: [PATCH 142/273] Filter out all deleted annotations --- tapeout/src/main/scala/transforms/Generate.scala | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 8e5954993..e59fe75ae 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -202,13 +202,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => annoFile.foreach { annoPath => val outputFile = new java.io.PrintWriter(annoPath) outputFile.write(JsonProtocol.serialize(res.circuitState.annotations.filter(_ match { - case DeletedAnnotation(_, anno) => - anno match { - case ec: EmittedComponent => false - case ea: EmittedAnnotation[_] => false - case fca: FirrtlCircuitAnnotation => false - case _ => true - } + case da: DeletedAnnotation => false case ec: EmittedComponent => false case ea: EmittedAnnotation[_] => false case fca: FirrtlCircuitAnnotation => false From c96a5e5f4488cde3ab5d82f9d67288a4971ef146 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Thu, 24 Oct 2019 14:55:03 -0700 Subject: [PATCH 143/273] Print the firrtl exception if we get one Fixes #67 --- tapeout/src/main/scala/transforms/Generate.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index e8a64eb7b..58e0c83bb 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -218,8 +218,8 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => case x: FirrtlExecutionSuccess => dump(x, tapeoutOptions.topFir, tapeoutOptions.topAnnoOut) x.circuitState.circuit.modules.collect{ case e: ExtModule => e } - case _ => - throw new Exception("executeTop failed on illegal FIRRTL input!") + case e => + throw new Exception(s"executeTop failed while executing FIRRTL!\n${e}") } } @@ -243,7 +243,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => val harnessResult = firrtl.Driver.execute(optionsManager) harnessResult match { case x: FirrtlExecutionSuccess => dump(x, tapeoutOptions.harnessFir, tapeoutOptions.harnessAnnoOut) - case _ => + case e => throw new Exception(s"executeHarness failed while executing FIRRTL!\n${e}") } } } From 7f0828cb3094cc743f86554769c214b6fd4c45f5 Mon Sep 17 00:00:00 2001 From: Abraham Gonzalez Date: Fri, 25 Oct 2019 20:42:55 -0700 Subject: [PATCH 144/273] Fix macrocompiler for RW mask port --- macros/src/main/scala/MacroCompiler.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 56820787c..9e94bdb97 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -511,7 +511,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], /* Palmer: If we don't have a chip enable but do have mask ports. 
*/ stmts += connectPorts(memMask, mask, mask_polarity) stmts += connectPorts(andAddrMatch(and(memWriteEnable, memChipEnable)), - we, mask_polarity) + we, we_polarity) case (None, Some(PolarizedPort(we, we_polarity)), chipEnable) => if (bitWidth(memMask.tpe) == 1) { /* Palmer: If we're expected to provide mask ports without a From c1004790cc6eecb1802c05421699933d8a0ce04d Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Mon, 28 Oct 2019 07:33:04 -0700 Subject: [PATCH 145/273] Use x instead of e to match other case --- tapeout/src/main/scala/transforms/Generate.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 58e0c83bb..6a9e78232 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -218,7 +218,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => case x: FirrtlExecutionSuccess => dump(x, tapeoutOptions.topFir, tapeoutOptions.topAnnoOut) x.circuitState.circuit.modules.collect{ case e: ExtModule => e } - case e => + case x => throw new Exception(s"executeTop failed while executing FIRRTL!\n${e}") } } @@ -243,7 +243,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => val harnessResult = firrtl.Driver.execute(optionsManager) harnessResult match { case x: FirrtlExecutionSuccess => dump(x, tapeoutOptions.harnessFir, tapeoutOptions.harnessAnnoOut) - case e => throw new Exception(s"executeHarness failed while executing FIRRTL!\n${e}") + case x => throw new Exception(s"executeHarness failed while executing FIRRTL!\n${e}") } } } From be3b05a9094f2005bceb598f1dff8a40a4e46d63 Mon Sep 17 00:00:00 2001 From: Abraham Gonzalez Date: Mon, 28 Oct 2019 13:45:05 -0700 Subject: [PATCH 146/273] add test case --- macros/src/main/scala/MacroCompiler.scala | 63 +++++++------- .../src/test/resources/lib-MaskPortTest.json | 27 ++++++ macros/src/test/scala/SpecificExamples.scala | 84 +++++++++++++++++++ 3 files changed, 143 insertions(+), 31 deletions(-) create mode 100644 macros/src/test/resources/lib-MaskPortTest.json diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 9e94bdb97..4a6acf4b2 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -772,37 +772,38 @@ object MacroCompiler extends App { ) ++ modeOptions) mkString "\n" def parseArgs(map: MacroParamMap, costMap: CostParamMap, forcedMemories: ForcedMemories, - args: List[String]): (MacroParamMap, CostParamMap, ForcedMemories) = - args match { - case Nil => (map, costMap, forcedMemories) - case ("-n" | "--macro-conf") :: value :: tail => - parseArgs(map + (Macros -> value) + (MacrosFormat -> "conf"), costMap, forcedMemories, tail) - case ("-m" | "--macro-mdf") :: value :: tail => - parseArgs(map + (Macros -> value) + (MacrosFormat -> "mdf"), costMap, forcedMemories, tail) - case ("-l" | "--library") :: value :: tail => - parseArgs(map + (Library -> value), costMap, forcedMemories, tail) - case ("-u" | "--use-compiler") :: tail => - parseArgs(map + (UseCompiler -> ""), costMap, forcedMemories, tail) - case ("-v" | "--verilog") :: value :: tail => - parseArgs(map + (Verilog -> value), costMap, forcedMemories, tail) - case ("-f" | "--firrtl") :: value :: tail => - parseArgs(map + (Firrtl -> value), costMap, forcedMemories, tail) - case ("-hir" | "--hammer-ir") :: value :: tail => - parseArgs(map + (HammerIR -> 
value), costMap, forcedMemories, tail) - case ("-c" | "--cost-func") :: value :: tail => - parseArgs(map + (CostFunc -> value), costMap, forcedMemories, tail) - case ("-cp" | "--cost-param") :: value1 :: value2 :: tail => - parseArgs(map, costMap + (value1 -> value2), forcedMemories, tail) - case "--force-compile" :: value :: tail => - parseArgs(map, costMap, forcedMemories.copy(_1 = forcedMemories._1 + value), tail) - case "--force-synflops" :: value :: tail => - parseArgs(map, costMap, forcedMemories.copy(_2 = forcedMemories._2 + value), tail) - case "--mode" :: value :: tail => - parseArgs(map + (Mode -> value), costMap, forcedMemories, tail) - case arg :: tail => - println(s"Unknown field $arg\n") - println(usage) - sys.exit(1) + args: List[String]): (MacroParamMap, CostParamMap, ForcedMemories) = { + args match { + case Nil => (map, costMap, forcedMemories) + case ("-n" | "--macro-conf") :: value :: tail => + parseArgs(map + (Macros -> value) + (MacrosFormat -> "conf"), costMap, forcedMemories, tail) + case ("-m" | "--macro-mdf") :: value :: tail => + parseArgs(map + (Macros -> value) + (MacrosFormat -> "mdf"), costMap, forcedMemories, tail) + case ("-l" | "--library") :: value :: tail => + parseArgs(map + (Library -> value), costMap, forcedMemories, tail) + case ("-u" | "--use-compiler") :: tail => + parseArgs(map + (UseCompiler -> ""), costMap, forcedMemories, tail) + case ("-v" | "--verilog") :: value :: tail => + parseArgs(map + (Verilog -> value), costMap, forcedMemories, tail) + case ("-f" | "--firrtl") :: value :: tail => + parseArgs(map + (Firrtl -> value), costMap, forcedMemories, tail) + case ("-hir" | "--hammer-ir") :: value :: tail => + parseArgs(map + (HammerIR -> value), costMap, forcedMemories, tail) + case ("-c" | "--cost-func") :: value :: tail => + parseArgs(map + (CostFunc -> value), costMap, forcedMemories, tail) + case ("-cp" | "--cost-param") :: value1 :: value2 :: tail => + parseArgs(map, costMap + (value1 -> value2), forcedMemories, tail) + case "--force-compile" :: value :: tail => + parseArgs(map, costMap, forcedMemories.copy(_1 = forcedMemories._1 + value), tail) + case "--force-synflops" :: value :: tail => + parseArgs(map, costMap, forcedMemories.copy(_2 = forcedMemories._2 + value), tail) + case "--mode" :: value :: tail => + parseArgs(map + (Mode -> value), costMap, forcedMemories, tail) + case arg :: tail => + println(s"Unknown field $arg\n") + println(usage) + sys.exit(1) + } } def run(args: List[String]) { diff --git a/macros/src/test/resources/lib-MaskPortTest.json b/macros/src/test/resources/lib-MaskPortTest.json new file mode 100644 index 000000000..72df79474 --- /dev/null +++ b/macros/src/test/resources/lib-MaskPortTest.json @@ -0,0 +1,27 @@ +[ + { + "type" : "sram", + "name" : "fake_mem", + "width" : 64, + "depth" : "512", + "mux" : 4, + "family" : "1rw", + "ports" : [ { + "address port name" : "addr", + "address port polarity" : "active high", + "clock port name" : "clk", + "clock port polarity" : "positive edge", + "write enable port name" : "wen", + "write enable port polarity" : "active high", + "read enable port name" : "ren", + "read enable port polarity" : "active high", + "output port name" : "dataout", + "output port polarity" : "active high", + "input port name" : "datain", + "input port polarity" : "active high", + "mask port name" : "mport", + "mask port polarity" : "active low", + "mask granularity" : 1 + } ] + } +] diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala index 
a7c5a0864..f59473c33 100644 --- a/macros/src/test/scala/SpecificExamples.scala +++ b/macros/src/test/scala/SpecificExamples.scala @@ -22,6 +22,90 @@ class GenerateSomeVerilog extends MacroCompilerSpec with HasSRAMGenerator with H } } +class MaskPortTest extends MacroCompilerSpec with HasSRAMGenerator { + val mem = s"mem-MaskPortTest.json" // mem. you want to create + val lib = s"lib-MaskPortTest.json" // lib. of mems to create it + val v = s"MaskPortTest.json" + + override val libPrefix = "macros/src/test/resources" + + val memSRAMs = mdf.macrolib.Utils.readMDFFromString( +""" +[ { + "type" : "sram", + "name" : "cc_dir_ext", + "width" : 128, + "depth" : "512", + "mux" : 1, + "ports" : [ { + "address port name" : "RW0_addr", + "address port polarity" : "active high", + "clock port name" : "RW0_clk", + "clock port polarity" : "positive edge", + "write enable port name" : "RW0_wmode", + "write enable port polarity" : "active high", + "chip enable port name" : "RW0_en", + "chip enable port polarity" : "active high", + "output port name" : "RW0_rdata", + "output port polarity" : "active high", + "input port name" : "RW0_wdata", + "input port polarity" : "active high", + "mask port name" : "RW0_wmask", + "mask port polarity" : "active high", + "mask granularity" : 16 + } ], + "family" : "1rw" +} ] +""").getOrElse(List()) + + writeToMem(mem, memSRAMs) + + val output = +""" +circuit cc_dir_ext : + module cc_dir_ext : + input RW0_addr : UInt<9> + input RW0_clk : Clock + input RW0_wdata : UInt<128> + output RW0_rdata : UInt<128> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + input RW0_wmask : UInt<8> + + inst mem_0_0 of fake_mem + inst mem_0_1 of fake_mem + mem_0_0.clk <= RW0_clk + mem_0_0.addr <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.dataout, 63, 0) + mem_0_0.datain <= bits(RW0_wdata, 63, 0) + mem_0_0.ren <= and(not(RW0_wmode), UInt<1>("h1")) + mem_0_0.mport <= not(cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), 
cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), bits(RW0_wmask, 0, 0))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) + mem_0_0.wen <= and(and(RW0_wmode, RW0_en), UInt<1>("h1")) + mem_0_1.clk <= RW0_clk + mem_0_1.addr <= RW0_addr + node RW0_rdata_0_1 = bits(mem_0_1.dataout, 63, 0) + mem_0_1.datain <= bits(RW0_wdata, 127, 64) + mem_0_1.ren <= and(not(RW0_wmode), UInt<1>("h1")) + mem_0_1.mport <= not(cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), bits(RW0_wmask, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) + mem_0_1.wen <= and(and(RW0_wmode, RW0_en), UInt<1>("h1")) + node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + extmodule fake_mem : + input addr : UInt<9> + input clk : Clock + input datain : UInt<64> + output dataout : UInt<64> + input ren : UInt<1> + input wen : UInt<1> + input mport : UInt<64> + + defname = fake_mem +""" + + compileExecuteAndTest(mem, lib, v, output) +} + class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { val mem = s"mem-BOOMTest.json" val lib = s"lib-BOOMTest.json" From 6c59cac7443d6d409ce4039b105ff4d1eb3ba807 Mon Sep 17 00:00:00 2001 From: Abraham Gonzalez Date: Mon, 28 Oct 2019 13:47:07 -0700 Subject: [PATCH 147/273] fix spacing --- macros/src/main/scala/MacroCompiler.scala | 63 +++++++++++------------ 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 4a6acf4b2..9e94bdb97 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -772,38 +772,37 @@ object MacroCompiler extends App { ) ++ modeOptions) mkString "\n" def parseArgs(map: MacroParamMap, costMap: CostParamMap, forcedMemories: ForcedMemories, - args: List[String]): (MacroParamMap, CostParamMap, 
ForcedMemories) = { - args match { - case Nil => (map, costMap, forcedMemories) - case ("-n" | "--macro-conf") :: value :: tail => - parseArgs(map + (Macros -> value) + (MacrosFormat -> "conf"), costMap, forcedMemories, tail) - case ("-m" | "--macro-mdf") :: value :: tail => - parseArgs(map + (Macros -> value) + (MacrosFormat -> "mdf"), costMap, forcedMemories, tail) - case ("-l" | "--library") :: value :: tail => - parseArgs(map + (Library -> value), costMap, forcedMemories, tail) - case ("-u" | "--use-compiler") :: tail => - parseArgs(map + (UseCompiler -> ""), costMap, forcedMemories, tail) - case ("-v" | "--verilog") :: value :: tail => - parseArgs(map + (Verilog -> value), costMap, forcedMemories, tail) - case ("-f" | "--firrtl") :: value :: tail => - parseArgs(map + (Firrtl -> value), costMap, forcedMemories, tail) - case ("-hir" | "--hammer-ir") :: value :: tail => - parseArgs(map + (HammerIR -> value), costMap, forcedMemories, tail) - case ("-c" | "--cost-func") :: value :: tail => - parseArgs(map + (CostFunc -> value), costMap, forcedMemories, tail) - case ("-cp" | "--cost-param") :: value1 :: value2 :: tail => - parseArgs(map, costMap + (value1 -> value2), forcedMemories, tail) - case "--force-compile" :: value :: tail => - parseArgs(map, costMap, forcedMemories.copy(_1 = forcedMemories._1 + value), tail) - case "--force-synflops" :: value :: tail => - parseArgs(map, costMap, forcedMemories.copy(_2 = forcedMemories._2 + value), tail) - case "--mode" :: value :: tail => - parseArgs(map + (Mode -> value), costMap, forcedMemories, tail) - case arg :: tail => - println(s"Unknown field $arg\n") - println(usage) - sys.exit(1) - } + args: List[String]): (MacroParamMap, CostParamMap, ForcedMemories) = + args match { + case Nil => (map, costMap, forcedMemories) + case ("-n" | "--macro-conf") :: value :: tail => + parseArgs(map + (Macros -> value) + (MacrosFormat -> "conf"), costMap, forcedMemories, tail) + case ("-m" | "--macro-mdf") :: value :: tail => + parseArgs(map + (Macros -> value) + (MacrosFormat -> "mdf"), costMap, forcedMemories, tail) + case ("-l" | "--library") :: value :: tail => + parseArgs(map + (Library -> value), costMap, forcedMemories, tail) + case ("-u" | "--use-compiler") :: tail => + parseArgs(map + (UseCompiler -> ""), costMap, forcedMemories, tail) + case ("-v" | "--verilog") :: value :: tail => + parseArgs(map + (Verilog -> value), costMap, forcedMemories, tail) + case ("-f" | "--firrtl") :: value :: tail => + parseArgs(map + (Firrtl -> value), costMap, forcedMemories, tail) + case ("-hir" | "--hammer-ir") :: value :: tail => + parseArgs(map + (HammerIR -> value), costMap, forcedMemories, tail) + case ("-c" | "--cost-func") :: value :: tail => + parseArgs(map + (CostFunc -> value), costMap, forcedMemories, tail) + case ("-cp" | "--cost-param") :: value1 :: value2 :: tail => + parseArgs(map, costMap + (value1 -> value2), forcedMemories, tail) + case "--force-compile" :: value :: tail => + parseArgs(map, costMap, forcedMemories.copy(_1 = forcedMemories._1 + value), tail) + case "--force-synflops" :: value :: tail => + parseArgs(map, costMap, forcedMemories.copy(_2 = forcedMemories._2 + value), tail) + case "--mode" :: value :: tail => + parseArgs(map + (Mode -> value), costMap, forcedMemories, tail) + case arg :: tail => + println(s"Unknown field $arg\n") + println(usage) + sys.exit(1) } def run(args: List[String]) { From 46e2ecb9ae79f32ea864a3de5648586dab794434 Mon Sep 17 00:00:00 2001 From: Abraham Gonzalez Date: Tue, 5 Nov 2019 14:04:31 -0800 Subject: [PATCH 
148/273] Fix MacroCompiler for CE-less Library Memories If a memory doesn't have a mask and doesn't have a chip enable, make sure that you use the `mem` chip enable to connect to the `we` port on the `lib` memory. Fixes a bug where the `lib` `we` signal would be tied to the `mem` `wmode` signal but then the macro would have no `en` signal connected to it. --- macros/src/main/scala/MacroCompiler.scala | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 9e94bdb97..c51e78455 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -517,13 +517,16 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], /* Palmer: If we're expected to provide mask ports without a * memory that actually has them then we can use the * write enable port instead of the mask port. */ - stmts += connectPorts(andAddrMatch(and(memWriteEnable, memMask)), - we, we_polarity) chipEnable match { case Some(PolarizedPort(en, en_polarity)) => { + stmts += connectPorts(andAddrMatch(and(memWriteEnable, memMask)), + we, we_polarity) stmts += connectPorts(andAddrMatch(memChipEnable), en, en_polarity) } - case _ => // TODO: do we care about the case where mem has chipEnable but lib doesn't? + case _ => { + stmts += connectPorts(andAddrMatch(and(and(memWriteEnable, memChipEnable), memMask)), + we, we_polarity) + } } } else { System.err.println("cannot emulate multi-bit mask ports with write enable") From 34984802b20d627f567a349db13aa01520d2d56a Mon Sep 17 00:00:00 2001 From: Abraham Gonzalez Date: Tue, 5 Nov 2019 14:16:53 -0800 Subject: [PATCH 149/273] enforce re is disabled when we is enabled --- macros/src/main/scala/MacroCompiler.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index c51e78455..09b86c873 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -495,7 +495,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], case (Some(PolarizedPort(mem, _)), Some(PolarizedPort(lib, lib_polarity))) => stmts += connectPorts(andAddrMatch(WRef(mem)), lib, lib_polarity) case (None, Some(PolarizedPort(lib, lib_polarity))) => - stmts += connectPorts(andAddrMatch(not(memWriteEnable)), lib, lib_polarity) + stmts += connectPorts(andAddrMatch(and(not(memWriteEnable), memChipEnable)), lib, lib_polarity) } /* Palmer: This is actually the memory compiler: it figures out how to From ecc52b9b7cf8818dfbc5924dcf8ae315a26b85dc Mon Sep 17 00:00:00 2001 From: Abraham Gonzalez Date: Tue, 5 Nov 2019 21:29:57 -0800 Subject: [PATCH 150/273] add test case for we bug --- .../test/resources/lib-WriteEnableTest.json | 24 +++++++ macros/src/test/scala/SpecificExamples.scala | 70 +++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 macros/src/test/resources/lib-WriteEnableTest.json diff --git a/macros/src/test/resources/lib-WriteEnableTest.json b/macros/src/test/resources/lib-WriteEnableTest.json new file mode 100644 index 000000000..be7852a6c --- /dev/null +++ b/macros/src/test/resources/lib-WriteEnableTest.json @@ -0,0 +1,24 @@ +[ + { + "type" : "sram", + "name" : "fake_mem", + "width" : 64, + "depth" : "4096", + "mux" : 4, + "family" : "1rw", + "ports" : [ { + "address port name" : "addr", + "address port polarity" : "active high", + "clock port name" : "clk", + "clock port polarity" : "positive edge", + "write enable port 
name" : "wen", + "write enable port polarity" : "active high", + "read enable port name" : "ren", + "read enable port polarity" : "active high", + "output port name" : "dataout", + "output port polarity" : "active high", + "input port name" : "datain", + "input port polarity" : "active high" + } ] + } +] diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala index f59473c33..7179d20f0 100644 --- a/macros/src/test/scala/SpecificExamples.scala +++ b/macros/src/test/scala/SpecificExamples.scala @@ -22,6 +22,76 @@ class GenerateSomeVerilog extends MacroCompilerSpec with HasSRAMGenerator with H } } +class WriteEnableTest extends MacroCompilerSpec with HasSRAMGenerator { + val mem = s"mem-WriteEnableTest.json" // mem. you want to create + val lib = s"lib-WriteEnableTest.json" // lib. of mems to create it + val v = s"WriteEnableTest.json" + + override val libPrefix = "macros/src/test/resources" + + val memSRAMs = mdf.macrolib.Utils.readMDFFromString( +""" +[ { + "type" : "sram", + "name" : "cc_banks_0_ext", + "width" : 64, + "depth" : "4096", + "mux" : 1, + "ports" : [ { + "address port name" : "RW0_addr", + "address port polarity" : "active high", + "clock port name" : "RW0_clk", + "clock port polarity" : "positive edge", + "write enable port name" : "RW0_wmode", + "write enable port polarity" : "active high", + "chip enable port name" : "RW0_en", + "chip enable port polarity" : "active high", + "output port name" : "RW0_rdata", + "output port polarity" : "active high", + "input port name" : "RW0_wdata", + "input port polarity" : "active high" + } ], + "family" : "1rw" +} ] +""").getOrElse(List()) + + writeToMem(mem, memSRAMs) + + val output = +""" + circuit cc_banks_0_ext : + module cc_banks_0_ext : + input RW0_addr : UInt<12> + input RW0_clk : Clock + input RW0_wdata : UInt<64> + output RW0_rdata : UInt<64> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + + inst mem_0_0 of fake_mem + mem_0_0.clk <= RW0_clk + mem_0_0.addr <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.dataout, 63, 0) + mem_0_0.datain <= bits(RW0_wdata, 63, 0) + mem_0_0.ren <= and(and(not(RW0_wmode), RW0_en), UInt<1>("h1")) + mem_0_0.wen <= and(and(and(RW0_wmode, RW0_en), UInt<1>("h1")), UInt<1>("h1")) + node RW0_rdata_0 = RW0_rdata_0_0 + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + extmodule fake_mem : + input addr : UInt<12> + input clk : Clock + input datain : UInt<64> + output dataout : UInt<64> + input ren : UInt<1> + input wen : UInt<1> + + defname = fake_mem +""" + + compileExecuteAndTest(mem, lib, v, output) +} + class MaskPortTest extends MacroCompilerSpec with HasSRAMGenerator { val mem = s"mem-MaskPortTest.json" // mem. you want to create val lib = s"lib-MaskPortTest.json" // lib. 
of mems to create it From 1e114d03558089789b5391675dee933e2175c18a Mon Sep 17 00:00:00 2001 From: Abraham Gonzalez Date: Thu, 7 Nov 2019 10:17:24 -0800 Subject: [PATCH 151/273] Match inner variables --- tapeout/src/main/scala/transforms/Generate.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 6a9e78232..eb10503e7 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -219,7 +219,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => dump(x, tapeoutOptions.topFir, tapeoutOptions.topAnnoOut) x.circuitState.circuit.modules.collect{ case e: ExtModule => e } case x => - throw new Exception(s"executeTop failed while executing FIRRTL!\n${e}") + throw new Exception(s"executeTop failed while executing FIRRTL!\n${x}") } } @@ -243,7 +243,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => val harnessResult = firrtl.Driver.execute(optionsManager) harnessResult match { case x: FirrtlExecutionSuccess => dump(x, tapeoutOptions.harnessFir, tapeoutOptions.harnessAnnoOut) - case x => throw new Exception(s"executeHarness failed while executing FIRRTL!\n${e}") + case x => throw new Exception(s"executeHarness failed while executing FIRRTL!\n${x}") } } } From e4cce07c7818f1fb599390f3b9f97fe81fe43c44 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Wed, 23 Oct 2019 10:56:02 -0700 Subject: [PATCH 152/273] Fix issues after chisel update for august 2019 --- tapeout/src/main/scala/transforms/ResetInverter.scala | 6 ++++-- tapeout/src/main/scala/transforms/retime/Retime.scala | 6 ++++-- .../main/scala/transforms/utils/ProgrammaticBundle.scala | 8 ++++---- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/tapeout/src/main/scala/transforms/ResetInverter.scala b/tapeout/src/main/scala/transforms/ResetInverter.scala index d2f756f04..cbdb09075 100644 --- a/tapeout/src/main/scala/transforms/ResetInverter.scala +++ b/tapeout/src/main/scala/transforms/ResetInverter.scala @@ -58,7 +58,9 @@ class ResetInverterTransform extends Transform { trait ResetInverter { self: chisel3.Module => - def invert(component: InstanceId): Unit = { - annotate(chisel3.experimental.ChiselAnnotation(component, classOf[ResetInverterTransform], "invert")) + def invert[T <: chisel3.experimental.LegacyModule](module: T): Unit = { + chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation{ + def toFirrtl: Annotation = ResetInverterAnnotation(module.toNamed) + }) } } diff --git a/tapeout/src/main/scala/transforms/retime/Retime.scala b/tapeout/src/main/scala/transforms/retime/Retime.scala index 0f67adeae..a2a39b04d 100644 --- a/tapeout/src/main/scala/transforms/retime/Retime.scala +++ b/tapeout/src/main/scala/transforms/retime/Retime.scala @@ -39,7 +39,9 @@ class RetimeTransform extends Transform { trait RetimeLib { self: chisel3.Module => - def retime(component: InstanceId): Unit = { - annotate(chisel3.experimental.ChiselAnnotation(component, classOf[RetimeTransform], "retime")) + def retime[T <: chisel3.experimental.LegacyModule](module: T): Unit = { + chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation{ + def toFirrtl: Annotation = RetimeAnnotation(module.toNamed) + }) } } diff --git a/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala b/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala index c3eec670a..d73d05db0 100644 --- 
a/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala +++ b/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala @@ -4,7 +4,7 @@ import chisel3._ import scala.collection.immutable.ListMap class CustomBundle[T <: Data](elts: (String, T)*) extends Record { - val elements = ListMap(elts map { case (field, elt) => field -> elt.chiselCloneType }: _*) + val elements = ListMap(elts map { case (field, elt) => field -> chiselTypeOf(elt) }: _*) def apply(elt: String): T = elements(elt) def apply(elt: Int): T = elements(elt.toString) override def cloneType = (new CustomBundle(elements.toList: _*)).asInstanceOf[this.type] @@ -12,9 +12,9 @@ class CustomBundle[T <: Data](elts: (String, T)*) extends Record { class CustomIndexedBundle[T <: Data](elts: (Int, T)*) extends Record { // Must be String, Data - val elements = ListMap(elts map { case (field, elt) => field.toString -> elt.chiselCloneType }: _*) + val elements = ListMap(elts map { case (field, elt) => field.toString -> chiselTypeOf(elt) }: _*) // TODO: Make an equivalent to the below work publicly (or only on subclasses?) - def indexedElements = ListMap(elts map { case (field, elt) => field -> elt.chiselCloneType }: _*) + def indexedElements = ListMap(elts map { case (field, elt) => field -> chiselTypeOf(elt) }: _*) def apply(elt: Int): T = elements(elt.toString) override def cloneType = (new CustomIndexedBundle(indexedElements.toList: _*)).asInstanceOf[this.type] } @@ -23,4 +23,4 @@ object CustomIndexedBundle { def apply[T <: Data](gen: T, idxs: Seq[Int]) = new CustomIndexedBundle(idxs.map(_ -> gen): _*) // Allows Vecs of elements of different types/widths def apply[T <: Data](gen: Seq[T]) = new CustomIndexedBundle(gen.zipWithIndex.map{ case (elt, field) => field -> elt }: _*) -} \ No newline at end of file +} From e0081208b98b6a7fb1265dc5f2bec1c40decddf7 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Thu, 31 Oct 2019 08:40:01 -0700 Subject: [PATCH 153/273] Updates for rocket-chip bump --- tapeout/src/main/scala/transforms/ResetInverter.scala | 2 +- tapeout/src/main/scala/transforms/retime/Retime.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tapeout/src/main/scala/transforms/ResetInverter.scala b/tapeout/src/main/scala/transforms/ResetInverter.scala index cbdb09075..da090dbf0 100644 --- a/tapeout/src/main/scala/transforms/ResetInverter.scala +++ b/tapeout/src/main/scala/transforms/ResetInverter.scala @@ -58,7 +58,7 @@ class ResetInverterTransform extends Transform { trait ResetInverter { self: chisel3.Module => - def invert[T <: chisel3.experimental.LegacyModule](module: T): Unit = { + def invert[T <: chisel3.internal.LegacyModule](module: T): Unit = { chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation{ def toFirrtl: Annotation = ResetInverterAnnotation(module.toNamed) }) diff --git a/tapeout/src/main/scala/transforms/retime/Retime.scala b/tapeout/src/main/scala/transforms/retime/Retime.scala index a2a39b04d..231687bf4 100644 --- a/tapeout/src/main/scala/transforms/retime/Retime.scala +++ b/tapeout/src/main/scala/transforms/retime/Retime.scala @@ -39,7 +39,7 @@ class RetimeTransform extends Transform { trait RetimeLib { self: chisel3.Module => - def retime[T <: chisel3.experimental.LegacyModule](module: T): Unit = { + def retime[T <: chisel3.internal.LegacyModule](module: T): Unit = { chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation{ def toFirrtl: Annotation = RetimeAnnotation(module.toNamed) }) From 8ca876503c71cb6779a956b30bb17078d0f4ef1f Mon 
Sep 17 00:00:00 2001 From: Albert Magyar Date: Tue, 11 Feb 2020 20:04:22 -0700 Subject: [PATCH 154/273] Correctly specify width of default zero output value (#74) --- macros/src/main/scala/MacroCompiler.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 09b86c873..652e36e99 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -549,11 +549,12 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } } // Connect mem outputs + val zeroOutputValue: Expression = UIntLiteral(0, IntWidth(mem.src.width)) mem.src.ports foreach { port => port.output match { case Some(PolarizedPort(mem, _)) => outputs get mem match { case Some(select) => - val output = (select foldRight (zero: Expression)) { + val output = (select foldRight (zeroOutputValue)) { case ((cond, tval), fval) => Mux(cond, tval, fval, fval.tpe) } stmts += Connect(NoInfo, WRef(mem), output) case None => From 7de4c478c33458dc3f195e377543ce6bab9ba5b8 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Tue, 18 Feb 2020 14:56:17 -0800 Subject: [PATCH 155/273] Update to chisel 3.2.x --- build.sbt | 5 ++ .../transforms/.clkgen/ClkAnnotations.scala | 79 +++++++++---------- .../transforms/.pads/AddIOPadsTransform.scala | 8 +- .../transforms/.pads/AnnotateSupplyPads.scala | 14 ++-- .../transforms/.pads/ChiselTopModule.scala | 39 ++++----- .../scala/transforms/.pads/CreatePadBBs.scala | 18 ++--- .../transforms/.pads/FoundryPadsYaml.scala | 24 +++--- .../transforms/.pads/PadAnnotations.scala | 50 ++++++------ .../transforms/.pads/PadDescriptors.scala | 9 +-- 9 files changed, 124 insertions(+), 122 deletions(-) diff --git a/build.sbt b/build.sbt index d254ca248..9ec44bf7c 100644 --- a/build.sbt +++ b/build.sbt @@ -39,6 +39,11 @@ lazy val macros = (project in file("macros")) lazy val tapeout = (project in file("tapeout")) .settings(commonSettings) + .settings(Seq( + libraryDependencies ++= Seq( + "io.github.daviddenton" %% "handlebars-scala-fork" % "2.3.0" + ) + )) .settings(scalacOptions in Test ++= Seq("-language:reflectiveCalls")) lazy val root = (project in file(".")).aggregate(macros, tapeout) diff --git a/tapeout/src/main/scala/transforms/.clkgen/ClkAnnotations.scala b/tapeout/src/main/scala/transforms/.clkgen/ClkAnnotations.scala index 832cfbb12..3a2f9ba3c 100644 --- a/tapeout/src/main/scala/transforms/.clkgen/ClkAnnotations.scala +++ b/tapeout/src/main/scala/transforms/.clkgen/ClkAnnotations.scala @@ -42,8 +42,8 @@ case object ClkGen extends ClkModType { def serialize: String = "gen" } -// Unlike typical SDC, starts at 0. -// Otherwise, see pg. 63 of "Constraining Designs for Synthesis and Timing Analysis" +// Unlike typical SDC, starts at 0. +// Otherwise, see pg. 63 of "Constraining Designs for Synthesis and Timing Analysis" // by S. 
Gangadharan // original clk: |-----|_____|-----|_____| // edges: 0 1 2 3 4 @@ -51,9 +51,9 @@ case object ClkGen extends ClkModType { // ---> |-----------|___________| // sources = source id's case class GeneratedClk( - id: String, - sources: Seq[String] = Seq(), - referenceEdges: Seq[Int] = Seq(), + id: String, + sources: Seq[String] = Seq(), + referenceEdges: Seq[Int] = Seq(), period: Option[Double] = None) { require(referenceEdges.sorted == referenceEdges, "Edges must be in order for generated clk") if (referenceEdges.nonEmpty) require(referenceEdges.length % 2 == 1, "# of reference edges must be odd!") @@ -64,13 +64,13 @@ case class ClkModAnnotation(tpe: String, generatedClks: Seq[GeneratedClk]) { def modType: ClkModType = HasClkAnnotation.modType(tpe) modType match { - case ClkDiv => + case ClkDiv => generatedClks foreach { c => require(c.referenceEdges.nonEmpty, "Reference edges must be defined for clk divider!") require(c.sources.length == 1, "Clk divider output can only have 1 source") require(c.period.isEmpty, "No period should be specified for clk divider output") } - case ClkMux => + case ClkMux => generatedClks foreach { c => require(c.referenceEdges.isEmpty, "Reference edges must not be defined for clk mux!") require(c.period.isEmpty, "No period should be specified for clk mux output") @@ -92,22 +92,24 @@ abstract class FirrtlClkTransformAnnotation { } // Firrtl version -case class TargetClkModAnnoF(target: ModuleName, anno: ClkModAnnotation) extends FirrtlClkTransformAnnotation { +case class TargetClkModAnnoF(target: ModuleName, anno: ClkModAnnotation) extends FirrtlClkTransformAnnotation with SingleTargetAnnotation[ModuleName] { + def duplicate(n: ModuleName): TargetClkModAnnoF = this.copy(target = n) def getAnno = Annotation(target, classOf[ClkSrcTransform], anno.serialize) def targetName = target.name def modType = anno.modType def generatedClks = anno.generatedClks - def getAllClkPorts = anno.generatedClks.map(x => + def getAllClkPorts = anno.generatedClks.map(x => List(List(x.id), x.sources).flatten).flatten.distinct.map(Seq(targetName, _).mkString(".")) } // Chisel version -case class TargetClkModAnnoC(target: Module, anno: ClkModAnnotation) { - def getAnno = ChiselAnnotation(target, classOf[ClkSrcTransform], anno.serialize) +case class TargetClkModAnnoC(target: Module, anno: ClkModAnnotation) extends ChiselAnnotation { + def toFirrtl = TargetClkModAnnoF(target.toNamed, anno) } // Firrtl version -case class TargetClkPortAnnoF(target: ComponentName, anno: ClkPortAnnotation) extends FirrtlClkTransformAnnotation { +case class TargetClkPortAnnoF(target: ComponentName, anno: ClkPortAnnotation) extends FirrtlClkTransformAnnotation with SingleTargetAnnotation[ComponentName] { + def duplicate(n: ComponentName): TargetClkPortAnnoF = this.copy(target = n) def getAnno = Annotation(target, classOf[ClkSrcTransform], anno.serialize) def targetName = Seq(target.module.name, target.name).mkString(".") def modId = Seq(target.module.name, anno.id).mkString(".") @@ -115,8 +117,8 @@ case class TargetClkPortAnnoF(target: ComponentName, anno: ClkPortAnnotation) ex } // Chisel version -case class TargetClkPortAnnoC(target: Element, anno: ClkPortAnnotation) { - def getAnno = ChiselAnnotation(target, classOf[ClkSrcTransform], anno.serialize) +case class TargetClkPortAnnoC(target: Element, anno: ClkPortAnnotation) extends ChiselAnnotation { + def toFirrtl = TargetClkPortAnnoF(target.toNamed, anno) } object HasClkAnnotation { @@ -132,31 +134,31 @@ object HasClkAnnotation { def unapply(a: 
Annotation): Option[FirrtlClkTransformAnnotation] = a match { case Annotation(f, t, s) if t == classOf[ClkSrcTransform] => f match { - case m: ModuleName => + case m: ModuleName => Some(TargetClkModAnnoF(m, s.parseYaml.convertTo[ClkModAnnotation])) case c: ComponentName => Some(TargetClkPortAnnoF(c, s.parseYaml.convertTo[ClkPortAnnotation])) - case _ => throw new Exception("Clk source annotation only valid on module or component!") + case _ => throw new Exception("Clk source annotation only valid on module or component!") } case _ => None } def apply(annos: Seq[Annotation]): Option[(Seq[TargetClkModAnnoF],Seq[TargetClkPortAnnoF])] = { // Get all clk-related annotations - val clkAnnos = annos.map(x => unapply(x)).flatten + val clkAnnos = annos.map(x => unapply(x)).flatten val targets = clkAnnos.map(x => x.targetName) require(targets.distinct.length == targets.length, "Only 1 clk related annotation is allowed per component/module") if (clkAnnos.length == 0) None else { - val componentAnnos = clkAnnos.filter { + val componentAnnos = clkAnnos.filter { case TargetClkPortAnnoF(ComponentName(_, ModuleName(_, _)), _) => true case _ => false }.map(x => x.asInstanceOf[TargetClkPortAnnoF]) val associatedMods = componentAnnos.map(x => x.target.module.name) - val moduleAnnos = clkAnnos.filter { - case TargetClkModAnnoF(ModuleName(m, _), _) => + val moduleAnnos = clkAnnos.filter { + case TargetClkModAnnoF(ModuleName(m, _), _) => require(associatedMods contains m, "Clk modules should always have clk port annotations!") - true + true case _ => false }.map(x => x.asInstanceOf[TargetClkModAnnoF]) Some((moduleAnnos, componentAnnos)) @@ -170,29 +172,26 @@ trait IsClkModule { self: chisel3.Module => - private def doNotDedup(module: Module): Unit = { - annotate(ChiselAnnotation(module, classOf[DedupModules], "nodedup!")) - } doNotDedup(this) private def extractElementNames(signal: Data): Seq[String] = { val names = signal match { - case elt: Record => + case elt: Record => elt.elements.map { case (key, value) => extractElementNames(value).map(x => key + "_" + x) }.toSeq.flatten - case elt: Vec[_] => + case elt: Vec[_] => elt.zipWithIndex.map { case (elt, i) => extractElementNames(elt).map(x => i + "_" + x) }.toSeq.flatten case elt: Element => Seq("") case elt => throw new Exception(s"Cannot extractElementNames for type ${elt.getClass}") } - names.map(s => s.stripSuffix("_")) + names.map(s => s.stripSuffix("_")) } // TODO: Replace! 
def extractElements(signal: Data): Seq[Element] = { signal match { - case elt: Record => + case elt: Record => elt.elements.map { case (key, value) => extractElements(value) }.toSeq.flatten - case elt: Vec[_] => + case elt: Vec[_] => elt.map { elt => extractElements(elt) }.toSeq.flatten case elt: Element => Seq(elt) case elt => throw new Exception(s"Cannot extractElements for type ${elt.getClass}") @@ -200,7 +199,7 @@ trait IsClkModule { } def getIOName(signal: Element): String = { - val possibleNames = extractElements(io).zip(extractElementNames(io)).map { + val possibleNames = extractElements(io).zip(extractElementNames(io)).map { case (sig, name) if sig == signal => Some(name) case _ => None }.flatten @@ -208,11 +207,11 @@ trait IsClkModule { else throw new Exception("You can only get the name of an io port!") } - def annotateDerivedClks(tpe: ClkModType, generatedClks: Seq[GeneratedClk]): Unit = + def annotateDerivedClks(tpe: ClkModType, generatedClks: Seq[GeneratedClk]): Unit = annotateDerivedClks(ClkModAnnotation(tpe.serialize, generatedClks)) def annotateDerivedClks(anno: ClkModAnnotation): Unit = annotateDerivedClks(this, anno) - def annotateDerivedClks(m: Module, anno: ClkModAnnotation): Unit = - annotate(TargetClkModAnnoC(m, anno).getAnno) + def annotateDerivedClks(m: Module, anno: ClkModAnnotation): Unit = + annotate(TargetClkModAnnoC(m, anno)) def annotateClkPort(p: Element): Unit = annotateClkPort(p, None, "") def annotateClkPort(p: Element, sink: Sink): Unit = annotateClkPort(p, Some(sink), "") @@ -221,7 +220,7 @@ trait IsClkModule { def annotateClkPort(p: Element, sink: Option[Sink], id: String): Unit = { // If no id is specified, it'll try to figure out a name, assuming p is an io port val newId = id match { - case "" => + case "" => getIOName(p) case _ => id } @@ -229,12 +228,12 @@ trait IsClkModule { } def annotateClkPort(p: Element, anno: ClkPortAnnotation): Unit = { - p.dir match { - case chisel3.core.Direction.Input => + DataMirror.directionOf(p) match { + case chisel3.core.ActualDirection.Input => require(anno.tag.nonEmpty, "Module inputs must be clk sinks") - require(anno.tag.get.src.isEmpty, + require(anno.tag.get.src.isEmpty, "Clock module (not top) input clks should not have clk period, etc. specified") - case chisel3.core.Direction.Output => + case chisel3.core.ActualDirection.Output => require(anno.tag.isEmpty, "Module outputs must not be clk sinks (they're sources!)") case _ => throw new Exception("Clk port direction must be specified!") @@ -243,6 +242,6 @@ trait IsClkModule { case _: chisel3.core.Clock => case _ => throw new Exception("Clock port must be of type Clock") } - annotate(TargetClkPortAnnoC(p, anno).getAnno) + annotate(TargetClkPortAnnoC(p, anno)) } -} \ No newline at end of file +} diff --git a/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala b/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala index f9501f8f8..d427d0d92 100644 --- a/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala +++ b/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala @@ -52,12 +52,12 @@ class AddIOPadsTransform extends Transform with SeqTransformBased { ) // Expects BlackBox helper to be run after to inline pad Verilog! 
val ret = runTransforms(state) - val currentAnnos = ret.annotations.getOrElse(AnnotationMap(Seq.empty)).annotations - val newAnnoMap = AnnotationMap(currentAnnos ++ bbAnnotations) - val newState = CircuitState(ret.circuit, outputForm, Some(newAnnoMap), ret.renames) + val currentAnnos = ret.annotations + val newAnnoMap = AnnotationSeq(currentAnnos ++ bbAnnotations) + val newState = CircuitState(ret.circuit, outputForm, newAnnoMap, ret.renames) // TODO: *.f file is overwritten on subsequent executions, but it doesn't seem to be used anywhere? (new firrtl.transforms.BlackBoxSourceHelper).execute(newState) } } -} \ No newline at end of file +} diff --git a/tapeout/src/main/scala/transforms/.pads/AnnotateSupplyPads.scala b/tapeout/src/main/scala/transforms/.pads/AnnotateSupplyPads.scala index 27b93b46a..cda007914 100644 --- a/tapeout/src/main/scala/transforms/.pads/AnnotateSupplyPads.scala +++ b/tapeout/src/main/scala/transforms/.pads/AnnotateSupplyPads.scala @@ -22,13 +22,13 @@ case class TopSupplyPad( require(pad.padType == SupplyPad) def padOrientation = padSide.orientation - def getPadName = pad.getName(NoDirection, padOrientation) + def getPadName = pad.getName(Output/*Should be None*/, padOrientation) def firrtlBBName = getPadName private def instNamePrefix = Seq(firrtlBBName, padSide.serialize).mkString("_") def instNames = (0 until num).map(i => Seq(instNamePrefix, i.toString).mkString("_")) def createPadInline(): String = { - def getPadVerilog(): String = pad.getVerilog(NoDirection, padOrientation) + def getPadVerilog(): String = pad.getVerilog(Output/*Should be None*/, padOrientation) s"""inline |${getPadName}.v |${getPadVerilog}""".stripMargin @@ -37,14 +37,14 @@ case class TopSupplyPad( object AnnotateSupplyPads { def apply( - pads: Seq[FoundryPad], + pads: Seq[FoundryPad], supplyAnnos: Seq[SupplyAnnotation] ): Seq[TopSupplyPad] = { - supplyAnnos.map( a => + supplyAnnos.map( a => pads.find(_.name == a.padName) match { - case None => + case None => throw new Exception(s"Supply pad ${a.padName} not found in Yaml file!") - case Some(x) => + case Some(x) => Seq( TopSupplyPad(x, Left, a.leftSide), TopSupplyPad(x, Right, a.rightSide), @@ -53,4 +53,4 @@ object AnnotateSupplyPads { } ).flatten.filter(_.num > 0) } -} \ No newline at end of file +} diff --git a/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala b/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala index 8f891e62d..36979ce51 100644 --- a/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala +++ b/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala @@ -8,8 +8,8 @@ import firrtl.transforms.DedupModules // TODO: Move out of pads // NOTE: You can't really annotate outside of the module itself UNLESS you break up the compile step in 2 i.e. -// annotate post-Chisel but pre-Firrtl (unfortunate non-generator friendly downside). -// It's recommended to have a Tapeout specific TopModule wrapper. +// annotate post-Chisel but pre-Firrtl (unfortunate non-generator friendly downside). +// It's recommended to have a Tapeout specific TopModule wrapper. 
// LIMITATION: All signals of a bus must be on the same chip side // Chisel-y annotations @@ -19,14 +19,17 @@ abstract class TopModule( coreWidth: Int = 0, coreHeight: Int = 0, usePads: Boolean = true, - override_clock: Option[Clock] = None, - override_reset: Option[Bool] = None) extends Module(override_clock, override_reset) with IsClkModule { + override_clock: Option[Clock] = None, + override_reset: Option[Bool] = None) extends Module with IsClkModule { + + override_clock.foreach(clock := _) + override_reset.foreach(reset := _) override def annotateClkPort(p: Element, anno: ClkPortAnnotation): Unit = { - p.dir match { - case chisel3.core.Direction.Input => + DataMirror.directionOf(p) match { + case chisel3.core.ActualDirection.Input => require(anno.tag.nonEmpty, "Top Module input clks must be clk sinks") - require(anno.tag.get.src.nonEmpty, + require(anno.tag.get.src.nonEmpty, "Top module input clks must have clk period, etc. specified") case _ => throw new Exception("Clk port direction must be specified!") @@ -35,10 +38,10 @@ abstract class TopModule( case _: chisel3.core.Clock => case _ => throw new Exception("Clock port must be of type Clock") } - annotate(TargetClkPortAnnoC(p, anno).getAnno) + annotate(TargetClkPortAnnoC(p, anno)) } - override def annotateDerivedClks(m: Module, anno: ClkModAnnotation): Unit = + override def annotateDerivedClks(m: Module, anno: ClkModAnnotation): Unit = throw new Exception("Top module cannot be pure clock module!") // Annotate module as top module (that requires pad transform) @@ -52,25 +55,25 @@ abstract class TopModule( coreHeight = coreHeight, supplyAnnos = supplyAnnos ) - annotate(TargetModulePadAnnoC(this, modulePadAnnotation).getAnno) + annotate(TargetModulePadAnnoC(this, modulePadAnnotation)) } - + // Annotate IO with side + pad name def annotatePad(sig: Element, side: PadSide = defaultPadSide, name: String = ""): Unit = if (usePads) { val anno = IOPadAnnotation(side.serialize, name) - annotate(TargetIOPadAnnoC(sig, anno).getAnno) + annotate(TargetIOPadAnnoC(sig, anno)) } def annotatePad(sig: Aggregate, name: String): Unit = annotatePad(sig, side = defaultPadSide, name) def annotatePad(sig: Aggregate, side: PadSide): Unit = annotatePad(sig, side, name = "") - def annotatePad(sig: Aggregate, side: PadSide, name: String): Unit = + def annotatePad(sig: Aggregate, side: PadSide, name: String): Unit = extractElements(sig) foreach { x => annotatePad(x, side, name) } - // There may be cases where pads were inserted elsewhere. If that's the case, allow certain IO to - // not have pads auto added. Note that annotatePad and noPad are mutually exclusive! - def noPad(sig: Element): Unit = if (usePads) annotate(TargetIOPadAnnoC(sig, NoIOPadAnnotation()).getAnno) + // There may be cases where pads were inserted elsewhere. If that's the case, allow certain IO to + // not have pads auto added. Note that annotatePad and noPad are mutually exclusive! 
+ def noPad(sig: Element): Unit = if (usePads) annotate(TargetIOPadAnnoC(sig, NoIOPadAnnotation())) def noPad(sig: Aggregate): Unit = extractElements(sig) foreach { x => noPad(x) } - // Since this is a super class, this should be the first thing that gets run + // Since this is a super class, this should be the first thing that gets run // (at least when the module is actually at the top -- currently no guarantees otherwise :( firrtl limitation) createPads() -} \ No newline at end of file +} diff --git a/tapeout/src/main/scala/transforms/.pads/CreatePadBBs.scala b/tapeout/src/main/scala/transforms/.pads/CreatePadBBs.scala index 1a7f2aa90..5b35fbe4f 100644 --- a/tapeout/src/main/scala/transforms/.pads/CreatePadBBs.scala +++ b/tapeout/src/main/scala/transforms/.pads/CreatePadBBs.scala @@ -40,7 +40,7 @@ object CreatePadBBs { } def checkLegalPadName(namespace: Namespace, usedPads: Seq[UsedPadInfo]): Unit = { - usedPads foreach { x => + usedPads foreach { x => if (namespace contains x.padName) throw new Exception(s"Pad name ${x.padName} already used!") if (namespace contains x.padArrayName) @@ -61,21 +61,21 @@ object CreatePadBBs { // Note that we need to check for Firrtl name uniqueness here! (due to parameterization) val uniqueExtMods = scala.collection.mutable.ArrayBuffer[UsedPadInfo]() - usedPads foreach { x => + usedPads foreach { x => if (uniqueExtMods.find(_.firrtlBBName == x.firrtlBBName).isEmpty) uniqueExtMods += x } - // Collecting unique parameterized black boxes + // Collecting unique parameterized black boxes // (for io, they're wrapped pads; for supply, they're pad modules directly) val uniqueParameterizedBBs = scala.collection.mutable.ArrayBuffer[UsedPadInfo]() - uniqueExtMods foreach { x => + uniqueExtMods foreach { x => if (uniqueParameterizedBBs.find(_.padArrayName == x.padArrayName).isEmpty) uniqueParameterizedBBs += x } - // Note: Firrtl is silly and doesn't implement true parameterization -- each module with - // parameterization that potentially affects # of IO needs to be uniquely identified + // Note: Firrtl is silly and doesn't implement true parameterization -- each module with + // parameterization that potentially affects # of IO needs to be uniquely identified // (but only in Firrtl) val bbs = uniqueExtMods.map(x => { // Supply pads don't have ports @@ -100,10 +100,10 @@ object CreatePadBBs { // Add annotations to black boxes to inline Verilog from template // Again, note the weirdness in parameterization -- just need to hook to one matching Firrtl instance - val annos = uniqueParameterizedBBs.map(x => - BlackBoxSourceAnnotation(ModuleName(x.firrtlBBName, CircuitName(c.main)), x.padInline) + val annos = uniqueParameterizedBBs.map(x => + BlackBoxInlineAnno(ModuleName(x.firrtlBBName, CircuitName(c.main)), x.firrtlBBName, x.padInline) ).toSeq (c.copy(modules = c.modules ++ bbs), annos) } -} \ No newline at end of file +} diff --git a/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala b/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala index ff1b92f0d..2d372a51f 100644 --- a/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala +++ b/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala @@ -7,11 +7,11 @@ import firrtl.ir._ import barstools.tapeout.transforms._ case class FoundryPad( - tpe: String, - name: String, - width: Int, + tpe: String, + name: String, + width: Int, height: Int, - supplySetNum: Option[Int], + supplySetNum: Option[Int], verilog: String) { def padInstName = "PAD" @@ -23,16 +23,16 @@ case class FoundryPad( def 
getSupplySetNum = supplySetNum.getOrElse(1) val padType = tpe match { - case "digital" => + case "digital" => require(verilog.contains(DigitalPad.inName), "Digital pad template must contain input called 'in'") require(verilog.contains(DigitalPad.outName), "Digital pad template must contain output called 'out'") require(verilog.contains("{{#if isInput}}"), "Digital pad template must contain '{{#if isInput}}'") DigitalPad - case "analog" => + case "analog" => require(verilog.contains(AnalogPad.ioName), "Analog pad template must contain inout called 'io'") require(!verilog.contains("{{#if isInput}}"), "Analog pad template must not contain '{{#if isInput}}'") AnalogPad - case "supply" => + case "supply" => // Supply pads don't have IO require(!verilog.contains("{{#if isInput}}"), "Supply pad template must not contain '{{#if isInput}}'") require( @@ -57,8 +57,8 @@ case class FoundryPad( private val orient = if (isHorizontal) Horizontal.serialize else Vertical.serialize private val dir = padType match { - case AnalogPad => InOut.serialize - case SupplyPad => NoDirection.serialize + case AnalogPad => "inout" + case SupplyPad => "none" case DigitalPad => if (isInput) Input.serialize else Output.serialize } val name = { @@ -69,7 +69,7 @@ case class FoundryPad( } // Note: Analog + supply don't use direction - private def getTemplateParams(dir: Direction, orient: PadOrientation): TemplateParams = + private def getTemplateParams(dir: Direction, orient: PadOrientation): TemplateParams = TemplateParams(isInput = (dir == Input), isHorizontal = (orient == Horizontal)) def getVerilog(dir: Direction, orient: PadOrientation): String = { @@ -85,11 +85,11 @@ object FoundryPadsYaml extends DefaultYamlProtocol { implicit val _pad = yamlFormat6(FoundryPad) def parse(techDir: String): Seq[FoundryPad] = { val file = techDir + exampleResource - if(techDir != "" && !(new java.io.File(file)).exists()) + if(techDir != "" && !(new java.io.File(file)).exists()) throw new Exception("Technology directory must contain FoundryPads.yaml!") val out = (new YamlFileReader(exampleResource)).parse[FoundryPad](if (techDir == "") "" else file) val padNames = out.map(x => x.correctedName) require(padNames.distinct.length == padNames.length, "Pad names must be unique!") out } -} \ No newline at end of file +} diff --git a/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala b/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala index 66b7f1843..ed870092d 100644 --- a/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala +++ b/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala @@ -21,7 +21,7 @@ abstract class FirrtlPadTransformAnnotation { // IO Port can either be annotated with padName + padSide OR noPad (mutually exclusive) abstract class IOAnnotation { - def serialize: String + def serialize: String } case class IOPadAnnotation(padSide: String, padName: String) extends IOAnnotation { import PadAnnotationsYaml._ @@ -31,29 +31,30 @@ case class IOPadAnnotation(padSide: String, padName: String) extends IOAnnotatio case class NoIOPadAnnotation(noPad: String = "") extends IOAnnotation { import PadAnnotationsYaml._ def serialize: String = this.toYaml.prettyPrint - def field = "noPad:" + def field = "noPad:" } // Firrtl version -case class TargetIOPadAnnoF(target: ComponentName, anno: IOAnnotation) extends FirrtlPadTransformAnnotation { +case class TargetIOPadAnnoF(target: ComponentName, anno: IOAnnotation) extends FirrtlPadTransformAnnotation with SingleTargetAnnotation[ComponentName] { + def duplicate(n: 
ComponentName): TargetIOPadAnnoF = this.copy(target = n) def getAnno = Annotation(target, classOf[AddIOPadsTransform], anno.serialize) def targetName = target.name } // Chisel version -case class TargetIOPadAnnoC(target: Element, anno: IOAnnotation) { - def getAnno = ChiselAnnotation(target, classOf[AddIOPadsTransform], anno.serialize) +case class TargetIOPadAnnoC(target: Element, anno: IOAnnotation) extends ChiselAnnotation { + def toFirrtl = TargetIOPadAnnoF(target.toNamed, anno) } // A bunch of supply pads (designated by name, # on each chip side) can be associated with the top module case class SupplyAnnotation( - padName: String, - leftSide: Int = 0, - rightSide: Int = 0, - topSide: Int = 0, + padName: String, + leftSide: Int = 0, + rightSide: Int = 0, + topSide: Int = 0, bottomSide: Int = 0) // The chip top should have a default pad side, a pad template file, and supply annotations case class ModulePadAnnotation( - defaultPadSide: String = Top.serialize, - coreWidth: Int = 0, + defaultPadSide: String = Top.serialize, + coreWidth: Int = 0, coreHeight: Int = 0, supplyAnnos: Seq[SupplyAnnotation] = Seq.empty) { import PadAnnotationsYaml._ @@ -63,13 +64,14 @@ case class ModulePadAnnotation( def getDefaultPadSide: PadSide = HasPadAnnotation.getSide(defaultPadSide) } // Firrtl version -case class TargetModulePadAnnoF(target: ModuleName, anno: ModulePadAnnotation) extends FirrtlPadTransformAnnotation { - def getAnno = Annotation(target, classOf[AddIOPadsTransform], anno.serialize) +case class TargetModulePadAnnoF(target: ModuleName, anno: ModulePadAnnotation) extends FirrtlPadTransformAnnotation with SingleTargetAnnotation[ModuleName] { + def duplicate(n: ModuleName): TargetModulePadAnnoF = this.copy(target = n) + def getAnno = Annotation(target, classOf[AddIOPadsTransform], anno.serialize) def targetName = target.name } // Chisel version -case class TargetModulePadAnnoC(target: Module, anno: ModulePadAnnotation) { - def getAnno = ChiselAnnotation(target, classOf[AddIOPadsTransform], anno.serialize) +case class TargetModulePadAnnoC(target: Module, anno: ModulePadAnnotation) extends ChiselAnnotation { + def toFirrtl = TargetModulePadAnnoF(target.toNamed, anno) } case class CollectedAnnos( @@ -95,9 +97,9 @@ object HasPadAnnotation { def unapply(a: Annotation): Option[FirrtlPadTransformAnnotation] = a match { case Annotation(f, t, s) if t == classOf[AddIOPadsTransform] => f match { - case m: ModuleName => + case m: ModuleName => Some(TargetModulePadAnnoF(m, s.parseYaml.convertTo[ModulePadAnnotation])) - case c: ComponentName if s.contains(NoIOPadAnnotation().field) => + case c: ComponentName if s.contains(NoIOPadAnnotation().field) => Some(TargetIOPadAnnoF(c, s.parseYaml.convertTo[NoIOPadAnnotation])) case c: ComponentName => Some(TargetIOPadAnnoF(c, s.parseYaml.convertTo[IOPadAnnotation])) @@ -108,26 +110,26 @@ object HasPadAnnotation { def apply(annos: Seq[Annotation]): Option[CollectedAnnos] = { // Get all pad-related annotations (config files, pad sides, pad names, etc.) 
- val padAnnos = annos.map(x => unapply(x)).flatten + val padAnnos = annos.map(x => unapply(x)).flatten val targets = padAnnos.map(x => x.targetName) require(targets.distinct.length == targets.length, "Only 1 pad related annotation is allowed per component/module") if (padAnnos.length == 0) None else { - val moduleAnnosTemp = padAnnos.filter { - case TargetModulePadAnnoF(_, _) => true + val moduleAnnosTemp = padAnnos.filter { + case TargetModulePadAnnoF(_, _) => true case _ => false } require(moduleAnnosTemp.length == 1, "Only 1 module may be designated 'Top'") val moduleAnnos = moduleAnnosTemp.head val topModName = moduleAnnos.targetName - val componentAnnos = padAnnos.filter { - case TargetIOPadAnnoF(ComponentName(_, ModuleName(n, _)), _) if n == topModName => + val componentAnnos = padAnnos.filter { + case TargetIOPadAnnoF(ComponentName(_, ModuleName(n, _)), _) if n == topModName => true - case TargetIOPadAnnoF(ComponentName(_, ModuleName(n, _)), _) if n != topModName => + case TargetIOPadAnnoF(ComponentName(_, ModuleName(n, _)), _) if n != topModName => throw new Exception("Pad related component annotations must all be in the same top module") case _ => false }.map(x => x.asInstanceOf[TargetIOPadAnnoF]) Some(CollectedAnnos(componentAnnos, moduleAnnos.asInstanceOf[TargetModulePadAnnoF])) } } -} \ No newline at end of file +} diff --git a/tapeout/src/main/scala/transforms/.pads/PadDescriptors.scala b/tapeout/src/main/scala/transforms/.pads/PadDescriptors.scala index 95a2cb1bd..cb3420b13 100644 --- a/tapeout/src/main/scala/transforms/.pads/PadDescriptors.scala +++ b/tapeout/src/main/scala/transforms/.pads/PadDescriptors.scala @@ -28,13 +28,6 @@ case object NoPad extends PadType { def serialize: String = "none" } -case object InOut extends Direction { - def serialize: String = "inout" -} -case object NoDirection extends Direction { - def serialize: String = "none" -} - abstract class PadSide extends FirrtlNode { def orientation: PadOrientation } @@ -53,4 +46,4 @@ case object Top extends PadSide { case object Bottom extends PadSide { def serialize: String = "bottom" def orientation: PadOrientation = Vertical -} \ No newline at end of file +} From db0efd38fc8c842d994c8dcce5230e83ac7ba829 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Wed, 19 Feb 2020 17:23:10 -0800 Subject: [PATCH 156/273] Fix CI tests --- macros/src/test/scala/CostFunction.scala | 8 +- macros/src/test/scala/MultiPort.scala | 56 ++-- macros/src/test/scala/SimpleSplitDepth.scala | 12 +- macros/src/test/scala/SimpleSplitWidth.scala | 28 +- macros/src/test/scala/SpecificExamples.scala | 272 +++++++++--------- macros/src/test/scala/SynFlops.scala | 8 +- mdf | 2 +- .../main/scala/transforms/ResetInverter.scala | 3 +- .../scala/transforms/ResetInverterSpec.scala | 2 +- .../scala/transforms/retime/RetimeSpec.scala | 4 +- 10 files changed, 200 insertions(+), 195 deletions(-) diff --git a/macros/src/test/scala/CostFunction.scala b/macros/src/test/scala/CostFunction.scala index c82080b27..35936ed14 100644 --- a/macros/src/test/scala/CostFunction.scala +++ b/macros/src/test/scala/CostFunction.scala @@ -82,22 +82,22 @@ circuit target_memory : mem_0_0.addr <= addr node dout_0_0 = bits(mem_0_0.dout, 31, 0) mem_0_0.din <= bits(din, 31, 0) - mem_0_0.write_en <= and(and(write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_0.write_en <= and(and(and(write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) mem_0_1.clk <= clk mem_0_1.addr <= addr node dout_0_1 = bits(mem_0_1.dout, 31, 0) mem_0_1.din <= bits(din, 63, 32) - mem_0_1.write_en <= 
and(and(write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_1.write_en <= and(and(and(write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) mem_0_2.clk <= clk mem_0_2.addr <= addr node dout_0_2 = bits(mem_0_2.dout, 31, 0) mem_0_2.din <= bits(din, 95, 64) - mem_0_2.write_en <= and(and(write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_2.write_en <= and(and(and(write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) mem_0_3.clk <= clk mem_0_3.addr <= addr node dout_0_3 = bits(mem_0_3.dout, 31, 0) mem_0_3.din <= bits(din, 127, 96) - mem_0_3.write_en <= and(and(write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_3.write_en <= and(and(and(write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) node dout_0 = cat(dout_0_3, cat(dout_0_2, cat(dout_0_1, dout_0_0))) dout <= mux(UInt<1>("h1"), dout_0, UInt<1>("h0")) diff --git a/macros/src/test/scala/MultiPort.scala b/macros/src/test/scala/MultiPort.scala index 470fee160..fdaae9f79 100644 --- a/macros/src/test/scala/MultiPort.scala +++ b/macros/src/test/scala/MultiPort.scala @@ -66,50 +66,50 @@ class SplitWidth_2rw extends MacroCompilerSpec with HasSRAMGenerator with HasSim node portA_dout_0_0 = bits(mem_0_0.portA_dout, 15, 0) mem_0_0.portA_din <= bits(portA_din, 15, 0) mem_0_0.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_0.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 0, 0)), UInt<1>("h1")) + mem_0_0.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 0, 0)), UInt<1>("h1")) mem_0_1.portA_clk <= portA_clk mem_0_1.portA_addr <= portA_addr node portA_dout_0_1 = bits(mem_0_1.portA_dout, 15, 0) mem_0_1.portA_din <= bits(portA_din, 31, 16) mem_0_1.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_1.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 1, 1)), UInt<1>("h1")) + mem_0_1.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 1, 1)), UInt<1>("h1")) mem_0_2.portA_clk <= portA_clk mem_0_2.portA_addr <= portA_addr node portA_dout_0_2 = bits(mem_0_2.portA_dout, 15, 0) mem_0_2.portA_din <= bits(portA_din, 47, 32) mem_0_2.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_2.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 2, 2)), UInt<1>("h1")) + mem_0_2.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 2, 2)), UInt<1>("h1")) mem_0_3.portA_clk <= portA_clk mem_0_3.portA_addr <= portA_addr node portA_dout_0_3 = bits(mem_0_3.portA_dout, 15, 0) mem_0_3.portA_din <= bits(portA_din, 63, 48) mem_0_3.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_3.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 3, 3)), UInt<1>("h1")) + mem_0_3.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 3, 3)), UInt<1>("h1")) node portA_dout_0 = cat(portA_dout_0_3, cat(portA_dout_0_2, cat(portA_dout_0_1, portA_dout_0_0))) mem_0_0.portB_clk <= portB_clk mem_0_0.portB_addr <= portB_addr node portB_dout_0_0 = bits(mem_0_0.portB_dout, 15, 0) mem_0_0.portB_din <= bits(portB_din, 15, 0) mem_0_0.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_0.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 0, 0)), UInt<1>("h1")) + mem_0_0.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 0, 0)), UInt<1>("h1")) mem_0_1.portB_clk <= portB_clk mem_0_1.portB_addr <= portB_addr node portB_dout_0_1 = bits(mem_0_1.portB_dout, 15, 0) mem_0_1.portB_din <= bits(portB_din, 31, 16) mem_0_1.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_1.portB_write_en <= 
and(and(portB_write_en, bits(portB_mask, 1, 1)), UInt<1>("h1")) + mem_0_1.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 1, 1)), UInt<1>("h1")) mem_0_2.portB_clk <= portB_clk mem_0_2.portB_addr <= portB_addr node portB_dout_0_2 = bits(mem_0_2.portB_dout, 15, 0) mem_0_2.portB_din <= bits(portB_din, 47, 32) mem_0_2.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_2.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 2, 2)), UInt<1>("h1")) + mem_0_2.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 2, 2)), UInt<1>("h1")) mem_0_3.portB_clk <= portB_clk mem_0_3.portB_addr <= portB_addr node portB_dout_0_3 = bits(mem_0_3.portB_dout, 15, 0) mem_0_3.portB_din <= bits(portB_din, 63, 48) mem_0_3.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_3.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 3, 3)), UInt<1>("h1")) + mem_0_3.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 3, 3)), UInt<1>("h1")) node portB_dout_0 = cat(portB_dout_0_3, cat(portB_dout_0_2, cat(portB_dout_0_1, portB_dout_0_0))) portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<1>("h0")) portB_dout <= mux(UInt<1>("h1"), portB_dout_0, UInt<1>("h0")) @@ -185,19 +185,19 @@ class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasS mem_0_0.portB_clk <= portB_clk mem_0_0.portB_addr <= portB_addr mem_0_0.portB_din <= bits(portB_din, 15, 0) - mem_0_0.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 0, 0)), UInt<1>("h1")) + mem_0_0.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 0, 0)), UInt<1>("h1")) mem_0_1.portB_clk <= portB_clk mem_0_1.portB_addr <= portB_addr mem_0_1.portB_din <= bits(portB_din, 31, 16) - mem_0_1.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 1, 1)), UInt<1>("h1")) + mem_0_1.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 1, 1)), UInt<1>("h1")) mem_0_2.portB_clk <= portB_clk mem_0_2.portB_addr <= portB_addr mem_0_2.portB_din <= bits(portB_din, 47, 32) - mem_0_2.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 2, 2)), UInt<1>("h1")) + mem_0_2.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 2, 2)), UInt<1>("h1")) mem_0_3.portB_clk <= portB_clk mem_0_3.portB_addr <= portB_addr mem_0_3.portB_din <= bits(portB_din, 63, 48) - mem_0_3.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 3, 3)), UInt<1>("h1")) + mem_0_3.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 3, 3)), UInt<1>("h1")) mem_0_0.portA_clk <= portA_clk mem_0_0.portA_addr <= portA_addr node portA_dout_0_0 = bits(mem_0_0.portA_dout, 15, 0) @@ -291,98 +291,98 @@ class SplitWidth_2rw_differentMasks extends MacroCompilerSpec with HasSRAMGenera node portA_dout_0_0 = bits(mem_0_0.portA_dout, 7, 0) mem_0_0.portA_din <= bits(portA_din, 7, 0) mem_0_0.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_0.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 0, 0)), UInt<1>("h1")) + mem_0_0.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 0, 0)), UInt<1>("h1")) mem_0_1.portA_clk <= portA_clk mem_0_1.portA_addr <= portA_addr node portA_dout_0_1 = bits(mem_0_1.portA_dout, 7, 0) mem_0_1.portA_din <= bits(portA_din, 15, 8) mem_0_1.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_1.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 0, 0)), UInt<1>("h1")) + mem_0_1.portA_write_en <= and(and(and(portA_write_en, 
UInt<1>("h1")), bits(portA_mask, 0, 0)), UInt<1>("h1")) mem_0_2.portA_clk <= portA_clk mem_0_2.portA_addr <= portA_addr node portA_dout_0_2 = bits(mem_0_2.portA_dout, 7, 0) mem_0_2.portA_din <= bits(portA_din, 23, 16) mem_0_2.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_2.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 1, 1)), UInt<1>("h1")) + mem_0_2.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 1, 1)), UInt<1>("h1")) mem_0_3.portA_clk <= portA_clk mem_0_3.portA_addr <= portA_addr node portA_dout_0_3 = bits(mem_0_3.portA_dout, 7, 0) mem_0_3.portA_din <= bits(portA_din, 31, 24) mem_0_3.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_3.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 1, 1)), UInt<1>("h1")) + mem_0_3.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 1, 1)), UInt<1>("h1")) mem_0_4.portA_clk <= portA_clk mem_0_4.portA_addr <= portA_addr node portA_dout_0_4 = bits(mem_0_4.portA_dout, 7, 0) mem_0_4.portA_din <= bits(portA_din, 39, 32) mem_0_4.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_4.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 2, 2)), UInt<1>("h1")) + mem_0_4.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 2, 2)), UInt<1>("h1")) mem_0_5.portA_clk <= portA_clk mem_0_5.portA_addr <= portA_addr node portA_dout_0_5 = bits(mem_0_5.portA_dout, 7, 0) mem_0_5.portA_din <= bits(portA_din, 47, 40) mem_0_5.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_5.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 2, 2)), UInt<1>("h1")) + mem_0_5.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 2, 2)), UInt<1>("h1")) mem_0_6.portA_clk <= portA_clk mem_0_6.portA_addr <= portA_addr node portA_dout_0_6 = bits(mem_0_6.portA_dout, 7, 0) mem_0_6.portA_din <= bits(portA_din, 55, 48) mem_0_6.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_6.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 3, 3)), UInt<1>("h1")) + mem_0_6.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 3, 3)), UInt<1>("h1")) mem_0_7.portA_clk <= portA_clk mem_0_7.portA_addr <= portA_addr node portA_dout_0_7 = bits(mem_0_7.portA_dout, 7, 0) mem_0_7.portA_din <= bits(portA_din, 63, 56) mem_0_7.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_7.portA_write_en <= and(and(portA_write_en, bits(portA_mask, 3, 3)), UInt<1>("h1")) + mem_0_7.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 3, 3)), UInt<1>("h1")) node portA_dout_0 = cat(portA_dout_0_7, cat(portA_dout_0_6, cat(portA_dout_0_5, cat(portA_dout_0_4, cat(portA_dout_0_3, cat(portA_dout_0_2, cat(portA_dout_0_1, portA_dout_0_0))))))) mem_0_0.portB_clk <= portB_clk mem_0_0.portB_addr <= portB_addr node portB_dout_0_0 = bits(mem_0_0.portB_dout, 7, 0) mem_0_0.portB_din <= bits(portB_din, 7, 0) mem_0_0.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_0.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 0, 0)), UInt<1>("h1")) + mem_0_0.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 0, 0)), UInt<1>("h1")) mem_0_1.portB_clk <= portB_clk mem_0_1.portB_addr <= portB_addr node portB_dout_0_1 = bits(mem_0_1.portB_dout, 7, 0) mem_0_1.portB_din <= bits(portB_din, 15, 8) mem_0_1.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_1.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 1, 1)), UInt<1>("h1")) + mem_0_1.portB_write_en <= 
and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 1, 1)), UInt<1>("h1")) mem_0_2.portB_clk <= portB_clk mem_0_2.portB_addr <= portB_addr node portB_dout_0_2 = bits(mem_0_2.portB_dout, 7, 0) mem_0_2.portB_din <= bits(portB_din, 23, 16) mem_0_2.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_2.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 2, 2)), UInt<1>("h1")) + mem_0_2.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 2, 2)), UInt<1>("h1")) mem_0_3.portB_clk <= portB_clk mem_0_3.portB_addr <= portB_addr node portB_dout_0_3 = bits(mem_0_3.portB_dout, 7, 0) mem_0_3.portB_din <= bits(portB_din, 31, 24) mem_0_3.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_3.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 3, 3)), UInt<1>("h1")) + mem_0_3.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 3, 3)), UInt<1>("h1")) mem_0_4.portB_clk <= portB_clk mem_0_4.portB_addr <= portB_addr node portB_dout_0_4 = bits(mem_0_4.portB_dout, 7, 0) mem_0_4.portB_din <= bits(portB_din, 39, 32) mem_0_4.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_4.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 4, 4)), UInt<1>("h1")) + mem_0_4.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 4, 4)), UInt<1>("h1")) mem_0_5.portB_clk <= portB_clk mem_0_5.portB_addr <= portB_addr node portB_dout_0_5 = bits(mem_0_5.portB_dout, 7, 0) mem_0_5.portB_din <= bits(portB_din, 47, 40) mem_0_5.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_5.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 5, 5)), UInt<1>("h1")) + mem_0_5.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 5, 5)), UInt<1>("h1")) mem_0_6.portB_clk <= portB_clk mem_0_6.portB_addr <= portB_addr node portB_dout_0_6 = bits(mem_0_6.portB_dout, 7, 0) mem_0_6.portB_din <= bits(portB_din, 55, 48) mem_0_6.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_6.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 6, 6)), UInt<1>("h1")) + mem_0_6.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 6, 6)), UInt<1>("h1")) mem_0_7.portB_clk <= portB_clk mem_0_7.portB_addr <= portB_addr node portB_dout_0_7 = bits(mem_0_7.portB_dout, 7, 0) mem_0_7.portB_din <= bits(portB_din, 63, 56) mem_0_7.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_7.portB_write_en <= and(and(portB_write_en, bits(portB_mask, 7, 7)), UInt<1>("h1")) + mem_0_7.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 7, 7)), UInt<1>("h1")) node portB_dout_0 = cat(portB_dout_0_7, cat(portB_dout_0_6, cat(portB_dout_0_5, cat(portB_dout_0_4, cat(portB_dout_0_3, cat(portB_dout_0_2, cat(portB_dout_0_1, portB_dout_0_0))))))) portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<1>("h0")) portB_dout <= mux(UInt<1>("h1"), portB_dout_0, UInt<1>("h0")) diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index 18b4a9302..8df8ec7ee 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -31,6 +31,8 @@ s""" for (i <- 0 to depthInstances - 1) { val maskStatement = generateMaskStatement(0, i) val enableIdentifier = if (selectBits > 0) s"""eq(${memPortPrefix}_addr_sel, UInt<${selectBits}>("h${i.toHexString}"))""" else "UInt<1>(\"h1\")" + val chipEnable = s"""UInt<1>("h1")""" + val writeEnable = if (memMaskGran.isEmpty) s"and(${memPortPrefix}_write_en, 
${chipEnable})" else s"${memPortPrefix}_write_en" output.append( s""" inst mem_${i}_0 of ${lib_name} @@ -39,7 +41,7 @@ s""" node ${memPortPrefix}_dout_${i}_0 = bits(mem_${i}_0.${libPortPrefix}_dout, ${width - 1}, 0) mem_${i}_0.${libPortPrefix}_din <= bits(${memPortPrefix}_din, ${width - 1}, 0) ${maskStatement} - mem_${i}_0.${libPortPrefix}_write_en <= and(and(${memPortPrefix}_write_en, UInt<1>("h1")), ${enableIdentifier}) + mem_${i}_0.${libPortPrefix}_write_en <= and(and(${writeEnable}, UInt<1>("h1")), ${enableIdentifier}) node ${memPortPrefix}_dout_${i} = ${memPortPrefix}_dout_${i}_0 """ ) @@ -273,7 +275,7 @@ circuit target_memory : node outer_dout_0_0 = bits(mem_0_0.lib_dout, 7, 0) mem_0_0.lib_din <= bits(outer_din, 7, 0) - mem_0_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), eq(outer_addr_sel, UInt<1>("h0"))) + mem_0_0.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outer_addr_sel, UInt<1>("h0"))) node outer_dout_0 = outer_dout_0_0 inst mem_1_0 of awesome_lib_mem @@ -283,7 +285,7 @@ circuit target_memory : node outer_dout_1_0 = bits(mem_1_0.lib_dout, 7, 0) mem_1_0.lib_din <= bits(outer_din, 7, 0) - mem_1_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), eq(outer_addr_sel, UInt<1>("h1"))) + mem_1_0.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outer_addr_sel, UInt<1>("h1"))) node outer_dout_1 = outer_dout_1_0 outer_dout <= mux(eq(outer_addr_sel_reg, UInt<1>("h0")), outer_dout_0, mux(eq(outer_addr_sel_reg, UInt<1>("h1")), outer_dout_1, UInt<1>("h0"))) extmodule awesome_lib_mem : @@ -362,7 +364,7 @@ circuit target_memory : mem_0_0.innerB_clk <= outerA_clk mem_0_0.innerB_addr <= outerA_addr mem_0_0.innerB_din <= bits(outerA_din, 7, 0) - mem_0_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) + mem_0_0.innerB_write_en <= and(and(and(outerA_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) mem_0_0.innerA_clk <= outerB_clk mem_0_0.innerA_addr <= outerB_addr node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) @@ -371,7 +373,7 @@ circuit target_memory : mem_1_0.innerB_clk <= outerA_clk mem_1_0.innerB_addr <= outerA_addr mem_1_0.innerB_din <= bits(outerA_din, 7, 0) - mem_1_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) + mem_1_0.innerB_write_en <= and(and(and(outerA_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) mem_1_0.innerA_clk <= outerB_clk mem_1_0.innerA_addr <= outerB_addr node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) diff --git a/macros/src/test/scala/SimpleSplitWidth.scala b/macros/src/test/scala/SimpleSplitWidth.scala index 3d26c18db..9cc10f9d9 100644 --- a/macros/src/test/scala/SimpleSplitWidth.scala +++ b/macros/src/test/scala/SimpleSplitWidth.scala @@ -38,6 +38,8 @@ trait HasSimpleWidthTestGenerator extends HasSimpleTestGenerator { val outerMaskBit = myBaseBit / memMaskGran.get s"bits(outer_mask, ${outerMaskBit}, ${outerMaskBit})" } else """UInt<1>("h1")""" + val chipEnable = s"""UInt<1>("h1")""" + val writeEnableExpr = if (libMaskGran.isEmpty) s"and(${memPortPrefix}_write_en, ${chipEnable})" else s"${memPortPrefix}_write_en" s""" mem_0_${i}.${libPortPrefix}_clk <= ${memPortPrefix}_clk @@ -45,7 +47,7 @@ s""" node ${memPortPrefix}_dout_0_${i} = bits(mem_0_${i}.${libPortPrefix}_dout, ${myMemWidth - 1}, 0) mem_0_${i}.${libPortPrefix}_din <= bits(${memPortPrefix}_din, ${myBaseBit + myMemWidth - 1}, ${myBaseBit}) ${maskStatement} - 
mem_0_${i}.${libPortPrefix}_write_en <= and(and(${memPortPrefix}_write_en, ${writeEnableBit}), UInt<1>("h1")) + mem_0_${i}.${libPortPrefix}_write_en <= and(and(${writeEnableExpr}, ${writeEnableBit}), UInt<1>("h1")) """ }).reduceLeft(_ + _) @@ -415,26 +417,26 @@ class SplitWidth1024x32_readEnable_Lib extends MacroCompilerSpec with HasSRAMGen mem_0_0.lib_addr <= outer_addr node outer_dout_0_0 = bits(mem_0_0.lib_dout, 7, 0) mem_0_0.lib_din <= bits(outer_din, 7, 0) - mem_0_0.lib_read_en <= and(not(outer_write_en), UInt<1>("h1")) - mem_0_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_0.lib_read_en <= and(and(not(outer_write_en), UInt<1>("h1")), UInt<1>("h1")) + mem_0_0.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) mem_0_1.lib_clk <= outer_clk mem_0_1.lib_addr <= outer_addr node outer_dout_0_1 = bits(mem_0_1.lib_dout, 7, 0) mem_0_1.lib_din <= bits(outer_din, 15, 8) - mem_0_1.lib_read_en <= and(not(outer_write_en), UInt<1>("h1")) - mem_0_1.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_1.lib_read_en <= and(and(not(outer_write_en), UInt<1>("h1")), UInt<1>("h1")) + mem_0_1.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) mem_0_2.lib_clk <= outer_clk mem_0_2.lib_addr <= outer_addr node outer_dout_0_2 = bits(mem_0_2.lib_dout, 7, 0) mem_0_2.lib_din <= bits(outer_din, 23, 16) - mem_0_2.lib_read_en <= and(not(outer_write_en), UInt<1>("h1")) - mem_0_2.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_2.lib_read_en <= and(and(not(outer_write_en), UInt<1>("h1")), UInt<1>("h1")) + mem_0_2.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) mem_0_3.lib_clk <= outer_clk mem_0_3.lib_addr <= outer_addr node outer_dout_0_3 = bits(mem_0_3.lib_dout, 7, 0) mem_0_3.lib_din <= bits(outer_din, 31, 24) - mem_0_3.lib_read_en <= and(not(outer_write_en), UInt<1>("h1")) - mem_0_3.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_3.lib_read_en <= and(and(not(outer_write_en), UInt<1>("h1")), UInt<1>("h1")) + mem_0_3.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) node outer_dout_0 = cat(outer_dout_0_3, cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0))) outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0")) """ @@ -514,25 +516,25 @@ class SplitWidth1024x32_readEnable_LibMem extends MacroCompilerSpec with HasSRAM node outer_dout_0_0 = bits(mem_0_0.lib_dout, 7, 0) mem_0_0.lib_din <= bits(outer_din, 7, 0) mem_0_0.lib_read_en <= and(outer_read_en, UInt<1>("h1")) - mem_0_0.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_0.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) mem_0_1.lib_clk <= outer_clk mem_0_1.lib_addr <= outer_addr node outer_dout_0_1 = bits(mem_0_1.lib_dout, 7, 0) mem_0_1.lib_din <= bits(outer_din, 15, 8) mem_0_1.lib_read_en <= and(outer_read_en, UInt<1>("h1")) - mem_0_1.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_1.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) mem_0_2.lib_clk <= outer_clk mem_0_2.lib_addr <= outer_addr node outer_dout_0_2 = bits(mem_0_2.lib_dout, 7, 0) mem_0_2.lib_din <= bits(outer_din, 23, 16) mem_0_2.lib_read_en <= and(outer_read_en, UInt<1>("h1")) - mem_0_2.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + 
mem_0_2.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) mem_0_3.lib_clk <= outer_clk mem_0_3.lib_addr <= outer_addr node outer_dout_0_3 = bits(mem_0_3.lib_dout, 7, 0) mem_0_3.lib_din <= bits(outer_din, 31, 24) mem_0_3.lib_read_en <= and(outer_read_en, UInt<1>("h1")) - mem_0_3.lib_write_en <= and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_3.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) node outer_dout_0 = cat(outer_dout_0_3, cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0))) outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0")) """ diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala index 7179d20f0..56f4500f2 100644 --- a/macros/src/test/scala/SpecificExamples.scala +++ b/macros/src/test/scala/SpecificExamples.scala @@ -59,34 +59,34 @@ class WriteEnableTest extends MacroCompilerSpec with HasSRAMGenerator { val output = """ - circuit cc_banks_0_ext : - module cc_banks_0_ext : - input RW0_addr : UInt<12> - input RW0_clk : Clock - input RW0_wdata : UInt<64> - output RW0_rdata : UInt<64> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - - inst mem_0_0 of fake_mem - mem_0_0.clk <= RW0_clk - mem_0_0.addr <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.dataout, 63, 0) - mem_0_0.datain <= bits(RW0_wdata, 63, 0) - mem_0_0.ren <= and(and(not(RW0_wmode), RW0_en), UInt<1>("h1")) - mem_0_0.wen <= and(and(and(RW0_wmode, RW0_en), UInt<1>("h1")), UInt<1>("h1")) - node RW0_rdata_0 = RW0_rdata_0_0 - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) - - extmodule fake_mem : - input addr : UInt<12> - input clk : Clock - input datain : UInt<64> - output dataout : UInt<64> - input ren : UInt<1> - input wen : UInt<1> - - defname = fake_mem +circuit cc_banks_0_ext : + module cc_banks_0_ext : + input RW0_addr : UInt<12> + input RW0_clk : Clock + input RW0_wdata : UInt<64> + output RW0_rdata : UInt<64> + input RW0_en : UInt<1> + input RW0_wmode : UInt<1> + + inst mem_0_0 of fake_mem + mem_0_0.clk <= RW0_clk + mem_0_0.addr <= RW0_addr + node RW0_rdata_0_0 = bits(mem_0_0.dataout, 63, 0) + mem_0_0.datain <= bits(RW0_wdata, 63, 0) + mem_0_0.ren <= and(and(not(RW0_wmode), RW0_en), UInt<1>("h1")) + mem_0_0.wen <= and(and(and(RW0_wmode, RW0_en), UInt<1>("h1")), UInt<1>("h1")) + node RW0_rdata_0 = RW0_rdata_0_0 + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + + extmodule fake_mem : + input addr : UInt<12> + input clk : Clock + input datain : UInt<64> + output dataout : UInt<64> + input ren : UInt<1> + input wen : UInt<1> + + defname = fake_mem """ compileExecuteAndTest(mem, lib, v, output) @@ -148,14 +148,14 @@ circuit cc_dir_ext : mem_0_0.addr <= RW0_addr node RW0_rdata_0_0 = bits(mem_0_0.dataout, 63, 0) mem_0_0.datain <= bits(RW0_wdata, 63, 0) - mem_0_0.ren <= and(not(RW0_wmode), UInt<1>("h1")) + mem_0_0.ren <= and(and(not(RW0_wmode), RW0_en), UInt<1>("h1")) mem_0_0.mport <= not(cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), 
cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), bits(RW0_wmask, 0, 0))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) mem_0_0.wen <= and(and(RW0_wmode, RW0_en), UInt<1>("h1")) mem_0_1.clk <= RW0_clk mem_0_1.addr <= RW0_addr node RW0_rdata_0_1 = bits(mem_0_1.dataout, 63, 0) mem_0_1.datain <= bits(RW0_wdata, 127, 64) - mem_0_1.ren <= and(not(RW0_wmode), UInt<1>("h1")) + mem_0_1.ren <= and(and(not(RW0_wmode), RW0_en), UInt<1>("h1")) mem_0_1.mport <= not(cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), bits(RW0_wmask, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) mem_0_1.wen <= and(and(RW0_wmode, RW0_en), UInt<1>("h1")) node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) @@ -379,53 +379,53 @@ circuit smem_0_ext : mem_0_0.CE1 <= W0_clk 
mem_0_0.A1 <= W0_addr mem_0_0.I1 <= bits(W0_data, 21, 0) - mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h0")))) mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<1>("h0")))) mem_0_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h0")))) mem_0_1.CE1 <= W0_clk mem_0_1.A1 <= W0_addr mem_0_1.I1 <= bits(W0_data, 43, 22) - mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h0")))) mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 1, 1)), eq(W0_addr_sel, UInt<1>("h0")))) mem_0_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h0")))) mem_0_2.CE1 <= W0_clk mem_0_2.A1 <= W0_addr mem_0_2.I1 <= bits(W0_data, 65, 44) - mem_0_2.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_2.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h0")))) mem_0_2.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 2, 2)), eq(W0_addr_sel, UInt<1>("h0")))) mem_0_2.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h0")))) mem_0_3.CE1 <= W0_clk mem_0_3.A1 <= W0_addr mem_0_3.I1 <= bits(W0_data, 87, 66) - mem_0_3.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h0")))) + mem_0_3.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h0")))) mem_0_3.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 3, 3)), eq(W0_addr_sel, UInt<1>("h0")))) mem_0_3.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h0")))) mem_0_0.CE2 <= R0_clk mem_0_0.A2 <= R0_addr node R0_data_0_0 = bits(mem_0_0.O2, 21, 0) mem_0_0.I2 is invalid - mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) mem_0_1.CE2 <= R0_clk mem_0_1.A2 <= R0_addr node R0_data_0_1 = bits(mem_0_1.O2, 21, 0) mem_0_1.I2 is invalid - mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) mem_0_2.CE2 <= R0_clk mem_0_2.A2 <= R0_addr node R0_data_0_2 = bits(mem_0_2.O2, 21, 0) mem_0_2.I2 is invalid - mem_0_2.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_2.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_2.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) mem_0_3.CE2 <= R0_clk mem_0_3.A2 <= R0_addr node R0_data_0_3 = bits(mem_0_3.O2, 21, 0) mem_0_3.I2 is invalid - mem_0_3.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h0")))) + mem_0_3.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) mem_0_3.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) node R0_data_0 = cat(R0_data_0_3, cat(R0_data_0_2, cat(R0_data_0_1, R0_data_0_0))) @@ -436,53 +436,53 @@ circuit smem_0_ext : mem_1_0.CE1 <= W0_clk mem_1_0.A1 <= W0_addr mem_1_0.I1 <= 
bits(W0_data, 21, 0) - mem_1_0.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h1")))) mem_1_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<1>("h1")))) mem_1_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h1")))) mem_1_1.CE1 <= W0_clk mem_1_1.A1 <= W0_addr mem_1_1.I1 <= bits(W0_data, 43, 22) - mem_1_1.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h1")))) mem_1_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 1, 1)), eq(W0_addr_sel, UInt<1>("h1")))) mem_1_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h1")))) mem_1_2.CE1 <= W0_clk mem_1_2.A1 <= W0_addr mem_1_2.I1 <= bits(W0_data, 65, 44) - mem_1_2.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_2.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h1")))) mem_1_2.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 2, 2)), eq(W0_addr_sel, UInt<1>("h1")))) mem_1_2.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h1")))) mem_1_3.CE1 <= W0_clk mem_1_3.A1 <= W0_addr mem_1_3.I1 <= bits(W0_data, 87, 66) - mem_1_3.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<1>("h1")))) + mem_1_3.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h1")))) mem_1_3.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 3, 3)), eq(W0_addr_sel, UInt<1>("h1")))) mem_1_3.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h1")))) mem_1_0.CE2 <= R0_clk mem_1_0.A2 <= R0_addr node R0_data_1_0 = bits(mem_1_0.O2, 21, 0) mem_1_0.I2 is invalid - mem_1_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) mem_1_1.CE2 <= R0_clk mem_1_1.A2 <= R0_addr node R0_data_1_1 = bits(mem_1_1.O2, 21, 0) mem_1_1.I2 is invalid - mem_1_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) mem_1_2.CE2 <= R0_clk mem_1_2.A2 <= R0_addr node R0_data_1_2 = bits(mem_1_2.O2, 21, 0) mem_1_2.I2 is invalid - mem_1_2.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_2.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_2.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) mem_1_3.CE2 <= R0_clk mem_1_3.A2 <= R0_addr node R0_data_1_3 = bits(mem_1_3.O2, 21, 0) mem_1_3.I2 is invalid - mem_1_3.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<1>("h1")))) + mem_1_3.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_3.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) node R0_data_1 = cat(R0_data_1_3, cat(R0_data_1_2, cat(R0_data_1_1, R0_data_1_0))) @@ -510,27 +510,27 @@ circuit smem_0_ext : mem_0_0.CE1 <= W0_clk mem_0_0.A1 <= W0_addr mem_0_0.I1 <= bits(W0_data, 31, 0) - mem_0_0.OEB1 <= 
not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h0")))) + mem_0_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h0")))) mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h0")))) mem_0_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h0")))) mem_0_1.CE1 <= W0_clk mem_0_1.A1 <= W0_addr mem_0_1.I1 <= bits(W0_data, 63, 32) - mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h0")))) + mem_0_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h0")))) mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h0")))) mem_0_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h0")))) mem_0_0.CE2 <= R0_clk mem_0_0.A2 <= R0_addr node R0_data_0_0 = bits(mem_0_0.O2, 31, 0) mem_0_0.I2 is invalid - mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h0")))) + mem_0_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h0")))) mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h0")))) mem_0_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h0")))) mem_0_1.CE2 <= R0_clk mem_0_1.A2 <= R0_addr node R0_data_0_1 = bits(mem_0_1.O2, 31, 0) mem_0_1.I2 is invalid - mem_0_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h0")))) + mem_0_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h0")))) mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h0")))) mem_0_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h0")))) node R0_data_0 = cat(R0_data_0_1, R0_data_0_0) @@ -539,27 +539,27 @@ circuit smem_0_ext : mem_1_0.CE1 <= W0_clk mem_1_0.A1 <= W0_addr mem_1_0.I1 <= bits(W0_data, 31, 0) - mem_1_0.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h1")))) + mem_1_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h1")))) mem_1_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h1")))) mem_1_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h1")))) mem_1_1.CE1 <= W0_clk mem_1_1.A1 <= W0_addr mem_1_1.I1 <= bits(W0_data, 63, 32) - mem_1_1.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h1")))) + mem_1_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h1")))) mem_1_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h1")))) mem_1_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h1")))) mem_1_0.CE2 <= R0_clk mem_1_0.A2 <= R0_addr node R0_data_1_0 = bits(mem_1_0.O2, 31, 0) mem_1_0.I2 is invalid - mem_1_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h1")))) + mem_1_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h1")))) mem_1_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h1")))) mem_1_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h1")))) mem_1_1.CE2 <= R0_clk mem_1_1.A2 <= R0_addr node R0_data_1_1 = bits(mem_1_1.O2, 31, 0) mem_1_1.I2 is invalid - mem_1_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h1")))) + mem_1_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h1")))) mem_1_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h1")))) mem_1_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h1")))) node R0_data_1 = cat(R0_data_1_1, R0_data_1_0) @@ -568,27 +568,27 @@ circuit smem_0_ext : mem_2_0.CE1 <= W0_clk mem_2_0.A1 <= W0_addr mem_2_0.I1 <= bits(W0_data, 31, 0) - 
mem_2_0.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h2")))) + mem_2_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h2")))) mem_2_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h2")))) mem_2_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h2")))) mem_2_1.CE1 <= W0_clk mem_2_1.A1 <= W0_addr mem_2_1.I1 <= bits(W0_data, 63, 32) - mem_2_1.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h2")))) + mem_2_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h2")))) mem_2_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h2")))) mem_2_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h2")))) mem_2_0.CE2 <= R0_clk mem_2_0.A2 <= R0_addr node R0_data_2_0 = bits(mem_2_0.O2, 31, 0) mem_2_0.I2 is invalid - mem_2_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h2")))) + mem_2_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h2")))) mem_2_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h2")))) mem_2_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h2")))) mem_2_1.CE2 <= R0_clk mem_2_1.A2 <= R0_addr node R0_data_2_1 = bits(mem_2_1.O2, 31, 0) mem_2_1.I2 is invalid - mem_2_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h2")))) + mem_2_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h2")))) mem_2_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h2")))) mem_2_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h2")))) node R0_data_2 = cat(R0_data_2_1, R0_data_2_0) @@ -597,27 +597,27 @@ circuit smem_0_ext : mem_3_0.CE1 <= W0_clk mem_3_0.A1 <= W0_addr mem_3_0.I1 <= bits(W0_data, 31, 0) - mem_3_0.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h3")))) + mem_3_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h3")))) mem_3_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h3")))) mem_3_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h3")))) mem_3_1.CE1 <= W0_clk mem_3_1.A1 <= W0_addr mem_3_1.I1 <= bits(W0_data, 63, 32) - mem_3_1.OEB1 <= not(and(not(UInt<1>("h1")), eq(W0_addr_sel, UInt<2>("h3")))) + mem_3_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h3")))) mem_3_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h3")))) mem_3_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h3")))) mem_3_0.CE2 <= R0_clk mem_3_0.A2 <= R0_addr node R0_data_3_0 = bits(mem_3_0.O2, 31, 0) mem_3_0.I2 is invalid - mem_3_0.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h3")))) + mem_3_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h3")))) mem_3_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h3")))) mem_3_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h3")))) mem_3_1.CE2 <= R0_clk mem_3_1.A2 <= R0_addr node R0_data_3_1 = bits(mem_3_1.O2, 31, 0) mem_3_1.I2 is invalid - mem_3_1.OEB2 <= not(and(not(UInt<1>("h0")), eq(R0_addr_sel, UInt<2>("h3")))) + mem_3_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h3")))) mem_3_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h3")))) mem_3_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h3")))) node R0_data_3 = cat(R0_data_3_1, R0_data_3_0) @@ -659,28 +659,28 @@ circuit smem_0_ext : mem_0_0.A <= RW0_addr node RW0_rdata_0_0 = bits(mem_0_0.O, 19, 0) 
mem_0_0.I <= bits(RW0_wdata, 19, 0) - mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_0.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) mem_0_0.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_1.CE <= RW0_clk mem_0_1.A <= RW0_addr node RW0_rdata_0_1 = bits(mem_0_1.O, 19, 0) mem_0_1.I <= bits(RW0_wdata, 39, 20) - mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_1.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_2.CE <= RW0_clk mem_0_2.A <= RW0_addr node RW0_rdata_0_2 = bits(mem_0_2.O, 19, 0) mem_0_2.I <= bits(RW0_wdata, 59, 40) - mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_2.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) mem_0_2.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_3.CE <= RW0_clk mem_0_3.A <= RW0_addr node RW0_rdata_0_3 = bits(mem_0_3.O, 19, 0) mem_0_3.I <= bits(RW0_wdata, 79, 60) - mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_3.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) mem_0_3.CSB <= not(and(RW0_en, UInt<1>("h1"))) node RW0_rdata_0 = cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))) @@ -712,14 +712,14 @@ circuit smem_0_ext : mem_0_0.A <= RW0_addr node RW0_rdata_0_0 = bits(mem_0_0.O, 31, 0) mem_0_0.I <= bits(RW0_wdata, 31, 0) - mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_0.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_0.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) mem_0_0.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_1.CE <= RW0_clk mem_0_1.A <= RW0_addr node RW0_rdata_0_1 = bits(mem_0_1.O, 31, 0) mem_0_1.I <= bits(RW0_wdata, 63, 32) - mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_1.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_1.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) @@ -752,27 +752,27 @@ circuit smem_0_ext : mem_0_0.CE1 <= W0_clk mem_0_0.A1 <= W0_addr mem_0_0.I1 <= bits(W0_data, 21, 0) - mem_0_0.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), UInt<1>("h1"))) mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) mem_0_0.CSB1 <= not(and(W0_en, UInt<1>("h1"))) mem_0_1.CE1 <= W0_clk mem_0_1.A1 <= W0_addr mem_0_1.I1 <= bits(W0_data, 39, 22) - mem_0_1.OEB1 <= not(and(not(UInt<1>("h1")), UInt<1>("h1"))) + mem_0_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), UInt<1>("h1"))) mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) mem_0_1.CSB1 <= not(and(W0_en, UInt<1>("h1"))) mem_0_0.CE2 <= R0_clk mem_0_0.A2 <= R0_addr node R0_data_0_0 = bits(mem_0_0.O2, 21, 0) mem_0_0.I2 is invalid - mem_0_0.OEB2 <= not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), UInt<1>("h1"))) mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) mem_0_0.CSB2 <= not(and(R0_en, UInt<1>("h1"))) mem_0_1.CE2 <= R0_clk mem_0_1.A2 <= R0_addr node R0_data_0_1 = bits(mem_0_1.O2, 17, 0) mem_0_1.I2 is invalid - mem_0_1.OEB2 <= 
not(and(not(UInt<1>("h0")), UInt<1>("h1"))) + mem_0_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), UInt<1>("h1"))) mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) mem_0_1.CSB2 <= not(and(R0_en, UInt<1>("h1"))) node R0_data_0 = cat(R0_data_0_1, R0_data_0_0) @@ -842,224 +842,224 @@ circuit smem_0_ext : mem_0_0.A <= RW0_addr node RW0_rdata_0_0 = bits(mem_0_0.O, 0, 0) mem_0_0.I <= bits(RW0_wdata, 0, 0) - mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_0.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) mem_0_0.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_1.CE <= RW0_clk mem_0_1.A <= RW0_addr node RW0_rdata_0_1 = bits(mem_0_1.O, 0, 0) mem_0_1.I <= bits(RW0_wdata, 1, 1) - mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_1.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_2.CE <= RW0_clk mem_0_2.A <= RW0_addr node RW0_rdata_0_2 = bits(mem_0_2.O, 0, 0) mem_0_2.I <= bits(RW0_wdata, 2, 2) - mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_2.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) mem_0_2.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_3.CE <= RW0_clk mem_0_3.A <= RW0_addr node RW0_rdata_0_3 = bits(mem_0_3.O, 0, 0) mem_0_3.I <= bits(RW0_wdata, 3, 3) - mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_3.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) mem_0_3.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_4.CE <= RW0_clk mem_0_4.A <= RW0_addr node RW0_rdata_0_4 = bits(mem_0_4.O, 0, 0) mem_0_4.I <= bits(RW0_wdata, 4, 4) - mem_0_4.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_4.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_4.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 4, 4)), UInt<1>("h1"))) mem_0_4.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_5.CE <= RW0_clk mem_0_5.A <= RW0_addr node RW0_rdata_0_5 = bits(mem_0_5.O, 0, 0) mem_0_5.I <= bits(RW0_wdata, 5, 5) - mem_0_5.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_5.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_5.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 5, 5)), UInt<1>("h1"))) mem_0_5.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_6.CE <= RW0_clk mem_0_6.A <= RW0_addr node RW0_rdata_0_6 = bits(mem_0_6.O, 0, 0) mem_0_6.I <= bits(RW0_wdata, 6, 6) - mem_0_6.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_6.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_6.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 6, 6)), UInt<1>("h1"))) mem_0_6.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_7.CE <= RW0_clk mem_0_7.A <= RW0_addr node RW0_rdata_0_7 = bits(mem_0_7.O, 0, 0) mem_0_7.I <= bits(RW0_wdata, 7, 7) - mem_0_7.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_7.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_7.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1"))) mem_0_7.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_8.CE <= RW0_clk mem_0_8.A <= RW0_addr node RW0_rdata_0_8 = bits(mem_0_8.O, 0, 0) mem_0_8.I <= bits(RW0_wdata, 8, 8) - mem_0_8.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_8.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) 
mem_0_8.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 8, 8)), UInt<1>("h1"))) mem_0_8.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_9.CE <= RW0_clk mem_0_9.A <= RW0_addr node RW0_rdata_0_9 = bits(mem_0_9.O, 0, 0) mem_0_9.I <= bits(RW0_wdata, 9, 9) - mem_0_9.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_9.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_9.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 9, 9)), UInt<1>("h1"))) mem_0_9.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_10.CE <= RW0_clk mem_0_10.A <= RW0_addr node RW0_rdata_0_10 = bits(mem_0_10.O, 0, 0) mem_0_10.I <= bits(RW0_wdata, 10, 10) - mem_0_10.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_10.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_10.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 10, 10)), UInt<1>("h1"))) mem_0_10.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_11.CE <= RW0_clk mem_0_11.A <= RW0_addr node RW0_rdata_0_11 = bits(mem_0_11.O, 0, 0) mem_0_11.I <= bits(RW0_wdata, 11, 11) - mem_0_11.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_11.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_11.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 11, 11)), UInt<1>("h1"))) mem_0_11.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_12.CE <= RW0_clk mem_0_12.A <= RW0_addr node RW0_rdata_0_12 = bits(mem_0_12.O, 0, 0) mem_0_12.I <= bits(RW0_wdata, 12, 12) - mem_0_12.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_12.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_12.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 12, 12)), UInt<1>("h1"))) mem_0_12.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_13.CE <= RW0_clk mem_0_13.A <= RW0_addr node RW0_rdata_0_13 = bits(mem_0_13.O, 0, 0) mem_0_13.I <= bits(RW0_wdata, 13, 13) - mem_0_13.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_13.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_13.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 13, 13)), UInt<1>("h1"))) mem_0_13.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_14.CE <= RW0_clk mem_0_14.A <= RW0_addr node RW0_rdata_0_14 = bits(mem_0_14.O, 0, 0) mem_0_14.I <= bits(RW0_wdata, 14, 14) - mem_0_14.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_14.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_14.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 14, 14)), UInt<1>("h1"))) mem_0_14.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_15.CE <= RW0_clk mem_0_15.A <= RW0_addr node RW0_rdata_0_15 = bits(mem_0_15.O, 0, 0) mem_0_15.I <= bits(RW0_wdata, 15, 15) - mem_0_15.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_15.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_15.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 15, 15)), UInt<1>("h1"))) mem_0_15.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_16.CE <= RW0_clk mem_0_16.A <= RW0_addr node RW0_rdata_0_16 = bits(mem_0_16.O, 0, 0) mem_0_16.I <= bits(RW0_wdata, 16, 16) - mem_0_16.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_16.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_16.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 16, 16)), UInt<1>("h1"))) mem_0_16.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_17.CE <= RW0_clk mem_0_17.A <= RW0_addr node RW0_rdata_0_17 = bits(mem_0_17.O, 0, 0) mem_0_17.I <= bits(RW0_wdata, 17, 17) - mem_0_17.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_17.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_17.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 17, 17)), UInt<1>("h1"))) 
mem_0_17.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_18.CE <= RW0_clk mem_0_18.A <= RW0_addr node RW0_rdata_0_18 = bits(mem_0_18.O, 0, 0) mem_0_18.I <= bits(RW0_wdata, 18, 18) - mem_0_18.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_18.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_18.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 18, 18)), UInt<1>("h1"))) mem_0_18.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_19.CE <= RW0_clk mem_0_19.A <= RW0_addr node RW0_rdata_0_19 = bits(mem_0_19.O, 0, 0) mem_0_19.I <= bits(RW0_wdata, 19, 19) - mem_0_19.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_19.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_19.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 19, 19)), UInt<1>("h1"))) mem_0_19.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_20.CE <= RW0_clk mem_0_20.A <= RW0_addr node RW0_rdata_0_20 = bits(mem_0_20.O, 0, 0) mem_0_20.I <= bits(RW0_wdata, 20, 20) - mem_0_20.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_20.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_20.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 20, 20)), UInt<1>("h1"))) mem_0_20.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_21.CE <= RW0_clk mem_0_21.A <= RW0_addr node RW0_rdata_0_21 = bits(mem_0_21.O, 0, 0) mem_0_21.I <= bits(RW0_wdata, 21, 21) - mem_0_21.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_21.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_21.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 21, 21)), UInt<1>("h1"))) mem_0_21.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_22.CE <= RW0_clk mem_0_22.A <= RW0_addr node RW0_rdata_0_22 = bits(mem_0_22.O, 0, 0) mem_0_22.I <= bits(RW0_wdata, 22, 22) - mem_0_22.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_22.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_22.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 22, 22)), UInt<1>("h1"))) mem_0_22.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_23.CE <= RW0_clk mem_0_23.A <= RW0_addr node RW0_rdata_0_23 = bits(mem_0_23.O, 0, 0) mem_0_23.I <= bits(RW0_wdata, 23, 23) - mem_0_23.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_23.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_23.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 23, 23)), UInt<1>("h1"))) mem_0_23.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_24.CE <= RW0_clk mem_0_24.A <= RW0_addr node RW0_rdata_0_24 = bits(mem_0_24.O, 0, 0) mem_0_24.I <= bits(RW0_wdata, 24, 24) - mem_0_24.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_24.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_24.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 24, 24)), UInt<1>("h1"))) mem_0_24.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_25.CE <= RW0_clk mem_0_25.A <= RW0_addr node RW0_rdata_0_25 = bits(mem_0_25.O, 0, 0) mem_0_25.I <= bits(RW0_wdata, 25, 25) - mem_0_25.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_25.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_25.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 25, 25)), UInt<1>("h1"))) mem_0_25.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_26.CE <= RW0_clk mem_0_26.A <= RW0_addr node RW0_rdata_0_26 = bits(mem_0_26.O, 0, 0) mem_0_26.I <= bits(RW0_wdata, 26, 26) - mem_0_26.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_26.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_26.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 26, 26)), UInt<1>("h1"))) mem_0_26.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_27.CE <= RW0_clk 
mem_0_27.A <= RW0_addr node RW0_rdata_0_27 = bits(mem_0_27.O, 0, 0) mem_0_27.I <= bits(RW0_wdata, 27, 27) - mem_0_27.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_27.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_27.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 27, 27)), UInt<1>("h1"))) mem_0_27.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_28.CE <= RW0_clk mem_0_28.A <= RW0_addr node RW0_rdata_0_28 = bits(mem_0_28.O, 0, 0) mem_0_28.I <= bits(RW0_wdata, 28, 28) - mem_0_28.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_28.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_28.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 28, 28)), UInt<1>("h1"))) mem_0_28.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_29.CE <= RW0_clk mem_0_29.A <= RW0_addr node RW0_rdata_0_29 = bits(mem_0_29.O, 0, 0) mem_0_29.I <= bits(RW0_wdata, 29, 29) - mem_0_29.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_29.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_29.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 29, 29)), UInt<1>("h1"))) mem_0_29.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_30.CE <= RW0_clk mem_0_30.A <= RW0_addr node RW0_rdata_0_30 = bits(mem_0_30.O, 0, 0) mem_0_30.I <= bits(RW0_wdata, 30, 30) - mem_0_30.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_30.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_30.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 30, 30)), UInt<1>("h1"))) mem_0_30.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_31.CE <= RW0_clk mem_0_31.A <= RW0_addr node RW0_rdata_0_31 = bits(mem_0_31.O, 0, 0) mem_0_31.I <= bits(RW0_wdata, 31, 31) - mem_0_31.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_31.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_31.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 31, 31)), UInt<1>("h1"))) mem_0_31.CSB <= not(and(RW0_en, UInt<1>("h1"))) node RW0_rdata_0 = cat(RW0_rdata_0_31, cat(RW0_rdata_0_30, cat(RW0_rdata_0_29, cat(RW0_rdata_0_28, cat(RW0_rdata_0_27, cat(RW0_rdata_0_26, cat(RW0_rdata_0_25, cat(RW0_rdata_0_24, cat(RW0_rdata_0_23, cat(RW0_rdata_0_22, cat(RW0_rdata_0_21, cat(RW0_rdata_0_20, cat(RW0_rdata_0_19, cat(RW0_rdata_0_18, cat(RW0_rdata_0_17, cat(RW0_rdata_0_16, cat(RW0_rdata_0_15, cat(RW0_rdata_0_14, cat(RW0_rdata_0_13, cat(RW0_rdata_0_12, cat(RW0_rdata_0_11, cat(RW0_rdata_0_10, cat(RW0_rdata_0_9, cat(RW0_rdata_0_8, cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))))))))))))))))))))))))))) @@ -1110,224 +1110,224 @@ circuit smem_0_ext : mem_0_0.A <= RW0_addr node RW0_rdata_0_0 = bits(mem_0_0.O, 0, 0) mem_0_0.I <= bits(RW0_wdata, 0, 0) - mem_0_0.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_0.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) mem_0_0.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_1.CE <= RW0_clk mem_0_1.A <= RW0_addr node RW0_rdata_0_1 = bits(mem_0_1.O, 0, 0) mem_0_1.I <= bits(RW0_wdata, 1, 1) - mem_0_1.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_1.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_2.CE <= RW0_clk mem_0_2.A <= RW0_addr node RW0_rdata_0_2 = bits(mem_0_2.O, 0, 0) mem_0_2.I <= bits(RW0_wdata, 2, 2) - mem_0_2.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_2.OEB 
<= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) mem_0_2.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_3.CE <= RW0_clk mem_0_3.A <= RW0_addr node RW0_rdata_0_3 = bits(mem_0_3.O, 0, 0) mem_0_3.I <= bits(RW0_wdata, 3, 3) - mem_0_3.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_3.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) mem_0_3.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_4.CE <= RW0_clk mem_0_4.A <= RW0_addr node RW0_rdata_0_4 = bits(mem_0_4.O, 0, 0) mem_0_4.I <= bits(RW0_wdata, 4, 4) - mem_0_4.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_4.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_4.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 4, 4)), UInt<1>("h1"))) mem_0_4.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_5.CE <= RW0_clk mem_0_5.A <= RW0_addr node RW0_rdata_0_5 = bits(mem_0_5.O, 0, 0) mem_0_5.I <= bits(RW0_wdata, 5, 5) - mem_0_5.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_5.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_5.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 5, 5)), UInt<1>("h1"))) mem_0_5.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_6.CE <= RW0_clk mem_0_6.A <= RW0_addr node RW0_rdata_0_6 = bits(mem_0_6.O, 0, 0) mem_0_6.I <= bits(RW0_wdata, 6, 6) - mem_0_6.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_6.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_6.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 6, 6)), UInt<1>("h1"))) mem_0_6.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_7.CE <= RW0_clk mem_0_7.A <= RW0_addr node RW0_rdata_0_7 = bits(mem_0_7.O, 0, 0) mem_0_7.I <= bits(RW0_wdata, 7, 7) - mem_0_7.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_7.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_7.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1"))) mem_0_7.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_8.CE <= RW0_clk mem_0_8.A <= RW0_addr node RW0_rdata_0_8 = bits(mem_0_8.O, 0, 0) mem_0_8.I <= bits(RW0_wdata, 8, 8) - mem_0_8.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_8.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_8.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 8, 8)), UInt<1>("h1"))) mem_0_8.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_9.CE <= RW0_clk mem_0_9.A <= RW0_addr node RW0_rdata_0_9 = bits(mem_0_9.O, 0, 0) mem_0_9.I <= bits(RW0_wdata, 9, 9) - mem_0_9.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_9.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_9.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 9, 9)), UInt<1>("h1"))) mem_0_9.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_10.CE <= RW0_clk mem_0_10.A <= RW0_addr node RW0_rdata_0_10 = bits(mem_0_10.O, 0, 0) mem_0_10.I <= bits(RW0_wdata, 10, 10) - mem_0_10.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_10.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_10.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 10, 10)), UInt<1>("h1"))) mem_0_10.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_11.CE <= RW0_clk mem_0_11.A <= RW0_addr node RW0_rdata_0_11 = bits(mem_0_11.O, 0, 0) mem_0_11.I <= bits(RW0_wdata, 11, 11) - mem_0_11.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_11.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_11.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 11, 11)), UInt<1>("h1"))) mem_0_11.CSB <= 
not(and(RW0_en, UInt<1>("h1"))) mem_0_12.CE <= RW0_clk mem_0_12.A <= RW0_addr node RW0_rdata_0_12 = bits(mem_0_12.O, 0, 0) mem_0_12.I <= bits(RW0_wdata, 12, 12) - mem_0_12.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_12.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_12.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 12, 12)), UInt<1>("h1"))) mem_0_12.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_13.CE <= RW0_clk mem_0_13.A <= RW0_addr node RW0_rdata_0_13 = bits(mem_0_13.O, 0, 0) mem_0_13.I <= bits(RW0_wdata, 13, 13) - mem_0_13.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_13.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_13.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 13, 13)), UInt<1>("h1"))) mem_0_13.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_14.CE <= RW0_clk mem_0_14.A <= RW0_addr node RW0_rdata_0_14 = bits(mem_0_14.O, 0, 0) mem_0_14.I <= bits(RW0_wdata, 14, 14) - mem_0_14.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_14.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_14.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 14, 14)), UInt<1>("h1"))) mem_0_14.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_15.CE <= RW0_clk mem_0_15.A <= RW0_addr node RW0_rdata_0_15 = bits(mem_0_15.O, 0, 0) mem_0_15.I <= bits(RW0_wdata, 15, 15) - mem_0_15.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_15.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_15.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 15, 15)), UInt<1>("h1"))) mem_0_15.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_16.CE <= RW0_clk mem_0_16.A <= RW0_addr node RW0_rdata_0_16 = bits(mem_0_16.O, 0, 0) mem_0_16.I <= bits(RW0_wdata, 16, 16) - mem_0_16.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_16.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_16.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 16, 16)), UInt<1>("h1"))) mem_0_16.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_17.CE <= RW0_clk mem_0_17.A <= RW0_addr node RW0_rdata_0_17 = bits(mem_0_17.O, 0, 0) mem_0_17.I <= bits(RW0_wdata, 17, 17) - mem_0_17.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_17.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_17.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 17, 17)), UInt<1>("h1"))) mem_0_17.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_18.CE <= RW0_clk mem_0_18.A <= RW0_addr node RW0_rdata_0_18 = bits(mem_0_18.O, 0, 0) mem_0_18.I <= bits(RW0_wdata, 18, 18) - mem_0_18.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_18.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_18.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 18, 18)), UInt<1>("h1"))) mem_0_18.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_19.CE <= RW0_clk mem_0_19.A <= RW0_addr node RW0_rdata_0_19 = bits(mem_0_19.O, 0, 0) mem_0_19.I <= bits(RW0_wdata, 19, 19) - mem_0_19.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_19.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_19.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 19, 19)), UInt<1>("h1"))) mem_0_19.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_20.CE <= RW0_clk mem_0_20.A <= RW0_addr node RW0_rdata_0_20 = bits(mem_0_20.O, 0, 0) mem_0_20.I <= bits(RW0_wdata, 20, 20) - mem_0_20.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_20.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_20.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 20, 20)), UInt<1>("h1"))) mem_0_20.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_21.CE <= RW0_clk mem_0_21.A <= 
RW0_addr node RW0_rdata_0_21 = bits(mem_0_21.O, 0, 0) mem_0_21.I <= bits(RW0_wdata, 21, 21) - mem_0_21.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_21.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_21.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 21, 21)), UInt<1>("h1"))) mem_0_21.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_22.CE <= RW0_clk mem_0_22.A <= RW0_addr node RW0_rdata_0_22 = bits(mem_0_22.O, 0, 0) mem_0_22.I <= bits(RW0_wdata, 22, 22) - mem_0_22.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_22.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_22.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 22, 22)), UInt<1>("h1"))) mem_0_22.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_23.CE <= RW0_clk mem_0_23.A <= RW0_addr node RW0_rdata_0_23 = bits(mem_0_23.O, 0, 0) mem_0_23.I <= bits(RW0_wdata, 23, 23) - mem_0_23.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_23.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_23.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 23, 23)), UInt<1>("h1"))) mem_0_23.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_24.CE <= RW0_clk mem_0_24.A <= RW0_addr node RW0_rdata_0_24 = bits(mem_0_24.O, 0, 0) mem_0_24.I <= bits(RW0_wdata, 24, 24) - mem_0_24.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_24.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_24.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 24, 24)), UInt<1>("h1"))) mem_0_24.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_25.CE <= RW0_clk mem_0_25.A <= RW0_addr node RW0_rdata_0_25 = bits(mem_0_25.O, 0, 0) mem_0_25.I <= bits(RW0_wdata, 25, 25) - mem_0_25.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_25.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_25.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 25, 25)), UInt<1>("h1"))) mem_0_25.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_26.CE <= RW0_clk mem_0_26.A <= RW0_addr node RW0_rdata_0_26 = bits(mem_0_26.O, 0, 0) mem_0_26.I <= bits(RW0_wdata, 26, 26) - mem_0_26.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_26.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_26.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 26, 26)), UInt<1>("h1"))) mem_0_26.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_27.CE <= RW0_clk mem_0_27.A <= RW0_addr node RW0_rdata_0_27 = bits(mem_0_27.O, 0, 0) mem_0_27.I <= bits(RW0_wdata, 27, 27) - mem_0_27.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_27.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_27.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 27, 27)), UInt<1>("h1"))) mem_0_27.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_28.CE <= RW0_clk mem_0_28.A <= RW0_addr node RW0_rdata_0_28 = bits(mem_0_28.O, 0, 0) mem_0_28.I <= bits(RW0_wdata, 28, 28) - mem_0_28.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_28.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_28.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 28, 28)), UInt<1>("h1"))) mem_0_28.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_29.CE <= RW0_clk mem_0_29.A <= RW0_addr node RW0_rdata_0_29 = bits(mem_0_29.O, 0, 0) mem_0_29.I <= bits(RW0_wdata, 29, 29) - mem_0_29.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_29.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_29.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 29, 29)), UInt<1>("h1"))) mem_0_29.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_30.CE <= RW0_clk mem_0_30.A <= RW0_addr node RW0_rdata_0_30 = bits(mem_0_30.O, 0, 0) mem_0_30.I <= 
bits(RW0_wdata, 30, 30) - mem_0_30.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_30.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_30.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 30, 30)), UInt<1>("h1"))) mem_0_30.CSB <= not(and(RW0_en, UInt<1>("h1"))) mem_0_31.CE <= RW0_clk mem_0_31.A <= RW0_addr node RW0_rdata_0_31 = bits(mem_0_31.O, 0, 0) mem_0_31.I <= bits(RW0_wdata, 31, 31) - mem_0_31.OEB <= not(and(not(RW0_wmode), UInt<1>("h1"))) + mem_0_31.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) mem_0_31.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 31, 31)), UInt<1>("h1"))) mem_0_31.CSB <= not(and(RW0_en, UInt<1>("h1"))) node RW0_rdata_0 = cat(RW0_rdata_0_31, cat(RW0_rdata_0_30, cat(RW0_rdata_0_29, cat(RW0_rdata_0_28, cat(RW0_rdata_0_27, cat(RW0_rdata_0_26, cat(RW0_rdata_0_25, cat(RW0_rdata_0_24, cat(RW0_rdata_0_23, cat(RW0_rdata_0_22, cat(RW0_rdata_0_21, cat(RW0_rdata_0_20, cat(RW0_rdata_0_19, cat(RW0_rdata_0_18, cat(RW0_rdata_0_17, cat(RW0_rdata_0_16, cat(RW0_rdata_0_15, cat(RW0_rdata_0_14, cat(RW0_rdata_0_13, cat(RW0_rdata_0_12, cat(RW0_rdata_0_11, cat(RW0_rdata_0_10, cat(RW0_rdata_0_9, cat(RW0_rdata_0_8, cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))))))))))))))))))))))))))) diff --git a/macros/src/test/scala/SynFlops.scala b/macros/src/test/scala/SynFlops.scala index 8198d8f38..0723bb33a 100644 --- a/macros/src/test/scala/SynFlops.scala +++ b/macros/src/test/scala/SynFlops.scala @@ -11,7 +11,7 @@ s""" mem_0_0.${libPortPrefix}_addr <= ${libPortPrefix}_addr node ${libPortPrefix}_dout_0_0 = bits(mem_0_0.${libPortPrefix}_dout, ${libWidth-1}, 0) mem_0_0.${libPortPrefix}_din <= bits(${libPortPrefix}_din, ${libWidth-1}, 0) - mem_0_0.${libPortPrefix}_write_en <= and(and(${libPortPrefix}_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_0.${libPortPrefix}_write_en <= and(and(and(${libPortPrefix}_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) node ${libPortPrefix}_dout_0 = ${libPortPrefix}_dout_0_0 ${libPortPrefix}_dout <= mux(UInt<1>("h1"), ${libPortPrefix}_dout_0, UInt<1>("h0")) @@ -148,7 +148,7 @@ circuit target_memory : mem_0_0.innerB_clk <= outerA_clk mem_0_0.innerB_addr <= outerA_addr mem_0_0.innerB_din <= bits(outerA_din, 7, 0) - mem_0_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) + mem_0_0.innerB_write_en <= and(and(and(outerA_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) mem_0_0.innerA_clk <= outerB_clk mem_0_0.innerA_addr <= outerB_addr node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) @@ -157,7 +157,7 @@ circuit target_memory : mem_1_0.innerB_clk <= outerA_clk mem_1_0.innerB_addr <= outerA_addr mem_1_0.innerB_din <= bits(outerA_din, 7, 0) - mem_1_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) + mem_1_0.innerB_write_en <= and(and(and(outerA_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) mem_1_0.innerA_clk <= outerB_clk mem_1_0.innerA_addr <= outerB_addr node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) @@ -182,7 +182,7 @@ circuit target_memory : mem_0_0.innerB_clk <= innerB_clk mem_0_0.innerB_addr <= innerB_addr mem_0_0.innerB_din <= bits(innerB_din, 7, 0) - mem_0_0.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) + mem_0_0.innerB_write_en <= and(and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) 
mem_0_0.innerA_clk <= innerA_clk mem_0_0.innerA_addr <= innerA_addr node innerA_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) diff --git a/mdf b/mdf index 515dda512..4281e8f62 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit 515dda51206eb40bcbe902700abc8ca36b141c0d +Subproject commit 4281e8f621decc10a8cdb878c593e46115c70998 diff --git a/tapeout/src/main/scala/transforms/ResetInverter.scala b/tapeout/src/main/scala/transforms/ResetInverter.scala index da090dbf0..08d849835 100644 --- a/tapeout/src/main/scala/transforms/ResetInverter.scala +++ b/tapeout/src/main/scala/transforms/ResetInverter.scala @@ -34,11 +34,12 @@ object ResetN extends Pass { mod.copy(ports = portsx, body = bodyx) } - def run(c: Circuit): Circuit = + def run(c: Circuit): Circuit = { c.copy(modules = c.modules map { case mod: Module if mod.name == c.main => invertReset(mod) case other => other }) + } } class ResetInverterTransform extends Transform { diff --git a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala index 7abcbf4c9..07fca3028 100644 --- a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala +++ b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala @@ -22,7 +22,7 @@ class ResetNSpec extends FreeSpec with Matchers { "Inverting reset needs to be done throughout module" in { val optionsManager = new ExecutionOptionsManager("dsptools") with HasChiselExecutionOptions with HasFirrtlOptions { - firrtlOptions = firrtlOptions.copy(compilerName = "low") + firrtlOptions = firrtlOptions.copy(compilerName = "low", customTransforms = List(new ResetInverterTransform)), } chisel3.Driver.execute(optionsManager, () => new ExampleModuleNeedsResetInverted) match { case ChiselExecutionSuccess(_, chirrtl, Some(FirrtlExecutionSuccess(_, firrtl))) => diff --git a/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala b/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala index 7c6348370..76223b717 100644 --- a/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala +++ b/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala @@ -27,10 +27,10 @@ class RetimeSpec extends FlatSpec with Matchers { val dir = uniqueDirName(gen, "RetimeModule") chisel3.Driver.execute(Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final"), gen) shouldBe a [ChiselExecutionSuccess] - val lines = io.Source.fromFile(s"test_run_dir/$dir/final.anno.json").getLines().map(normalized).mkString("\n") + val lines = io.Source.fromFile(s"test_run_dir/$dir/test_run_dir/$dir/final.anno.json").getLines().map(normalized).mkString("\n") lines should include("barstools.tapeout.transforms.retime.RetimeTransform") } - + // TODO(azidar): need to fix/add instance annotations ignore should "pass simple retime instance annotation" in { val gen = () => new RetimeInstance() From 5fcae018256a5f9992ce95f514928b61a988c83b Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Wed, 19 Feb 2020 18:52:48 -0800 Subject: [PATCH 157/273] Fix width of zeros after #74 --- macros/src/test/scala/CostFunction.scala | 2 +- macros/src/test/scala/MultiPort.scala | 10 +++---- macros/src/test/scala/SimpleSplitDepth.scala | 10 +++---- macros/src/test/scala/SimpleSplitWidth.scala | 6 ++--- macros/src/test/scala/SpecificExamples.scala | 28 ++++++++++---------- macros/src/test/scala/SynFlops.scala | 10 +++---- 6 files changed, 33 insertions(+), 33 deletions(-) diff --git a/macros/src/test/scala/CostFunction.scala b/macros/src/test/scala/CostFunction.scala index 35936ed14..b8a27f7fe 100644 --- 
a/macros/src/test/scala/CostFunction.scala +++ b/macros/src/test/scala/CostFunction.scala @@ -99,7 +99,7 @@ circuit target_memory : mem_0_3.din <= bits(din, 127, 96) mem_0_3.write_en <= and(and(and(write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) node dout_0 = cat(dout_0_3, cat(dout_0_2, cat(dout_0_1, dout_0_0))) - dout <= mux(UInt<1>("h1"), dout_0, UInt<1>("h0")) + dout <= mux(UInt<1>("h1"), dout_0, UInt<128>("h0")) extmodule SRAM_WIDTH_32 : input addr : UInt<10> diff --git a/macros/src/test/scala/MultiPort.scala b/macros/src/test/scala/MultiPort.scala index fdaae9f79..3899f8359 100644 --- a/macros/src/test/scala/MultiPort.scala +++ b/macros/src/test/scala/MultiPort.scala @@ -111,8 +111,8 @@ class SplitWidth_2rw extends MacroCompilerSpec with HasSRAMGenerator with HasSim mem_0_3.portB_read_en <= and(portB_read_en, UInt<1>("h1")) mem_0_3.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 3, 3)), UInt<1>("h1")) node portB_dout_0 = cat(portB_dout_0_3, cat(portB_dout_0_2, cat(portB_dout_0_1, portB_dout_0_0))) - portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<1>("h0")) - portB_dout <= mux(UInt<1>("h1"), portB_dout_0, UInt<1>("h0")) + portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<64>("h0")) + portB_dout <= mux(UInt<1>("h1"), portB_dout_0, UInt<64>("h0")) """ compileExecuteAndTest(mem, lib, v, output) @@ -215,7 +215,7 @@ class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasS node portA_dout_0_3 = bits(mem_0_3.portA_dout, 15, 0) mem_0_3.portA_read_en <= and(portA_read_en, UInt<1>("h1")) node portA_dout_0 = cat(portA_dout_0_3, cat(portA_dout_0_2, cat(portA_dout_0_1, portA_dout_0_0))) - portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<1>("h0")) + portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<64>("h0")) """ compileExecuteAndTest(mem, lib, v, output) @@ -384,8 +384,8 @@ class SplitWidth_2rw_differentMasks extends MacroCompilerSpec with HasSRAMGenera mem_0_7.portB_read_en <= and(portB_read_en, UInt<1>("h1")) mem_0_7.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 7, 7)), UInt<1>("h1")) node portB_dout_0 = cat(portB_dout_0_7, cat(portB_dout_0_6, cat(portB_dout_0_5, cat(portB_dout_0_4, cat(portB_dout_0_3, cat(portB_dout_0_2, cat(portB_dout_0_1, portB_dout_0_0))))))) - portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<1>("h0")) - portB_dout <= mux(UInt<1>("h1"), portB_dout_0, UInt<1>("h0")) + portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<64>("h0")) + portB_dout <= mux(UInt<1>("h1"), portB_dout_0, UInt<64>("h0")) """ compileExecuteAndTest(mem, lib, v, output) diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/SimpleSplitDepth.scala index 8df8ec7ee..e3560f9af 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/SimpleSplitDepth.scala @@ -48,7 +48,7 @@ s""" } def generate_outer_dout_tree(i:Int, depthInstances: Int): String = { if (i > depthInstances - 1) { - "UInt<1>(\"h0\")" + s"""UInt<${libWidth}>("h0")""" } else { s"""mux(eq(${memPortPrefix}_addr_sel_reg, UInt<%d>("h%s")), ${memPortPrefix}_dout_%d, %s)""".format( selectBits, i.toHexString, i, generate_outer_dout_tree(i + 1, depthInstances) @@ -59,7 +59,7 @@ s""" if (selectBits > 0) { output append generate_outer_dout_tree(0, depthInstances) } else { - output append s"""mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<1>("h0"))""" + output append s"""mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<${libWidth}>("h0"))""" } output.toString @@ -287,7 +287,7 @@ circuit 
target_memory : mem_1_0.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outer_addr_sel, UInt<1>("h1"))) node outer_dout_1 = outer_dout_1_0 - outer_dout <= mux(eq(outer_addr_sel_reg, UInt<1>("h0")), outer_dout_0, mux(eq(outer_addr_sel_reg, UInt<1>("h1")), outer_dout_1, UInt<1>("h0"))) + outer_dout <= mux(eq(outer_addr_sel_reg, UInt<1>("h0")), outer_dout_0, mux(eq(outer_addr_sel_reg, UInt<1>("h1")), outer_dout_1, UInt<8>("h0"))) extmodule awesome_lib_mem : input lib_addr : UInt<10> input lib_clk : Clock @@ -378,7 +378,7 @@ circuit target_memory : mem_1_0.innerA_addr <= outerB_addr node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) node outerB_dout_1 = outerB_dout_1_0 - outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) + outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<8>("h0"))) extmodule awesome_lib_mem : input innerA_addr : UInt<10> @@ -542,7 +542,7 @@ circuit target_memory : mem_1_0.innerA_addr <= outerB_addr node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) node outerB_dout_1 = outerB_dout_1_0 - outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) + outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<8>("h0"))) extmodule awesome_lib_mem : input innerA_addr : UInt<10> diff --git a/macros/src/test/scala/SimpleSplitWidth.scala b/macros/src/test/scala/SimpleSplitWidth.scala index 9cc10f9d9..843eed494 100644 --- a/macros/src/test/scala/SimpleSplitWidth.scala +++ b/macros/src/test/scala/SimpleSplitWidth.scala @@ -63,7 +63,7 @@ s""" output append s""" - ${memPortPrefix}_dout <= mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<1>("h0")) + ${memPortPrefix}_dout <= mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<${memWidth}>("h0")) """ output.toString } @@ -438,7 +438,7 @@ class SplitWidth1024x32_readEnable_Lib extends MacroCompilerSpec with HasSRAMGen mem_0_3.lib_read_en <= and(and(not(outer_write_en), UInt<1>("h1")), UInt<1>("h1")) mem_0_3.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) node outer_dout_0 = cat(outer_dout_0_3, cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0))) - outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0")) + outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<32>("h0")) """ compileExecuteAndTest(mem, lib, v, output) @@ -536,7 +536,7 @@ class SplitWidth1024x32_readEnable_LibMem extends MacroCompilerSpec with HasSRAM mem_0_3.lib_read_en <= and(outer_read_en, UInt<1>("h1")) mem_0_3.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) node outer_dout_0 = cat(outer_dout_0_3, cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0))) - outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<1>("h0")) + outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<32>("h0")) """ compileExecuteAndTest(mem, lib, v, output) diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/SpecificExamples.scala index 56f4500f2..e41932bb6 100644 --- a/macros/src/test/scala/SpecificExamples.scala +++ b/macros/src/test/scala/SpecificExamples.scala @@ -76,7 +76,7 @@ circuit cc_banks_0_ext : mem_0_0.ren <= and(and(not(RW0_wmode), RW0_en), UInt<1>("h1")) mem_0_0.wen <= 
and(and(and(RW0_wmode, RW0_en), UInt<1>("h1")), UInt<1>("h1")) node RW0_rdata_0 = RW0_rdata_0_0 - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<64>("h0")) extmodule fake_mem : input addr : UInt<12> @@ -159,7 +159,7 @@ circuit cc_dir_ext : mem_0_1.mport <= not(cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), bits(RW0_wmask, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) mem_0_1.wen <= and(and(RW0_wmode, RW0_en), UInt<1>("h1")) node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<128>("h0")) extmodule fake_mem : input addr : UInt<9> @@ -486,7 +486,7 @@ circuit smem_0_ext : mem_1_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) mem_1_3.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) node R0_data_1 = cat(R0_data_1_3, cat(R0_data_1_2, cat(R0_data_1_1, R0_data_1_0))) - R0_data <= mux(eq(R0_addr_sel_reg, UInt<1>("h0")), R0_data_0, mux(eq(R0_addr_sel_reg, UInt<1>("h1")), R0_data_1, UInt<1>("h0"))) + R0_data <= mux(eq(R0_addr_sel_reg, UInt<1>("h0")), R0_data_0, mux(eq(R0_addr_sel_reg, UInt<1>("h1")), R0_data_1, UInt<88>("h0"))) module _T_84_ext : @@ -621,7 +621,7 @@ circuit smem_0_ext : mem_3_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h3")))) mem_3_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h3")))) node R0_data_3 = cat(R0_data_3_1, R0_data_3_0) - R0_data <= mux(eq(R0_addr_sel_reg, UInt<2>("h0")), R0_data_0, mux(eq(R0_addr_sel_reg, UInt<2>("h1")), R0_data_1, mux(eq(R0_addr_sel_reg, UInt<2>("h2")), R0_data_2, mux(eq(R0_addr_sel_reg, UInt<2>("h3")), R0_data_3, UInt<1>("h0"))))) + R0_data <= mux(eq(R0_addr_sel_reg, UInt<2>("h0")), R0_data_0, mux(eq(R0_addr_sel_reg, UInt<2>("h1")), R0_data_1, 
mux(eq(R0_addr_sel_reg, UInt<2>("h2")), R0_data_2, mux(eq(R0_addr_sel_reg, UInt<2>("h3")), R0_data_3, UInt<64>("h0"))))) extmodule my_sram_2rw_128x32 : input A1 : UInt<7> @@ -684,7 +684,7 @@ circuit smem_0_ext : mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) mem_0_3.CSB <= not(and(RW0_en, UInt<1>("h1"))) node RW0_rdata_0 = cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<80>("h0")) extmodule my_sram_1rw_64x32 : input A : UInt<6> @@ -723,7 +723,7 @@ circuit smem_0_ext : mem_0_1.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<64>("h0")) extmodule my_sram_1rw_512x32 : input A : UInt<9> @@ -776,7 +776,7 @@ circuit smem_0_ext : mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) mem_0_1.CSB2 <= not(and(R0_en, UInt<1>("h1"))) node R0_data_0 = cat(R0_data_0_1, R0_data_0_0) - R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) + R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<40>("h0")) extmodule my_sram_2rw_32x22 : input A1 : UInt<5> @@ -1063,7 +1063,7 @@ circuit smem_0_ext : mem_0_31.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 31, 31)), UInt<1>("h1"))) mem_0_31.CSB <= not(and(RW0_en, UInt<1>("h1"))) node RW0_rdata_0 = cat(RW0_rdata_0_31, cat(RW0_rdata_0_30, cat(RW0_rdata_0_29, cat(RW0_rdata_0_28, cat(RW0_rdata_0_27, cat(RW0_rdata_0_26, cat(RW0_rdata_0_25, cat(RW0_rdata_0_24, cat(RW0_rdata_0_23, cat(RW0_rdata_0_22, cat(RW0_rdata_0_21, cat(RW0_rdata_0_20, cat(RW0_rdata_0_19, cat(RW0_rdata_0_18, cat(RW0_rdata_0_17, cat(RW0_rdata_0_16, cat(RW0_rdata_0_15, cat(RW0_rdata_0_14, cat(RW0_rdata_0_13, cat(RW0_rdata_0_12, cat(RW0_rdata_0_11, cat(RW0_rdata_0_10, cat(RW0_rdata_0_9, cat(RW0_rdata_0_8, cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))))))))))))))))))))))))))) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<32>("h0")) module smem_0_ext : input RW0_addr : UInt<6> @@ -1331,7 +1331,7 @@ circuit smem_0_ext : mem_0_31.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 31, 31)), UInt<1>("h1"))) mem_0_31.CSB <= not(and(RW0_en, UInt<1>("h1"))) node RW0_rdata_0 = cat(RW0_rdata_0_31, cat(RW0_rdata_0_30, cat(RW0_rdata_0_29, cat(RW0_rdata_0_28, cat(RW0_rdata_0_27, cat(RW0_rdata_0_26, cat(RW0_rdata_0_25, cat(RW0_rdata_0_24, cat(RW0_rdata_0_23, cat(RW0_rdata_0_22, cat(RW0_rdata_0_21, cat(RW0_rdata_0_20, cat(RW0_rdata_0_19, cat(RW0_rdata_0_18, cat(RW0_rdata_0_17, cat(RW0_rdata_0_16, cat(RW0_rdata_0_15, cat(RW0_rdata_0_14, cat(RW0_rdata_0_13, cat(RW0_rdata_0_12, cat(RW0_rdata_0_11, cat(RW0_rdata_0_10, cat(RW0_rdata_0_9, cat(RW0_rdata_0_8, cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))))))))))))))))))))))))))) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<32>("h0")) extmodule my_sram_1rw_64x8 : input A : UInt<6> @@ -1375,7 +1375,7 @@ class SmallTagArrayTest extends MacroCompilerSpec with HasSRAMGenerator with Has | mem_0_0.mask <= cat(UInt<1>("h0"), 
cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), bits(mask, 0, 0)))))))))))))))))))))))))))))))) | mem_0_0.write_en <= and(and(write_en, UInt<1>("h1")), UInt<1>("h1")) | node dout_0 = dout_0_0 - | dout <= mux(UInt<1>("h1"), dout_0, UInt<1>("h0")) + | dout <= mux(UInt<1>("h1"), dout_0, UInt<26>("h0")) """.stripMargin compileExecuteAndTest(mem, lib, v, output) @@ -1573,7 +1573,7 @@ circuit T_2172_ext : mem_0_3.din <= bits(RW0_wdata, 79, 60) mem_0_3.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1")) node RW0_rdata_0 = cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<80>("h0")) extmodule SRAM1RW64x32 : input addr : UInt<6> @@ -1605,7 +1605,7 @@ circuit T_2172_ext : mem_0_1.din <= bits(RW0_wdata, 63, 32) mem_0_1.write_en <= and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1")) node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<64>("h0")) extmodule SRAM1RW512x32 : input addr : UInt<9> @@ -1675,7 +1675,7 @@ circuit T_2172_ext : mem_0_7.din <= bits(RW0_wdata, 63, 56) mem_0_7.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1")) node RW0_rdata_0 = cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<1>("h0")) + RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<64>("h0")) extmodule SRAM1RW512x8 : input addr : UInt<9> @@ -1731,7 +1731,7 @@ circuit T_2172_ext : mem_0_3.portA_addr <= R0_addr node R0_data_0_3 = bits(mem_0_3.portA_dout, 21, 0) node R0_data_0 = cat(R0_data_0_3, cat(R0_data_0_2, cat(R0_data_0_1, R0_data_0_0))) - R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<1>("h0")) + R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<88>("h0")) extmodule SRAM2RW64x32 : input portA_addr : UInt<6> diff --git a/macros/src/test/scala/SynFlops.scala b/macros/src/test/scala/SynFlops.scala index 0723bb33a..f12161a18 100644 --- a/macros/src/test/scala/SynFlops.scala +++ b/macros/src/test/scala/SynFlops.scala @@ -13,7 +13,7 @@ s""" mem_0_0.${libPortPrefix}_din <= bits(${libPortPrefix}_din, ${libWidth-1}, 0) mem_0_0.${libPortPrefix}_write_en <= and(and(and(${libPortPrefix}_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) node ${libPortPrefix}_dout_0 = ${libPortPrefix}_dout_0_0 - ${libPortPrefix}_dout <= mux(UInt<1>("h1"), ${libPortPrefix}_dout_0, UInt<1>("h0")) + ${libPortPrefix}_dout <= mux(UInt<1>("h1"), ${libPortPrefix}_dout_0, UInt<${libWidth}>("h0")) module split_${lib_name} : input ${libPortPrefix}_addr : UInt<${lib_addr_width}> @@ -162,7 +162,7 @@ circuit target_memory : mem_1_0.innerA_addr <= outerB_addr node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) node outerB_dout_1 = 
outerB_dout_1_0 - outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) + outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<8>("h0"))) """ override def generateFooterPorts = @@ -187,7 +187,7 @@ circuit target_memory : mem_0_0.innerA_addr <= innerA_addr node innerA_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) node innerA_dout_0 = innerA_dout_0_0 - innerA_dout <= mux(UInt<1>("h1"), innerA_dout_0, UInt<1>("h0")) + innerA_dout <= mux(UInt<1>("h1"), innerA_dout_0, UInt<8>("h0")) module split_awesome_lib_mem : input innerA_addr : UInt<10> @@ -294,7 +294,7 @@ circuit target_memory : mem_1_0.innerA_addr <= outerB_addr node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) node outerB_dout_1 = outerB_dout_1_0 - outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<1>("h0"))) + outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<8>("h0"))) """ override def generateFooterPorts = @@ -384,7 +384,7 @@ circuit target_memory : mem_0_7.innerA_addr <= innerA_addr node innerA_dout_0_7 = bits(mem_0_7.innerA_dout, 0, 0) node innerA_dout_0 = cat(innerA_dout_0_7, cat(innerA_dout_0_6, cat(innerA_dout_0_5, cat(innerA_dout_0_4, cat(innerA_dout_0_3, cat(innerA_dout_0_2, cat(innerA_dout_0_1, innerA_dout_0_0))))))) - innerA_dout <= mux(UInt<1>("h1"), innerA_dout_0, UInt<1>("h0")) + innerA_dout <= mux(UInt<1>("h1"), innerA_dout_0, UInt<8>("h0")) module split_awesome_lib_mem : From 84c880d231435b59e8ee8791c0d350bdaddb718b Mon Sep 17 00:00:00 2001 From: John Wright Date: Tue, 17 Mar 2020 14:02:11 -0700 Subject: [PATCH 158/273] WIP; does not compile, but useful as a code review starting point --- .../resources/barstools/iocell/vsrc/Analog.v | 11 ++ .../resources/barstools/iocell/vsrc/IOCell.v | 46 +++++ iocell/src/main/scala/chisel/Analog.scala | 16 ++ iocell/src/main/scala/chisel/IOCell.scala | 185 ++++++++++++++++++ 4 files changed, 258 insertions(+) create mode 100644 iocell/src/main/resources/barstools/iocell/vsrc/Analog.v create mode 100644 iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v create mode 100644 iocell/src/main/scala/chisel/Analog.scala create mode 100644 iocell/src/main/scala/chisel/IOCell.scala diff --git a/iocell/src/main/resources/barstools/iocell/vsrc/Analog.v b/iocell/src/main/resources/barstools/iocell/vsrc/Analog.v new file mode 100644 index 000000000..0a9abf03d --- /dev/null +++ b/iocell/src/main/resources/barstools/iocell/vsrc/Analog.v @@ -0,0 +1,11 @@ +// See LICENSE for license details + +`timescale 1ns/1ps + +module AnalogConst #(CONST, WIDTH) ( + output [WIDTH-1:0] io +); + + assign io = CONST; + +endmodule diff --git a/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v b/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v new file mode 100644 index 000000000..d0be6b0bc --- /dev/null +++ b/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v @@ -0,0 +1,46 @@ +// See LICENSE for license details + +`timescale 1ns/1ps + +module ExampleAnalogIOCell( + inout pad, + inout core +); + + assign core = 1'bz; + assign pad = core; + +endmodule + +module ExampleDigitalGPIOCell( + inout pad, + output i, + input ie, + input o, + input oe +); + + assign pad = oe ? o : 1'bz; + assign i = ie ? 
pad : 1'b0; + +endmodule + +module ExampleDigitalInIOCell( + input pad, + output i, + input ie +); + + assign i = ie ? pad : 1'b0; + +endmodule + +module ExampleDigitalOutIOCell( + output pad, + input o, + output oe +); + + assign pad = oe ? o : 1'bz; + +endmodule diff --git a/iocell/src/main/scala/chisel/Analog.scala b/iocell/src/main/scala/chisel/Analog.scala new file mode 100644 index 000000000..e1b4fc782 --- /dev/null +++ b/iocell/src/main/scala/chisel/Analog.scala @@ -0,0 +1,16 @@ +// See LICENSE for license details + +package barstools.iocell.chisel + +import chisel3._ +import chisel3.util.{HasBlackBoxResource} +import chisel3.experimental.{Analog, IntParam} + +class AnalogConst(value: Int, width: Int = 1) extends BlackBox(Map("CONST" -> IntParam(value), "WIDTH" -> IntParam(width))) with HasBlackBoxResource{ + val io = IO(new Bundle {val io = Analog(width.W) } ) + addResource("/barstools/iocell/vsrc/Analog.v") +} + +object AnalogConst { + def apply(value: Int, width: Int = 1) = Module(new AnalogConst(value, width)).io.io +} diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/chisel/IOCell.scala new file mode 100644 index 000000000..52c935cb7 --- /dev/null +++ b/iocell/src/main/scala/chisel/IOCell.scala @@ -0,0 +1,185 @@ +// See LICENSE for license details + +package barstools.iocell.chisel + +import chisel3._ +import chisel3.util.{Cat, HasBlackBoxResource} +import chisel3.experimental.{Analog, DataMirror} + +class AnalogIOCellBundle extends Bundle { + val pad = Analog(1.W) + val core = Analog(1.W) +} + +class DigitalGPIOCellBundle extends Bundle { + val pad = Analog(1.W) + val i = Output(Bool()) + val ie = Input(Bool()) + val o = Input(Bool()) + val oe = Input(Bool()) +} + +class DigitalOutIOCellBundle extends Bundle { + val pad = Output(Bool()) + val o = Input(Bool()) + val oe = Input(Bool()) +} + +class DigitalInIOCellBundle extends Bundle { + val pad = Input(Bool()) + val i = Output(Bool()) + val ie = Input(Bool()) +} + +abstract class IOCell extends BlackBox with HasBlackBoxResource + +abstract class AnalogIOCell extends IOCell { + val io: AnalogIOCellBundle +} + +abstract class DigitalGPIOCell extends IOCell { + val io: DigitalGPIOCellBundle +} + +abstract class DigitalInIOCell extends IOCell { + val io: DigitalInIOCellBundle +} + +abstract class DigitalOutIOCell extends IOCell { + val io: DigitalOutIOCellBundle +} + +class ExampleAnalogIOCell extends AnalogIOCell { + val io = IO(new AnalogIOCellBundle) + addResource("/barstools/iocell/vsrc/IOCell.v") +} + +class ExampleDigitalGPIOCell extends DigitalGPIOCell { + val io = IO(new DigitalGPIOCellBundle) + addResource("/barstools/iocell/vsrc/IOCell.v") +} + +class ExampleDigitalInIOCell extends DigitalInIOCell { + val io = IO(new DigitalInIOCellBundle) + addResource("/barstools/iocell/vsrc/IOCell.v") +} + +class ExampleDigitalOutIOCell extends DigitalOutIOCell { + val io = IO(new DigitalOutIOCellBundle) + addResource("/barstools/iocell/vsrc/IOCell.v") +} + +object IOCell { + + def exampleAnalog() = Module(new ExampleAnalogIOCell) + def exampleGPIO() = Module(new ExampleDigitalGPIOCell) + def exampleInput() = Module(new ExampleDigitalInIOCell) + def exampleOutput() = Module(new ExampleDigitalOutIOCell) + + def generateRaw[T <: Data](signal: T, + inFn: () => DigitalInIOCell = IOCell.exampleInput, + outFn: () => DigitalOutIOCell = IOCell.exampleOutput, + anaFn: () => AnalogIOCell = IOCell.exampleAnalog): (T, Seq[IOCell]) = + { + (signal match { + case signal: Analog => { + require(signal.getWidth <= 1, 
"Analogs wider than 1 bit are not supported because we can't bit Analogs (https://github.com/freechipsproject/chisel3/issues/536)") + if (signal.getWidth == 0) { + (Analog(0.W), Seq()) + } else { + val iocell = anaFn() + iocell.io.core <> signal + (iocell.io.pad, Seq(iocell)) + } + } + case signal: Clock => { + DataMirror.specifiedDirectionOf(signal) match { + case SpecifiedDirection.Input => { + val iocell = inFn() + signal := iocell.io.i.asClock + iocell.io.ie := true.B + val ck = Wire(Clock()) + iocell.io.pad := ck.asUInt.asBool + (ck, Seq(iocell)) + } + case SpecifiedDirection.Output => { + val iocell = outFn() + iocell.io.o := signal.asUInt.asBool + iocell.io.oe := true.B + (iocell.io.pad.asClock, Seq(iocell)) + } + case _ => throw new Exception("Unknown direction") + } + } + // TODO we may not actually need Bool (it is probably covered by Bits) + case signal: Bool => { + DataMirror.specifiedDirectionOf(signal) match { + case SpecifiedDirection.Input => { + val iocell = inFn() + signal := iocell.io.i + iocell.io.ie := true.B + (iocell.io.pad, Seq(iocell)) + } + case SpecifiedDirection.Output => { + val iocell = outFn() + iocell.io.o := signal + iocell.io.oe := true.B + (iocell.io.pad, Seq(iocell)) + } + case _ => throw new Exception("Unknown direction") + } + } + case signal: Bits => { + DataMirror.specifiedDirectionOf(signal) match { + case SpecifiedDirection.Input => { + val wire = Wire(chiselTypeOf(signal)) + val iocells = wire.asBools.map { w => + val iocell = inFn() + iocell.io.pad := w + iocell.io.ie := true.B + iocell + } + if (iocells.size > 0) { + signal := Cat(iocells.map(_.io.i).reverse) + } + (wire, iocells) + } + case SpecifiedDirection.Output => { + val iocells = signal.asBools.map { b => + val iocell = outFn() + iocell.io.o := b + iocell.io.oe := true.B + iocell + } + if (iocells.size > 0) { + (Cat(iocells.map(_.io.pad).reverse), iocells) + } else { + (Wire(Bits(0.W)), iocells) + } + } + case _ => throw new Exception("Unknown direction") + } + } + case signal: Vec[_] => { + val wire = Wire(chiselTypeOf(signal)) + val iocells = signal.zip(wire).foldLeft(Seq.empty[IOCell]) { case (total, (sig, w)) => + val (pad, ios) = IOCell.generateRaw(sig, inFn, outFn, anaFn) + w <> pad + total ++ ios + } + (wire, iocells) + } + case signal: Record => { + val wire = Wire(chiselTypeOf(signal)) + val iocells = signal.elements.foldLeft(Seq.empty[IOCell]) { case (total, (name, sig)) => + val (pad, ios) = IOCell.generateRaw(sig, inFn, outFn, anaFn) + wire.elements(name) <> pad + total ++ ios + } + (wire, iocells) + } + case _ => { throw new Exception("Oops, I don't know how to handle this signal.") } + }).asInstanceOf[(T, Seq[IOCell])] + } + +} From 8a38171d18b07cd8a45c60aa8eb716a3e4ea9f21 Mon Sep 17 00:00:00 2001 From: John Wright Date: Wed, 18 Mar 2020 21:05:27 -0700 Subject: [PATCH 159/273] First pass that works --- iocell/src/main/scala/chisel/IOCell.scala | 155 +++++++++++----------- 1 file changed, 80 insertions(+), 75 deletions(-) diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/chisel/IOCell.scala index 52c935cb7..d6de4baa3 100644 --- a/iocell/src/main/scala/chisel/IOCell.scala +++ b/iocell/src/main/scala/chisel/IOCell.scala @@ -4,7 +4,7 @@ package barstools.iocell.chisel import chisel3._ import chisel3.util.{Cat, HasBlackBoxResource} -import chisel3.experimental.{Analog, DataMirror} +import chisel3.experimental.{Analog, DataMirror, IO} class AnalogIOCellBundle extends Bundle { val pad = Analog(1.W) @@ -76,110 +76,115 @@ object IOCell { def 
exampleInput() = Module(new ExampleDigitalInIOCell) def exampleOutput() = Module(new ExampleDigitalOutIOCell) - def generateRaw[T <: Data](signal: T, +/* This doesn't work because chiselTypeOf doesn't preserve direction info :( + def generateIOFromSignal[T <: Data](coreSignal: T, inFn: () => DigitalInIOCell = IOCell.exampleInput, outFn: () => DigitalOutIOCell = IOCell.exampleOutput, anaFn: () => AnalogIOCell = IOCell.exampleAnalog): (T, Seq[IOCell]) = { - (signal match { - case signal: Analog => { - require(signal.getWidth <= 1, "Analogs wider than 1 bit are not supported because we can't bit Analogs (https://github.com/freechipsproject/chisel3/issues/536)") - if (signal.getWidth == 0) { - (Analog(0.W), Seq()) + val padSignal = DataMirror.specifiedDirectionOf(coreSignal) match { + case SpecifiedDirection.Input => IO(Input(chiselTypeOf(coreSignal))) + case SpecifiedDirection.Output => IO(Output(chiselTypeOf(coreSignal))) + case SpecifiedDirection.Flip => IO(Flipped(chiselTypeOf(coreSignal))) + case _ => IO(chiselTypeOf(coreSignal)) + } + + val iocells = IOCell.generateFromSignal(coreSignal, padSignal, inFn, outFn, anaFn) + (padSignal, iocells).asInstanceOf[(T, Seq[IOCell])] + } +*/ + + def generateFromSignal[T <: Data](coreSignal: T, padSignal: T, + inFn: () => DigitalInIOCell = IOCell.exampleInput, + outFn: () => DigitalOutIOCell = IOCell.exampleOutput, + anaFn: () => AnalogIOCell = IOCell.exampleAnalog): Seq[IOCell] = + { + coreSignal match { + case coreSignal: Analog => { + if (coreSignal.getWidth == 0) { + Seq() } else { + require(coreSignal.getWidth == 1, "Analogs wider than 1 bit are not supported because we can't bit-select Analogs (https://github.com/freechipsproject/chisel3/issues/536)") val iocell = anaFn() - iocell.io.core <> signal - (iocell.io.pad, Seq(iocell)) - } - } - case signal: Clock => { - DataMirror.specifiedDirectionOf(signal) match { - case SpecifiedDirection.Input => { - val iocell = inFn() - signal := iocell.io.i.asClock - iocell.io.ie := true.B - val ck = Wire(Clock()) - iocell.io.pad := ck.asUInt.asBool - (ck, Seq(iocell)) - } - case SpecifiedDirection.Output => { - val iocell = outFn() - iocell.io.o := signal.asUInt.asBool - iocell.io.oe := true.B - (iocell.io.pad.asClock, Seq(iocell)) - } - case _ => throw new Exception("Unknown direction") + iocell.io.core <> coreSignal + padSignal <> iocell.io.pad + Seq(iocell) } } - // TODO we may not actually need Bool (it is probably covered by Bits) - case signal: Bool => { - DataMirror.specifiedDirectionOf(signal) match { - case SpecifiedDirection.Input => { + case coreSignal: Clock => { + DataMirror.directionOf(coreSignal) match { + case ActualDirection.Input => { val iocell = inFn() - signal := iocell.io.i + coreSignal := iocell.io.i.asClock iocell.io.ie := true.B - (iocell.io.pad, Seq(iocell)) + iocell.io.pad := padSignal.asUInt.asBool + Seq(iocell) } - case SpecifiedDirection.Output => { + case ActualDirection.Output => { val iocell = outFn() - iocell.io.o := signal + iocell.io.o := coreSignal.asUInt.asBool iocell.io.oe := true.B - (iocell.io.pad, Seq(iocell)) + padSignal := iocell.io.pad.asClock + Seq(iocell) } case _ => throw new Exception("Unknown direction") } } - case signal: Bits => { - DataMirror.specifiedDirectionOf(signal) match { - case SpecifiedDirection.Input => { - val wire = Wire(chiselTypeOf(signal)) - val iocells = wire.asBools.map { w => - val iocell = inFn() - iocell.io.pad := w - iocell.io.ie := true.B - iocell - } - if (iocells.size > 0) { - signal := Cat(iocells.map(_.io.i).reverse) - } - (wire, 
iocells) + case coreSignal: Bits => { + require(padSignal.getWidth == coreSignal.getWidth, "padSignal and coreSignal must be the same width") + if (padSignal.getWidth == 0) { + // This dummy assignment will prevent invalid firrtl from being emitted + DataMirror.directionOf(coreSignal) match { + case ActualDirection.Input => coreSignal := 0.U } - case SpecifiedDirection.Output => { - val iocells = signal.asBools.map { b => - val iocell = outFn() - iocell.io.o := b - iocell.io.oe := true.B - iocell + Seq() + } else { + DataMirror.directionOf(coreSignal) match { + case ActualDirection.Input => { + // this type cast is safe because we guarantee that padSignal and coreSignal are the same type (T), but the compiler is not smart enough to know that + val iocells = padSignal.asInstanceOf[Bits].asBools.map { w => + val iocell = inFn() + iocell.io.pad := w + iocell.io.ie := true.B + iocell + } + coreSignal := Cat(iocells.map(_.io.i).reverse) + iocells } - if (iocells.size > 0) { - (Cat(iocells.map(_.io.pad).reverse), iocells) - } else { - (Wire(Bits(0.W)), iocells) + case ActualDirection.Output => { + val iocells = coreSignal.asBools.map { w => + val iocell = outFn() + iocell.io.o := w + iocell.io.oe := true.B + iocell + } + padSignal := Cat(iocells.map(_.io.pad).reverse) + iocells } + case _ => throw new Exception("Unknown direction") } - case _ => throw new Exception("Unknown direction") } } - case signal: Vec[_] => { - val wire = Wire(chiselTypeOf(signal)) - val iocells = signal.zip(wire).foldLeft(Seq.empty[IOCell]) { case (total, (sig, w)) => - val (pad, ios) = IOCell.generateRaw(sig, inFn, outFn, anaFn) - w <> pad + case coreSignal: Vec[Data] => { + // this type cast is safe because we guarantee that padSignal and coreSignal are the same type (T), but the compiler is not smart enough to know that + val padSignal2 = padSignal.asInstanceOf[Vec[Data]] + require(padSignal2.size == coreSignal.size, "size of Vec for padSignal and coreSignal must be the same") + coreSignal.zip(padSignal2).foldLeft(Seq.empty[IOCell]) { case (total, (core, pad)) => + val ios = IOCell.generateFromSignal(core, pad, inFn, outFn, anaFn) total ++ ios } - (wire, iocells) } - case signal: Record => { - val wire = Wire(chiselTypeOf(signal)) - val iocells = signal.elements.foldLeft(Seq.empty[IOCell]) { case (total, (name, sig)) => - val (pad, ios) = IOCell.generateRaw(sig, inFn, outFn, anaFn) - wire.elements(name) <> pad + case coreSignal: Record => { + // this type cast is safe because we guarantee that padSignal and coreSignal are the same type (T), but the compiler is not smart enough to know that + val padSignal2 = padSignal.asInstanceOf[Record] + coreSignal.elements.foldLeft(Seq.empty[IOCell]) { case (total, (name, core)) => + val pad = padSignal2.elements(name) + val ios = IOCell.generateFromSignal(core, pad, inFn, outFn, anaFn) total ++ ios } - (wire, iocells) } case _ => { throw new Exception("Oops, I don't know how to handle this signal.") } - }).asInstanceOf[(T, Seq[IOCell])] + } } } From f6057ff947497a091b4c3f6d72a266f6545ef83f Mon Sep 17 00:00:00 2001 From: John Wright Date: Wed, 18 Mar 2020 22:25:08 -0700 Subject: [PATCH 160/273] Allow naming, make the auto-clone IO method work --- iocell/src/main/scala/chisel/IOCell.scala | 38 +++++++++++------------ 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/chisel/IOCell.scala index d6de4baa3..cd69dccd8 100644 --- a/iocell/src/main/scala/chisel/IOCell.scala +++ 
b/iocell/src/main/scala/chisel/IOCell.scala @@ -76,25 +76,17 @@ object IOCell { def exampleInput() = Module(new ExampleDigitalInIOCell) def exampleOutput() = Module(new ExampleDigitalOutIOCell) -/* This doesn't work because chiselTypeOf doesn't preserve direction info :( - def generateIOFromSignal[T <: Data](coreSignal: T, + def generateIOFromSignal[T <: Data](coreSignal: T, name: Option[String] = None, inFn: () => DigitalInIOCell = IOCell.exampleInput, outFn: () => DigitalOutIOCell = IOCell.exampleOutput, anaFn: () => AnalogIOCell = IOCell.exampleAnalog): (T, Seq[IOCell]) = { - val padSignal = DataMirror.specifiedDirectionOf(coreSignal) match { - case SpecifiedDirection.Input => IO(Input(chiselTypeOf(coreSignal))) - case SpecifiedDirection.Output => IO(Output(chiselTypeOf(coreSignal))) - case SpecifiedDirection.Flip => IO(Flipped(chiselTypeOf(coreSignal))) - case _ => IO(chiselTypeOf(coreSignal)) - } - - val iocells = IOCell.generateFromSignal(coreSignal, padSignal, inFn, outFn, anaFn) - (padSignal, iocells).asInstanceOf[(T, Seq[IOCell])] + val padSignal = IO(DataMirror.internal.chiselTypeClone[T](coreSignal)) + val iocells = IOCell.generateFromSignal(coreSignal, padSignal, name, inFn, outFn, anaFn) + (padSignal, iocells) } -*/ - def generateFromSignal[T <: Data](coreSignal: T, padSignal: T, + def generateFromSignal[T <: Data](coreSignal: T, padSignal: T, name: Option[String] = None, inFn: () => DigitalInIOCell = IOCell.exampleInput, outFn: () => DigitalOutIOCell = IOCell.exampleOutput, anaFn: () => AnalogIOCell = IOCell.exampleAnalog): Seq[IOCell] = @@ -106,6 +98,7 @@ object IOCell { } else { require(coreSignal.getWidth == 1, "Analogs wider than 1 bit are not supported because we can't bit-select Analogs (https://github.com/freechipsproject/chisel3/issues/536)") val iocell = anaFn() + name.foreach(n => iocell.suggestName(n)) iocell.io.core <> coreSignal padSignal <> iocell.io.pad Seq(iocell) @@ -115,6 +108,7 @@ object IOCell { DataMirror.directionOf(coreSignal) match { case ActualDirection.Input => { val iocell = inFn() + name.foreach(n => iocell.suggestName(n)) coreSignal := iocell.io.i.asClock iocell.io.ie := true.B iocell.io.pad := padSignal.asUInt.asBool @@ -122,6 +116,7 @@ object IOCell { } case ActualDirection.Output => { val iocell = outFn() + name.foreach(n => iocell.suggestName(n)) iocell.io.o := coreSignal.asUInt.asBool iocell.io.oe := true.B padSignal := iocell.io.pad.asClock @@ -136,14 +131,16 @@ object IOCell { // This dummy assignment will prevent invalid firrtl from being emitted DataMirror.directionOf(coreSignal) match { case ActualDirection.Input => coreSignal := 0.U + case _ => {} } Seq() } else { DataMirror.directionOf(coreSignal) match { case ActualDirection.Input => { // this type cast is safe because we guarantee that padSignal and coreSignal are the same type (T), but the compiler is not smart enough to know that - val iocells = padSignal.asInstanceOf[Bits].asBools.map { w => + val iocells = padSignal.asInstanceOf[Bits].asBools.zipWithIndex.map { case (w, i) => val iocell = inFn() + name.foreach(n => iocell.suggestName(n + "_" + i)) iocell.io.pad := w iocell.io.ie := true.B iocell @@ -152,8 +149,9 @@ object IOCell { iocells } case ActualDirection.Output => { - val iocells = coreSignal.asBools.map { w => + val iocells = coreSignal.asBools.zipWithIndex.map { case (w, i) => val iocell = outFn() + name.foreach(n => iocell.suggestName(n + "_" + i)) iocell.io.o := w iocell.io.oe := true.B iocell @@ -169,17 +167,17 @@ object IOCell { // this type cast is safe because we 
guarantee that padSignal and coreSignal are the same type (T), but the compiler is not smart enough to know that val padSignal2 = padSignal.asInstanceOf[Vec[Data]] require(padSignal2.size == coreSignal.size, "size of Vec for padSignal and coreSignal must be the same") - coreSignal.zip(padSignal2).foldLeft(Seq.empty[IOCell]) { case (total, (core, pad)) => - val ios = IOCell.generateFromSignal(core, pad, inFn, outFn, anaFn) + coreSignal.zip(padSignal2).zipWithIndex.foldLeft(Seq.empty[IOCell]) { case (total, ((core, pad), i)) => + val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + i), inFn, outFn, anaFn) total ++ ios } } case coreSignal: Record => { // this type cast is safe because we guarantee that padSignal and coreSignal are the same type (T), but the compiler is not smart enough to know that val padSignal2 = padSignal.asInstanceOf[Record] - coreSignal.elements.foldLeft(Seq.empty[IOCell]) { case (total, (name, core)) => - val pad = padSignal2.elements(name) - val ios = IOCell.generateFromSignal(core, pad, inFn, outFn, anaFn) + coreSignal.elements.foldLeft(Seq.empty[IOCell]) { case (total, (eltName, core)) => + val pad = padSignal2.elements(eltName) + val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + eltName), inFn, outFn, anaFn) total ++ ios } } From a6731f6a5e631b7a9b531661fadda35a99d428f1 Mon Sep 17 00:00:00 2001 From: John Wright Date: Mon, 30 Mar 2020 12:33:44 -0700 Subject: [PATCH 161/273] Rename example -> generic --- iocell/src/main/scala/chisel/IOCell.scala | 28 +++++++++++------------ 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/chisel/IOCell.scala index cd69dccd8..c3f899eea 100644 --- a/iocell/src/main/scala/chisel/IOCell.scala +++ b/iocell/src/main/scala/chisel/IOCell.scala @@ -49,37 +49,37 @@ abstract class DigitalOutIOCell extends IOCell { val io: DigitalOutIOCellBundle } -class ExampleAnalogIOCell extends AnalogIOCell { +class GenericAnalogIOCell extends AnalogIOCell { val io = IO(new AnalogIOCellBundle) addResource("/barstools/iocell/vsrc/IOCell.v") } -class ExampleDigitalGPIOCell extends DigitalGPIOCell { +class GenericDigitalGPIOCell extends DigitalGPIOCell { val io = IO(new DigitalGPIOCellBundle) addResource("/barstools/iocell/vsrc/IOCell.v") } -class ExampleDigitalInIOCell extends DigitalInIOCell { +class GenericDigitalInIOCell extends DigitalInIOCell { val io = IO(new DigitalInIOCellBundle) addResource("/barstools/iocell/vsrc/IOCell.v") } -class ExampleDigitalOutIOCell extends DigitalOutIOCell { +class GenericDigitalOutIOCell extends DigitalOutIOCell { val io = IO(new DigitalOutIOCellBundle) addResource("/barstools/iocell/vsrc/IOCell.v") } object IOCell { - def exampleAnalog() = Module(new ExampleAnalogIOCell) - def exampleGPIO() = Module(new ExampleDigitalGPIOCell) - def exampleInput() = Module(new ExampleDigitalInIOCell) - def exampleOutput() = Module(new ExampleDigitalOutIOCell) + def genericAnalog() = Module(new GenericAnalogIOCell) + def genericGPIO() = Module(new GenericDigitalGPIOCell) + def genericInput() = Module(new GenericDigitalInIOCell) + def genericOutput() = Module(new GenericDigitalOutIOCell) def generateIOFromSignal[T <: Data](coreSignal: T, name: Option[String] = None, - inFn: () => DigitalInIOCell = IOCell.exampleInput, - outFn: () => DigitalOutIOCell = IOCell.exampleOutput, - anaFn: () => AnalogIOCell = IOCell.exampleAnalog): (T, Seq[IOCell]) = + inFn: () => DigitalInIOCell = IOCell.genericInput, + outFn: () => DigitalOutIOCell = 
IOCell.genericOutput, + anaFn: () => AnalogIOCell = IOCell.genericAnalog): (T, Seq[IOCell]) = { val padSignal = IO(DataMirror.internal.chiselTypeClone[T](coreSignal)) val iocells = IOCell.generateFromSignal(coreSignal, padSignal, name, inFn, outFn, anaFn) @@ -87,9 +87,9 @@ object IOCell { } def generateFromSignal[T <: Data](coreSignal: T, padSignal: T, name: Option[String] = None, - inFn: () => DigitalInIOCell = IOCell.exampleInput, - outFn: () => DigitalOutIOCell = IOCell.exampleOutput, - anaFn: () => AnalogIOCell = IOCell.exampleAnalog): Seq[IOCell] = + inFn: () => DigitalInIOCell = IOCell.genericInput, + outFn: () => DigitalOutIOCell = IOCell.genericOutput, + anaFn: () => AnalogIOCell = IOCell.genericAnalog): Seq[IOCell] = { coreSignal match { case coreSignal: Analog => { From 62df79934ed84d4a9cafb3ac54543e3141c00062 Mon Sep 17 00:00:00 2001 From: John Wright Date: Mon, 30 Mar 2020 13:10:00 -0700 Subject: [PATCH 162/273] Remove type casts; use a tuple match instead --- iocell/src/main/scala/chisel/IOCell.scala | 25 +++++++++-------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/chisel/IOCell.scala index c3f899eea..1e623888a 100644 --- a/iocell/src/main/scala/chisel/IOCell.scala +++ b/iocell/src/main/scala/chisel/IOCell.scala @@ -91,8 +91,8 @@ object IOCell { outFn: () => DigitalOutIOCell = IOCell.genericOutput, anaFn: () => AnalogIOCell = IOCell.genericAnalog): Seq[IOCell] = { - coreSignal match { - case coreSignal: Analog => { + (coreSignal: T, padSignal: T) match { + case (coreSignal: Analog, padSignal: Analog) => { if (coreSignal.getWidth == 0) { Seq() } else { @@ -104,7 +104,7 @@ object IOCell { Seq(iocell) } } - case coreSignal: Clock => { + case (coreSignal: Clock, padSignal: Clock) => { DataMirror.directionOf(coreSignal) match { case ActualDirection.Input => { val iocell = inFn() @@ -125,7 +125,7 @@ object IOCell { case _ => throw new Exception("Unknown direction") } } - case coreSignal: Bits => { + case (coreSignal: Bits, padSignal: Bits) => { require(padSignal.getWidth == coreSignal.getWidth, "padSignal and coreSignal must be the same width") if (padSignal.getWidth == 0) { // This dummy assignment will prevent invalid firrtl from being emitted @@ -137,8 +137,7 @@ object IOCell { } else { DataMirror.directionOf(coreSignal) match { case ActualDirection.Input => { - // this type cast is safe because we guarantee that padSignal and coreSignal are the same type (T), but the compiler is not smart enough to know that - val iocells = padSignal.asInstanceOf[Bits].asBools.zipWithIndex.map { case (w, i) => + val iocells = padSignal.asBools.zipWithIndex.map { case (w, i) => val iocell = inFn() name.foreach(n => iocell.suggestName(n + "_" + i)) iocell.io.pad := w @@ -163,20 +162,16 @@ object IOCell { } } } - case coreSignal: Vec[Data] => { - // this type cast is safe because we guarantee that padSignal and coreSignal are the same type (T), but the compiler is not smart enough to know that - val padSignal2 = padSignal.asInstanceOf[Vec[Data]] - require(padSignal2.size == coreSignal.size, "size of Vec for padSignal and coreSignal must be the same") - coreSignal.zip(padSignal2).zipWithIndex.foldLeft(Seq.empty[IOCell]) { case (total, ((core, pad), i)) => + case (coreSignal: Vec[Data], padSignal: Vec[Data]) => { + require(padSignal.size == coreSignal.size, "size of Vec for padSignal and coreSignal must be the same") + coreSignal.zip(padSignal).zipWithIndex.foldLeft(Seq.empty[IOCell]) { case (total, ((core, 
pad), i)) => val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + i), inFn, outFn, anaFn) total ++ ios } } - case coreSignal: Record => { - // this type cast is safe because we guarantee that padSignal and coreSignal are the same type (T), but the compiler is not smart enough to know that - val padSignal2 = padSignal.asInstanceOf[Record] + case (coreSignal: Record, padSignal: Record) => { coreSignal.elements.foldLeft(Seq.empty[IOCell]) { case (total, (eltName, core)) => - val pad = padSignal2.elements(eltName) + val pad = padSignal.elements(eltName) val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + eltName), inFn, outFn, anaFn) total ++ ios } From bc3f8a42b30801ae35b96b5792b1aacbb883fd2d Mon Sep 17 00:00:00 2001 From: John Wright Date: Mon, 30 Mar 2020 13:50:27 -0700 Subject: [PATCH 163/273] Forgot to update the verilog modules --- iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v b/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v index d0be6b0bc..b3ee47ced 100644 --- a/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v +++ b/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v @@ -2,7 +2,7 @@ `timescale 1ns/1ps -module ExampleAnalogIOCell( +module GenericAnalogIOCell( inout pad, inout core ); @@ -12,7 +12,7 @@ module ExampleAnalogIOCell( endmodule -module ExampleDigitalGPIOCell( +module GenericDigitalGPIOCell( inout pad, output i, input ie, @@ -25,7 +25,7 @@ module ExampleDigitalGPIOCell( endmodule -module ExampleDigitalInIOCell( +module GenericDigitalInIOCell( input pad, output i, input ie @@ -35,7 +35,7 @@ module ExampleDigitalInIOCell( endmodule -module ExampleDigitalOutIOCell( +module GenericDigitalOutIOCell( output pad, input o, output oe From c043f344b8b7624b8cc029d184481681df69b9b2 Mon Sep 17 00:00:00 2001 From: John Wright Date: Mon, 30 Mar 2020 19:15:19 -0700 Subject: [PATCH 164/273] Code review feedback --- iocell/src/main/scala/chisel/IOCell.scala | 93 ++++++++++++++++++----- 1 file changed, 76 insertions(+), 17 deletions(-) diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/chisel/IOCell.scala index 1e623888a..eb3bb9947 100644 --- a/iocell/src/main/scala/chisel/IOCell.scala +++ b/iocell/src/main/scala/chisel/IOCell.scala @@ -6,11 +6,31 @@ import chisel3._ import chisel3.util.{Cat, HasBlackBoxResource} import chisel3.experimental.{Analog, DataMirror, IO} +// The following four IO cell bundle types are bare-minimum functional connections +// for modeling 4 different IO cell scenarios. The intention is that the user +// would create wrapper modules that extend these interfaces with additional +// control signals. These are loosely similar to the sifive-blocks PinCtrl bundles +// (https://github.com/sifive/sifive-blocks/blob/master/src/main/scala/devices/pinctrl/PinCtrl.scala), +// but we want to avoid a dependency on an external libraries. 
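// As a purely illustrative sketch (names below are hypothetical, not part of this
// repository), a process-specific wrapper of the digital-input interface defined
// below might add an extra pad control like so:
//
//   class FoundryDigitalInIOCellBundle extends DigitalInIOCellBundle {
//     val pullEnable = Input(Bool()) // extra control signal offered by the foundry cell
//   }
//   class FoundryDigitalInIOCell extends DigitalInIOCell {
//     val io = IO(new FoundryDigitalInIOCellBundle)
//   }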
+ +/** + * The base IO bundle for an analog signal (typically something with no digital buffers inside) + * pad: off-chip (external) connection + * core: internal connection + */ class AnalogIOCellBundle extends Bundle { - val pad = Analog(1.W) - val core = Analog(1.W) + val pad = Analog(1.W) // Pad/bump signal (off-chip) + val core = Analog(1.W) // core signal (on-chip) } +/** + * The base IO bundle for a signal with runtime-controllable direction + * pad: off-chip (external) connection + * i: input to chip logic (output from IO cell) + * ie: enable signal for i + * o: output from chip logic (input to IO cell) + * oe: enable signal for o + */ class DigitalGPIOCellBundle extends Bundle { val pad = Analog(1.W) val i = Output(Bool()) @@ -19,19 +39,31 @@ class DigitalGPIOCellBundle extends Bundle { val oe = Input(Bool()) } +/** + * The base IO bundle for a digital output signal + * pad: off-chip (external) connection + * o: output from chip logic (input to IO cell) + * oe: enable signal for o + */ class DigitalOutIOCellBundle extends Bundle { val pad = Output(Bool()) val o = Input(Bool()) val oe = Input(Bool()) } +/** + * The base IO bundle for a digital input signal + * pad: off-chip (external) connection + * i: input to chip logic (output from IO cell) + * ie: enable signal for i + */ class DigitalInIOCellBundle extends Bundle { val pad = Input(Bool()) val i = Output(Bool()) val ie = Input(Bool()) } -abstract class IOCell extends BlackBox with HasBlackBoxResource +abstract class IOCell extends BlackBox abstract class AnalogIOCell extends IOCell { val io: AnalogIOCellBundle @@ -49,24 +81,28 @@ abstract class DigitalOutIOCell extends IOCell { val io: DigitalOutIOCellBundle } -class GenericAnalogIOCell extends AnalogIOCell { - val io = IO(new AnalogIOCellBundle) +// The following Generic IO cell black boxes have verilog models that mimic a very simple +// implementation of an IO cell. For building a real chip, it is important to implement +// and use similar classes which wrap the foundry-specific IO cells. + +trait GenericIOCell extends HasBlackBoxResource { addResource("/barstools/iocell/vsrc/IOCell.v") } -class GenericDigitalGPIOCell extends DigitalGPIOCell { +class GenericAnalogIOCell extends AnalogIOCell with IsGenericIOCell { + val io = IO(new AnalogIOCellBundle) +} + +class GenericDigitalGPIOCell extends DigitalGPIOCell with IsGenericIOCell { val io = IO(new DigitalGPIOCellBundle) - addResource("/barstools/iocell/vsrc/IOCell.v") } -class GenericDigitalInIOCell extends DigitalInIOCell { +class GenericDigitalInIOCell extends DigitalInIOCell with IsGenericIOCell { val io = IO(new DigitalInIOCellBundle) - addResource("/barstools/iocell/vsrc/IOCell.v") } -class GenericDigitalOutIOCell extends DigitalOutIOCell { +class GenericDigitalOutIOCell extends DigitalOutIOCell with IsGenericIOCell { val io = IO(new DigitalOutIOCellBundle) - addResource("/barstools/iocell/vsrc/IOCell.v") } object IOCell { @@ -76,6 +112,16 @@ object IOCell { def genericInput() = Module(new GenericDigitalInIOCell) def genericOutput() = Module(new GenericDigitalOutIOCell) + /** + * From within a RawModule or MultiIOModule context, generate new module IOs from a given + * signal and return the new IO and a Seq containing all generated IO cells. 
+ * @param coreSignal The signal onto which to add IO cells + * @param name An optional name or name prefix to use for naming IO cells + * @param inFn A function to generate a DigitalInIOCell to use for input signals + * @param outFn A function to generate a DigitalOutIOCell to use for output signals + * @param anaFn A function to generate an AnalogIOCell to use for analog signals + * @return A tuple of (the generated IO data node, a Seq of all generated IO cell instances) + */ def generateIOFromSignal[T <: Data](coreSignal: T, name: Option[String] = None, inFn: () => DigitalInIOCell = IOCell.genericInput, outFn: () => DigitalOutIOCell = IOCell.genericOutput, @@ -86,6 +132,17 @@ object IOCell { (padSignal, iocells) } + /** + * Connect two identical signals together by adding IO cells between them and return a Seq + * containing all generated IO cells. + * @param coreSignal The core-side (internal) signal onto which to connect/add IO cells + * @param padSignal The pad-side (external) signal onto which to connect IO cells + * @param name An optional name or name prefix to use for naming IO cells + * @param inFn A function to generate a DigitalInIOCell to use for input signals + * @param outFn A function to generate a DigitalOutIOCell to use for output signals + * @param anaFn A function to generate an AnalogIOCell to use for analog signals + * @return A Seq of all generated IO cell instances + */ def generateFromSignal[T <: Data](coreSignal: T, padSignal: T, name: Option[String] = None, inFn: () => DigitalInIOCell = IOCell.genericInput, outFn: () => DigitalOutIOCell = IOCell.genericOutput, @@ -122,7 +179,7 @@ object IOCell { padSignal := iocell.io.pad.asClock Seq(iocell) } - case _ => throw new Exception("Unknown direction") + case _ => throw new Exception("Clock signal does not have a direction and cannot be matched to an IOCell") } } case (coreSignal: Bits, padSignal: Bits) => { @@ -137,28 +194,30 @@ object IOCell { } else { DataMirror.directionOf(coreSignal) match { case ActualDirection.Input => { - val iocells = padSignal.asBools.zipWithIndex.map { case (w, i) => + val iocells = padSignal.asBools.zipWithIndex.map { case (sig, i) => val iocell = inFn() name.foreach(n => iocell.suggestName(n + "_" + i)) - iocell.io.pad := w + iocell.io.pad := sig iocell.io.ie := true.B iocell } + // Note that the reverse here is because Cat(Seq(a,b,c,d)) yields abcd, but a is index 0 of the Seq coreSignal := Cat(iocells.map(_.io.i).reverse) iocells } case ActualDirection.Output => { - val iocells = coreSignal.asBools.zipWithIndex.map { case (w, i) => + val iocells = coreSignal.asBools.zipWithIndex.map { case (sig, i) => val iocell = outFn() name.foreach(n => iocell.suggestName(n + "_" + i)) - iocell.io.o := w + iocell.io.o := sig iocell.io.oe := true.B iocell } + // Note that the reverse here is because Cat(Seq(a,b,c,d)) yields abcd, but a is index 0 of the Seq padSignal := Cat(iocells.map(_.io.pad).reverse) iocells } - case _ => throw new Exception("Unknown direction") + case _ => throw new Exception("Bits signal does not have a direction and cannot be matched to IOCell(s)") } } } From 6638f5c77e878315eb7dff13e03456ce9e56c118 Mon Sep 17 00:00:00 2001 From: John Wright Date: Tue, 31 Mar 2020 13:06:01 -0700 Subject: [PATCH 165/273] More CR feedback, fix bug introduced in previous commit --- iocell/src/main/scala/chisel/IOCell.scala | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/chisel/IOCell.scala index 
eb3bb9947..340a4e9d5 100644 --- a/iocell/src/main/scala/chisel/IOCell.scala +++ b/iocell/src/main/scala/chisel/IOCell.scala @@ -85,7 +85,7 @@ abstract class DigitalOutIOCell extends IOCell { // implementation of an IO cell. For building a real chip, it is important to implement // and use similar classes which wrap the foundry-specific IO cells. -trait GenericIOCell extends HasBlackBoxResource { +trait IsGenericIOCell extends HasBlackBoxResource { addResource("/barstools/iocell/vsrc/IOCell.v") } @@ -196,7 +196,10 @@ object IOCell { case ActualDirection.Input => { val iocells = padSignal.asBools.zipWithIndex.map { case (sig, i) => val iocell = inFn() - name.foreach(n => iocell.suggestName(n + "_" + i)) + // Note that we are relying on chisel deterministically naming this in the index order (which it does) + // This has the side-effect of naming index 0 with no _0 suffix, which is how chisel names other signals + // An alternative solution would be to suggestName(n + "_" + i) + name.foreach(n => iocell.suggestName(n)) iocell.io.pad := sig iocell.io.ie := true.B iocell @@ -208,7 +211,10 @@ object IOCell { case ActualDirection.Output => { val iocells = coreSignal.asBools.zipWithIndex.map { case (sig, i) => val iocell = outFn() - name.foreach(n => iocell.suggestName(n + "_" + i)) + // Note that we are relying on chisel deterministically naming this in the index order (which it does) + // This has the side-effect of naming index 0 with no _0 suffix, which is how chisel names other signals + // An alternative solution would be to suggestName(n + "_" + i) + name.foreach(n => iocell.suggestName(n)) iocell.io.o := sig iocell.io.oe := true.B iocell From e230e8cf3f7c0bc3c958cab22c5d90d195ca6b01 Mon Sep 17 00:00:00 2001 From: David Biancolin Date: Fri, 17 Apr 2020 22:05:48 -0700 Subject: [PATCH 166/273] Update IOCell gen to handle abstract and async reset (#79) --- iocell/src/main/scala/chisel/IOCell.scala | 75 +++++++++++++++-------- 1 file changed, 48 insertions(+), 27 deletions(-) diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/chisel/IOCell.scala index 340a4e9d5..b453e508c 100644 --- a/iocell/src/main/scala/chisel/IOCell.scala +++ b/iocell/src/main/scala/chisel/IOCell.scala @@ -120,15 +120,19 @@ object IOCell { * @param inFn A function to generate a DigitalInIOCell to use for input signals * @param outFn A function to generate a DigitalOutIOCell to use for output signals * @param anaFn A function to generate an AnalogIOCell to use for analog signals + * @param abstractResetAsAsync When set, will coerce abstract resets to + * AsyncReset, and otherwise to Bool (sync reset) * @return A tuple of (the generated IO data node, a Seq of all generated IO cell instances) */ def generateIOFromSignal[T <: Data](coreSignal: T, name: Option[String] = None, inFn: () => DigitalInIOCell = IOCell.genericInput, outFn: () => DigitalOutIOCell = IOCell.genericOutput, - anaFn: () => AnalogIOCell = IOCell.genericAnalog): (T, Seq[IOCell]) = + anaFn: () => AnalogIOCell = IOCell.genericAnalog, + abstractResetAsAsync: Boolean = false): (T, Seq[IOCell]) = { val padSignal = IO(DataMirror.internal.chiselTypeClone[T](coreSignal)) - val iocells = IOCell.generateFromSignal(coreSignal, padSignal, name, inFn, outFn, anaFn) + val resetFn = if (abstractResetAsAsync) toAsyncReset else toSyncReset + val iocells = IOCell.generateFromSignal(coreSignal, padSignal, name, inFn, outFn, anaFn, resetFn) (padSignal, iocells) } @@ -143,12 +147,47 @@ object IOCell { * @param anaFn A function to generate an 
AnalogIOCell to use for analog signals * @return A Seq of all generated IO cell instances */ - def generateFromSignal[T <: Data](coreSignal: T, padSignal: T, name: Option[String] = None, + val toSyncReset: (Reset) => Bool = _.toBool + val toAsyncReset: (Reset) => AsyncReset = _.asAsyncReset + def generateFromSignal[T <: Data, R <: Reset]( + coreSignal: T, + padSignal: T, + name: Option[String] = None, inFn: () => DigitalInIOCell = IOCell.genericInput, outFn: () => DigitalOutIOCell = IOCell.genericOutput, - anaFn: () => AnalogIOCell = IOCell.genericAnalog): Seq[IOCell] = + anaFn: () => AnalogIOCell = IOCell.genericAnalog, + concretizeResetFn : (Reset) => R = toSyncReset): Seq[IOCell] = { - (coreSignal: T, padSignal: T) match { + def genCell[T <: Data]( + castToBool: (T) => Bool, + castFromBool: (Bool) => T)( + coreSignal: T, + padSignal: T): Seq[IOCell] = { + DataMirror.directionOf(coreSignal) match { + case ActualDirection.Input => { + val iocell = inFn() + name.foreach(n => iocell.suggestName(n)) + coreSignal := castFromBool(iocell.io.i) + iocell.io.ie := true.B + iocell.io.pad := castToBool(padSignal) + Seq(iocell) + } + case ActualDirection.Output => { + val iocell = outFn() + name.foreach(n => iocell.suggestName(n)) + iocell.io.o := castToBool(coreSignal) + iocell.io.oe := true.B + padSignal := castFromBool(iocell.io.pad) + Seq(iocell) + } + case _ => throw new Exception(s"Signal does not have a direction and cannot be matched to an IOCell") + } + } + def genCellForClock = genCell[Clock](_.asUInt.asBool, _.asClock) _ + def genCellForAsyncReset = genCell[AsyncReset](_.asBool, _.asAsyncReset) _ + def genCellForAbstractReset = genCell[Reset](_.asBool, concretizeResetFn) _ + + (coreSignal, padSignal) match { case (coreSignal: Analog, padSignal: Analog) => { if (coreSignal.getWidth == 0) { Seq() @@ -161,27 +200,9 @@ object IOCell { Seq(iocell) } } - case (coreSignal: Clock, padSignal: Clock) => { - DataMirror.directionOf(coreSignal) match { - case ActualDirection.Input => { - val iocell = inFn() - name.foreach(n => iocell.suggestName(n)) - coreSignal := iocell.io.i.asClock - iocell.io.ie := true.B - iocell.io.pad := padSignal.asUInt.asBool - Seq(iocell) - } - case ActualDirection.Output => { - val iocell = outFn() - name.foreach(n => iocell.suggestName(n)) - iocell.io.o := coreSignal.asUInt.asBool - iocell.io.oe := true.B - padSignal := iocell.io.pad.asClock - Seq(iocell) - } - case _ => throw new Exception("Clock signal does not have a direction and cannot be matched to an IOCell") - } - } + case (coreSignal: Clock, padSignal: Clock) => genCellForClock(coreSignal, padSignal) + case (coreSignal: AsyncReset, padSignal: AsyncReset) => genCellForAsyncReset(coreSignal, padSignal) + case (coreSignal: Reset, padSignal: Reset) => genCellForAbstractReset(coreSignal, padSignal) case (coreSignal: Bits, padSignal: Bits) => { require(padSignal.getWidth == coreSignal.getWidth, "padSignal and coreSignal must be the same width") if (padSignal.getWidth == 0) { @@ -227,7 +248,7 @@ object IOCell { } } } - case (coreSignal: Vec[Data], padSignal: Vec[Data]) => { + case (coreSignal: Vec[_], padSignal: Vec[_]) => { require(padSignal.size == coreSignal.size, "size of Vec for padSignal and coreSignal must be the same") coreSignal.zip(padSignal).zipWithIndex.foldLeft(Seq.empty[IOCell]) { case (total, ((core, pad), i)) => val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + i), inFn, outFn, anaFn) From acda0a34901ad642cf2130cec326001eab3f1fab Mon Sep 17 00:00:00 2001 From: Albert Magyar Date: Wed, 6 May 
2020 21:40:23 +0000 Subject: [PATCH 167/273] Changes to tapeout transforms to support FIRRTL 1.3 --- .../transforms/.pads/AddIOPadsTransform.scala | 4 +- .../scala/transforms/.pads/AddPadFrame.scala | 6 +- .../transforms/AddSuffixToModuleNames.scala | 65 +++++++++++++++++++ .../transforms/AvoidExtModuleCollisions.scala | 23 +++++++ .../transforms/ConvertToExtModPass.scala | 19 ++++-- .../src/main/scala/transforms/Generate.scala | 56 ++++++++-------- .../scala/transforms/ReParentCircuit.scala | 26 +++++--- .../RenameModulesAndInstances.scala | 45 ------------- 8 files changed, 150 insertions(+), 94 deletions(-) create mode 100644 tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala create mode 100644 tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala delete mode 100644 tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala diff --git a/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala b/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala index d427d0d92..b586e8bea 100644 --- a/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala +++ b/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala @@ -39,7 +39,7 @@ class AddIOPadsTransform extends Transform with SeqTransformBased { PadPlacementFile.generate(techLoc, targetDir, padFrameName, portPads, supplyPads) transformList ++= Seq( Legalize, - ResolveGenders, + ResolveFlows, // Types really need to be known... InferTypes, new AddPadFrame(x.topModName, padFrameName, topInternalName, portPads, supplyPads), @@ -48,7 +48,7 @@ class AddIOPadsTransform extends Transform with SeqTransformBased { InferTypes, Uniquify, ResolveKinds, - ResolveGenders + ResolveFlows ) // Expects BlackBox helper to be run after to inline pad Verilog! val ret = runTransforms(state) diff --git a/tapeout/src/main/scala/transforms/.pads/AddPadFrame.scala b/tapeout/src/main/scala/transforms/.pads/AddPadFrame.scala index 853cfced4..62447bd5b 100644 --- a/tapeout/src/main/scala/transforms/.pads/AddPadFrame.scala +++ b/tapeout/src/main/scala/transforms/.pads/AddPadFrame.scala @@ -112,8 +112,8 @@ class AddPadFrame( // Normal verilog in/out can be mapped to uint, sint, or clocktype, so need cast case _ => val padBBType = UIntType(getWidth(p.port.tpe)) - val padInRef = WSubField(padRef, DigitalPad.inName, padBBType, UNKNOWNGENDER) - val padOutRef = WSubField(padRef, DigitalPad.outName, padBBType, UNKNOWNGENDER) + val padInRef = WSubField(padRef, DigitalPad.inName, padBBType, UnknownFlow) + val padOutRef = WSubField(padRef, DigitalPad.outName, padBBType, UnknownFlow) val (rhsPadIn, lhsPadOut) = p.portDirection match { case Input => (extRef, intRef) case Output => (intRef, extRef) @@ -130,4 +130,4 @@ class AddPadFrame( Module(NoInfo, padFrameName, ports = intPorts ++ extPorts, body = Block(ioPadInsts ++ connects ++ supplyPadInsts)) } -} \ No newline at end of file +} diff --git a/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala b/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala new file mode 100644 index 000000000..0e1a3739d --- /dev/null +++ b/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala @@ -0,0 +1,65 @@ +// See LICENSE for license details. 
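// AddSuffixToModuleNames (below) appends the suffix carried by a
// ModuleNameSuffixAnnotation to every module name that is not whitelisted by a
// KeepNameAnnotation, and retargets instances to the renamed modules. As a
// hypothetical example, given
//   ModuleNameSuffixAnnotation(CircuitTarget("TestHarness"), "_inTestHarness")
// a module named Queue is renamed to Queue_inTestHarness, while whitelisted modules
// (such as the synthesized top) keep their original names.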
+ +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.ir._ +import firrtl.annotations._ +import firrtl.Mappers._ + + +case class KeepNameAnnotation(target: ModuleTarget) + extends SingleTargetAnnotation[ModuleTarget] { + def duplicate(n: ModuleTarget) = this.copy(n) +} + +case class ModuleNameSuffixAnnotation(target: CircuitTarget, suffix: String) + extends SingleTargetAnnotation[CircuitTarget] { + def duplicate(n: CircuitTarget) = this.copy(target = n) +} + +// This doesn't rename ExtModules under the assumption that they're some +// Verilog black box and therefore can't be renamed. Since the point is to +// allow FIRRTL to be linked together using "cat" and ExtModules don't get +// emitted, this should be safe. +class AddSuffixToModuleNames extends Transform { + def inputForm = LowForm + def outputForm = LowForm + + def processAnnos(annos: AnnotationSeq): (AnnotationSeq, (String) => String) = { + val whitelist = annos.collect({ case KeepNameAnnotation(tgt) => tgt.module }).toSet + val newAnnos = annos.filterNot(_.isInstanceOf[ModuleNameSuffixAnnotation]) + val suffixes = annos.collect({ case ModuleNameSuffixAnnotation(_, suffix) => suffix }) + require(suffixes.length <= 1) + + val suffix = suffixes.headOption.getOrElse("") + val renamer = { name: String => if (whitelist(name)) name else name + suffix } + (newAnnos, renamer) + } + + def renameInstanceModules(renamer: (String) => String)(stmt: Statement): Statement = { + stmt match { + case m: DefInstance => new DefInstance(m.info, m.name, renamer(m.module)) + case m: WDefInstance => new WDefInstance(m.info, m.name, renamer(m.module), m.tpe) + case s => s map renameInstanceModules(renamer) + } + } + + def run(state: CircuitState, renamer: (String) => String): (Circuit, RenameMap) = { + val myRenames = RenameMap() + val c = state.circuit + val modulesx = c.modules.map { + case m if (renamer(m.name) != m.name) => + myRenames.record(ModuleTarget(c.main, m.name), ModuleTarget(c.main, renamer(m.name))) + m.map(renamer).map(renameInstanceModules(renamer)) + case m => m.map(renameInstanceModules(renamer)) + } + (Circuit(c.info, modulesx, c.main), myRenames) + } + + def execute(state: CircuitState): CircuitState = { + val (newAnnos, renamer) = processAnnos(state.annotations) + val (ret, renames) = run(state, renamer) + state.copy(circuit = ret, annotations = newAnnos, renames = Some(renames)) + } +} diff --git a/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala b/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala new file mode 100644 index 000000000..df1e272e7 --- /dev/null +++ b/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala @@ -0,0 +1,23 @@ +// See LICENSE for license details. 
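// AvoidExtModuleCollisions (below) appends the ExtModules carried by a
// LinkExtModulesAnnotation to the circuit. In the top/harness flow (see Generate.scala
// further down) this keeps the black-box definitions discovered during the top-level
// compile available to the harness compile, so the separately emitted top and harness
// can still be linked together.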
+ +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.ir._ +import firrtl.annotations._ + +case class LinkExtModulesAnnotation(mustLink: Seq[ExtModule]) extends NoTargetAnnotation + +class AvoidExtModuleCollisions extends Transform { + def inputForm = HighForm + def outputForm = HighForm + def execute(state: CircuitState): CircuitState = { + val mustLink = state.annotations.flatMap { + case LinkExtModulesAnnotation(mustLink) => mustLink + case _ => Nil + } + val newAnnos = state.annotations.filterNot(_.isInstanceOf[LinkExtModulesAnnotation]) + state.copy(circuit = state.circuit.copy(modules = state.circuit.modules ++ mustLink), annotations = newAnnos) + } +} + diff --git a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala index 6f12e9b31..83486fd51 100644 --- a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala +++ b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala @@ -7,16 +7,19 @@ import firrtl.annotations._ import firrtl.ir._ import firrtl.passes.Pass +case class ConvertToExtModAnnotation(target: ModuleTarget) + extends SingleTargetAnnotation[ModuleTarget] { + def duplicate(n: ModuleTarget) = this.copy(n) +} + // Converts some modules to external modules, based on a given function. If // that function returns "true" then the module is converted into an ExtModule, // otherwise it's left alone. -class ConvertToExtMod(classify: (Module) => Boolean) extends Transform { +class ConvertToExtMod extends Transform { def inputForm = HighForm def outputForm = HighForm - - def run(state: CircuitState): (Circuit, RenameMap) = { - + def run(state: CircuitState, makeExt: Set[String]): (Circuit, RenameMap) = { val renames = RenameMap() val c = state.circuit renames.setCircuit(c.main) @@ -30,7 +33,7 @@ class ConvertToExtMod(classify: (Module) => Boolean) extends Transform { case x => x.foreachStmt(findDeadNames) } } - if (classify(m)) { + if (makeExt(m.name)) { m.foreachStmt(findDeadNames) removing.foreach { name => renames.record(ReferenceTarget(c.main, m.name, Nil, name, Nil), Nil) @@ -44,7 +47,9 @@ class ConvertToExtMod(classify: (Module) => Boolean) extends Transform { } def execute(state: CircuitState): CircuitState = { - val (ret, renames) = run(state) - state.copy(circuit = ret, renames = Some(renames)) + val makeExt = state.annotations.collect({ case ConvertToExtModAnnotation(tgt) => tgt.module }).toSet + val newAnnos = state.annotations.filterNot(_.isInstanceOf[ConvertToExtModAnnotation]) + val (ret, renames) = run(state, makeExt) + state.copy(circuit = ret, annotations = newAnnos, renames = Some(renames)) } } diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 32912c162..89df8b556 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -11,7 +11,7 @@ import firrtl.annotations.AnnotationYamlProtocol._ import firrtl.passes.memlib.ReplSeqMemAnnotation import firrtl.transforms.BlackBoxResourceFileNameAnno import net.jcazevedo.moultingyaml._ -import com.typesafe.scalalogging.LazyLogging +import logger.LazyLogging trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => var tapeoutOptions = TapeoutOptions() @@ -161,36 +161,27 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => // FIRRTL options lazy val annoFiles = firrtlOptions.annotationFileNames - lazy val topTransforms: Seq[Transform] = { - Seq( - new 
ReParentCircuit(synTop.get), - new RemoveUnusedModules - ) - } + val topTransforms = Seq( + new ReParentCircuit, + new RemoveUnusedModules + ) + + lazy val rootCircuitTarget = CircuitTarget(harnessTop.get) + + lazy val topAnnos = synTop.map(st => ReParentCircuitAnnotation(rootCircuitTarget.module(st))) ++ + tapeoutOptions.topDotfOut.map(BlackBoxResourceFileNameAnno(_)) lazy val topOptions = firrtlOptions.copy( customTransforms = firrtlOptions.customTransforms ++ topTransforms, - annotations = firrtlOptions.annotations ++ tapeoutOptions.topDotfOut.map(BlackBoxResourceFileNameAnno(_)) + annotations = firrtlOptions.annotations ++ topAnnos ) - class AvoidExtModuleCollisions(mustLink: Seq[ExtModule]) extends Transform { - def inputForm = HighForm - def outputForm = HighForm - def execute(state: CircuitState): CircuitState = { - state.copy(circuit = state.circuit.copy(modules = state.circuit.modules ++ mustLink)) - } - } - - private def harnessTransforms(topExtModules: Seq[ExtModule]): Seq[Transform] = { - // XXX this is a hack, we really should be checking the masters to see if they are ExtModules - val externals = Set(harnessTop.get, synTop.get, "SimSerial", "SimDTM") - Seq( - new ConvertToExtMod((m) => m.name == synTop.get), - new RemoveUnusedModules, - new AvoidExtModuleCollisions(topExtModules), - new RenameModulesAndInstances((old) => if (externals contains old) old else (old + "_in" + harnessTop.get)) - ) - } + val harnessTransforms = Seq( + new ConvertToExtMod, + new RemoveUnusedModules, + new AvoidExtModuleCollisions, + new AddSuffixToModuleNames + ) // Dump firrtl and annotation files protected def dump(res: FirrtlExecutionSuccess, firFile: Option[String], annoFile: Option[String]): Unit = { @@ -230,17 +221,26 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => // Execute top and get list of ExtModules to avoid collisions val topExtModules = executeTop() + val externals = Seq("SimSerial", "SimDTM") ++ harnessTop ++ synTop + + val harnessAnnos = + tapeoutOptions.harnessDotfOut.map(BlackBoxResourceFileNameAnno(_)).toSeq ++ + externals.map(ext => KeepNameAnnotation(rootCircuitTarget.module(ext))) ++ + harnessTop.map(ht => ModuleNameSuffixAnnotation(rootCircuitTarget, s"_in${ht}")) ++ + synTop.map(st => ConvertToExtModAnnotation(rootCircuitTarget.module(st))) :+ + LinkExtModulesAnnotation(topExtModules) + // For harness run, change some firrtlOptions (below) for harness phase // customTransforms: setup harness transforms, add AvoidExtModuleCollisions // outputFileNameOverride: change to harnessOutput // conf file must change to harnessConf by mapping annotations optionsManager.firrtlOptions = firrtlOptions.copy( - customTransforms = firrtlOptions.customTransforms ++ harnessTransforms(topExtModules), + customTransforms = firrtlOptions.customTransforms ++ harnessTransforms, outputFileNameOverride = tapeoutOptions.harnessOutput.get, annotations = firrtlOptions.annotations.map({ case ReplSeqMemAnnotation(i, o) => ReplSeqMemAnnotation(i, tapeoutOptions.harnessConf.get) case a => a - }) ++ tapeoutOptions.harnessDotfOut.map(BlackBoxResourceFileNameAnno(_)) + }) ++ harnessAnnos ) val harnessResult = firrtl.Driver.execute(optionsManager) harnessResult match { diff --git a/tapeout/src/main/scala/transforms/ReParentCircuit.scala b/tapeout/src/main/scala/transforms/ReParentCircuit.scala index e1a426640..574d9dda5 100644 --- a/tapeout/src/main/scala/transforms/ReParentCircuit.scala +++ b/tapeout/src/main/scala/transforms/ReParentCircuit.scala @@ -7,18 +7,26 @@ import firrtl.ir._ 
import firrtl.passes.Pass import firrtl.annotations._ -class ReParentCircuit(newTopName: String) extends Transform { +case class ReParentCircuitAnnotation(target: ModuleTarget) + extends SingleTargetAnnotation[ModuleTarget] { + def duplicate(n: ModuleTarget) = this.copy(n) +} + +class ReParentCircuit extends Transform { def inputForm = HighForm def outputForm = HighForm - def run(c: Circuit, newTopName: String): (Circuit, RenameMap) = { - val myRenames = RenameMap() - myRenames.record(CircuitTarget(c.main), CircuitTarget(newTopName)) - (Circuit(c.info, c.modules, newTopName), myRenames) - } - def execute(state: CircuitState): CircuitState = { - val (ret, renames) = run(state.circuit, newTopName) - state.copy(circuit = ret, renames = Some(renames)) + val c = state.circuit + val newTopName = state.annotations.collectFirst { + case ReParentCircuitAnnotation(tgt) => tgt.module + } + val newCircuit = c.copy(main = newTopName.getOrElse(c.main)) + val mainRename = newTopName.map { s => + val rmap = RenameMap() + rmap.record(CircuitTarget(c.main), CircuitTarget(s)) + rmap + } + state.copy(circuit = newCircuit, renames = mainRename) } } diff --git a/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala b/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala deleted file mode 100644 index 27388929e..000000000 --- a/tapeout/src/main/scala/transforms/RenameModulesAndInstances.scala +++ /dev/null @@ -1,45 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms - -import firrtl._ -import firrtl.annotations._ -import firrtl.ir._ -import firrtl.passes.Pass - -// This doesn't rename ExtModules under the assumption that they're some -// Verilog black box and therefore can't be renamed. Since the point is to -// allow FIRRTL to be linked together using "cat" and ExtModules don't get -// emitted, this should be safe. 
-class RenameModulesAndInstances(rename: (String) => String) extends Transform { - def inputForm = LowForm - def outputForm = LowForm - - def renameInstances(body: Statement): Statement = { - body match { - case m: DefInstance => new DefInstance(m.info, m.name, rename(m.module)) - case m: WDefInstance => new WDefInstance(m.info, m.name, rename(m.module), m.tpe) - case b: Block => new Block( b.stmts map { s => renameInstances(s) } ) - case s: Statement => s - } - } - - def run(state: CircuitState): (Circuit, RenameMap) = { - val myRenames = RenameMap() - val c = state.circuit - val modulesx = c.modules.map { - case m: ExtModule => - myRenames.record(ModuleTarget(c.main, m.name), ModuleTarget(c.main, rename(m.name))) - m.copy(name = rename(m.name)) - case m: Module => - myRenames.record(ModuleTarget(c.main, m.name), ModuleTarget(c.main, rename(m.name))) - new Module(m.info, rename(m.name), m.ports, renameInstances(m.body)) - } - (Circuit(c.info, modulesx, c.main), myRenames) - } - - def execute(state: CircuitState): CircuitState = { - val (ret, renames) = run(state) - state.copy(circuit = ret, renames = Some(renames)) - } -} From 757c39ac1cd5bc79fad948a703ea23f5748e2d93 Mon Sep 17 00:00:00 2001 From: Albert Magyar Date: Wed, 6 May 2020 21:40:47 +0000 Subject: [PATCH 168/273] Change macrocompiler to support FIRRTL 1.3 -- not backwards compatible --- macros/src/main/scala/MacroCompiler.scala | 46 +++++++++-------------- macros/src/main/scala/Utils.scala | 8 +++- 2 files changed, 23 insertions(+), 31 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 652e36e99..5d1be9c7d 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -289,8 +289,8 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], * address bits into account. */ if (mem.src.depth > lib.src.depth) { mem.src.ports foreach { port => - val high = ceilLog2(mem.src.depth) - val low = ceilLog2(lib.src.depth) + val high = MacroCompilerMath.ceilLog2(mem.src.depth) + val low = MacroCompilerMath.ceilLog2(lib.src.depth) val ref = WRef(port.address.name) val nodeName = s"${ref.name}_sel" val tpe = UIntType(IntWidth(high-low)) @@ -738,7 +738,7 @@ class MacroCompiler extends Compiler { def transforms: Seq[Transform] = Seq(new MacroCompilerTransform) ++ - getLoweringTransforms(firrtl.HighForm, firrtl.LowForm) ++ + getLoweringTransforms(firrtl.ChirrtlForm, firrtl.LowForm) ++ Seq(new MacroCompilerOptimizations) } @@ -834,36 +834,24 @@ object MacroCompiler extends App { ) )) ) - // Append a NoDCEAnnotation to avoid dead code elimination removing the non-parent SRAMs - val state = CircuitState(circuit, HighForm, annotations :+ NoDCEAnnotation) - - // Run the compiler. - val result = new MacroCompiler().compileAndEmit(state) - - // Write output FIRRTL file. - params.get(Firrtl) match { - case Some(firrtlFile: String) => { - val fileWriter = new FileWriter(new File(firrtlFile)) - fileWriter.write(result.circuit.serialize) - fileWriter.close() - } - case None => - } - // Write output Verilog file. - params.get(Verilog) match { - case Some(verilogFile: String) => { - // Open the writer for the output Verilog file. - val verilogWriter = new FileWriter(new File(verilogFile)) + // The actual MacroCompilerTransform basically just generates an input circuit + val macroCompilerInput = CircuitState(circuit, HighForm, annotations) + val macroCompiled = (new MacroCompilerTransform).execute(macroCompilerInput) - // Extract Verilog circuit and write it. 
- verilogWriter.write(result.getEmittedCircuit.value) - // Close the writer. - verilogWriter.close() - } - case None => + // Since the MacroCompiler defines its own CLI, reconcile this with FIRRTL options + val firOptions = new ExecutionOptionsManager("macrocompiler") with HasFirrtlOptions { + firrtlOptions = FirrtlExecutionOptions( + outputFileNameOverride = params.get(Verilog).getOrElse(""), + noDCE = true, + firrtlSource = Some(macroCompiled.circuit.serialize) + ) } + + // Run FIRRTL compiler + Driver.execute(firOptions) + params.get(HammerIR) match { case Some(hammerIRFile: String) => { val lines = Source.fromFile(hammerIRFile).getLines().toList diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/Utils.scala index ba8c664df..c416ca6a0 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/Utils.scala @@ -6,12 +6,16 @@ import firrtl._ import firrtl.ir._ import firrtl.PrimOps import firrtl.passes.memlib.{MemConf, MemPort, ReadPort, WritePort, ReadWritePort, MaskedWritePort, MaskedReadWritePort} -import firrtl.Utils.{ceilLog2, BoolType} +import firrtl.Utils.BoolType import mdf.macrolib.{Constant, MacroPort, SRAMMacro} import mdf.macrolib.{PolarizedPort, PortPolarity, ActiveLow, ActiveHigh, NegativeEdge, PositiveEdge, MacroExtraPort} import java.io.File import scala.language.implicitConversions +object MacroCompilerMath { + def ceilLog2(x: BigInt): Int = (x-1).bitLength +} + class FirrtlMacroPort(port: MacroPort) { val src = port @@ -19,7 +23,7 @@ class FirrtlMacroPort(port: MacroPort) { val isWriter = port.input.nonEmpty && port.output.isEmpty val isReadWriter = port.input.nonEmpty && port.output.nonEmpty - val addrType = UIntType(IntWidth(ceilLog2(port.depth.get) max 1)) + val addrType = UIntType(IntWidth(MacroCompilerMath.ceilLog2(port.depth.get) max 1)) val dataType = UIntType(IntWidth(port.width.get)) val maskType = UIntType(IntWidth(port.width.get / port.effectiveMaskGran)) From c4e5f66c5e795c744b7f3be2780f7f2bb15e9e23 Mon Sep 17 00:00:00 2001 From: Albert Magyar Date: Wed, 13 May 2020 10:26:40 -0700 Subject: [PATCH 169/273] Provide MidForm circuit to MacroCompilerTransform --- macros/src/main/scala/MacroCompiler.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 5d1be9c7d..cb24bc9cf 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -836,7 +836,7 @@ object MacroCompiler extends App { ) // The actual MacroCompilerTransform basically just generates an input circuit - val macroCompilerInput = CircuitState(circuit, HighForm, annotations) + val macroCompilerInput = CircuitState(circuit, MidForm, annotations) val macroCompiled = (new MacroCompilerTransform).execute(macroCompilerInput) From b1c1f01c906dbc38183b993fd05ac2889b2cc8e4 Mon Sep 17 00:00:00 2001 From: Colin Schmidt Date: Fri, 29 May 2020 15:09:45 -0700 Subject: [PATCH 170/273] Fix direction of output enable in output io cell --- iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v b/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v index b3ee47ced..8023fb70d 100644 --- a/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v +++ b/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v @@ -38,7 +38,7 @@ endmodule module GenericDigitalOutIOCell( output pad, input o, - output oe + input oe ); 
assign pad = oe ? o : 1'bz; From aa1c90c4ccb73c2c379550f3296892cc81e8a195 Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Tue, 30 Jun 2020 12:24:05 -0700 Subject: [PATCH 171/273] Fix IOCells generation * Fixes Bool wires matching both Reset and Bits --- iocell/src/main/scala/chisel/IOCell.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/chisel/IOCell.scala index b453e508c..14c296031 100644 --- a/iocell/src/main/scala/chisel/IOCell.scala +++ b/iocell/src/main/scala/chisel/IOCell.scala @@ -202,7 +202,6 @@ object IOCell { } case (coreSignal: Clock, padSignal: Clock) => genCellForClock(coreSignal, padSignal) case (coreSignal: AsyncReset, padSignal: AsyncReset) => genCellForAsyncReset(coreSignal, padSignal) - case (coreSignal: Reset, padSignal: Reset) => genCellForAbstractReset(coreSignal, padSignal) case (coreSignal: Bits, padSignal: Bits) => { require(padSignal.getWidth == coreSignal.getWidth, "padSignal and coreSignal must be the same width") if (padSignal.getWidth == 0) { @@ -248,6 +247,7 @@ object IOCell { } } } + case (coreSignal: Reset, padSignal: Reset) => genCellForAbstractReset(coreSignal, padSignal) case (coreSignal: Vec[_], padSignal: Vec[_]) => { require(padSignal.size == coreSignal.size, "size of Vec for padSignal and coreSignal must be the same") coreSignal.zip(padSignal).zipWithIndex.foldLeft(Seq.empty[IOCell]) { case (total, ((core, pad), i)) => From ba681676f338af158023c99b4c802009aa0b601b Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Fri, 4 Sep 2020 13:29:31 -0700 Subject: [PATCH 172/273] Clean up IOCell types and parameterization --- iocell/src/main/scala/chisel/IOCell.scala | 83 ++++++++++------------- 1 file changed, 35 insertions(+), 48 deletions(-) diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/chisel/IOCell.scala index 14c296031..452aadedb 100644 --- a/iocell/src/main/scala/chisel/IOCell.scala +++ b/iocell/src/main/scala/chisel/IOCell.scala @@ -4,7 +4,7 @@ package barstools.iocell.chisel import chisel3._ import chisel3.util.{Cat, HasBlackBoxResource} -import chisel3.experimental.{Analog, DataMirror, IO} +import chisel3.experimental.{Analog, DataMirror, IO, BaseModule} // The following four IO cell bundle types are bare-minimum functional connections // for modeling 4 different IO cell scenarios. The intention is that the user @@ -63,76 +63,68 @@ class DigitalInIOCellBundle extends Bundle { val ie = Input(Bool()) } -abstract class IOCell extends BlackBox +trait IOCell extends BaseModule -abstract class AnalogIOCell extends IOCell { - val io: AnalogIOCellBundle +trait AnalogIOCell extends IOCell { + val io = IO(new AnalogIOCellBundle) } -abstract class DigitalGPIOCell extends IOCell { - val io: DigitalGPIOCellBundle +trait DigitalGPIOCell extends IOCell { + val io = IO(new DigitalGPIOCellBundle) } -abstract class DigitalInIOCell extends IOCell { - val io: DigitalInIOCellBundle +trait DigitalInIOCell extends IOCell { + val io = IO(new DigitalInIOCellBundle) } -abstract class DigitalOutIOCell extends IOCell { - val io: DigitalOutIOCellBundle +trait DigitalOutIOCell extends IOCell { + val io = IO(new DigitalOutIOCellBundle) } // The following Generic IO cell black boxes have verilog models that mimic a very simple // implementation of an IO cell. For building a real chip, it is important to implement // and use similar classes which wrap the foundry-specific IO cells. 
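// A purely illustrative sketch (hypothetical names) of a foundry-specific binding built
// on the IOCellTypeParams factory trait introduced below:
//
//   case class MyFoundryIOCellParams() extends IOCellTypeParams {
//     def analog() = Module(new MyFoundryAnalogIOCell)    // wrappers around the
//     def gpio()   = Module(new MyFoundryGPIOCell)        // foundry pad-library cells,
//     def input()  = Module(new MyFoundryDigitalInIOCell) // each extending the
//     def output() = Module(new MyFoundryDigitalOutIOCell) // corresponding IOCell trait
//   }
//
//   val (padSignal, cells) =
//     IOCell.generateIOFromSignal(coreSignal, Some("pad_data"), MyFoundryIOCellParams())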
-trait IsGenericIOCell extends HasBlackBoxResource { +abstract class GenericIOCell extends BlackBox with HasBlackBoxResource { addResource("/barstools/iocell/vsrc/IOCell.v") } -class GenericAnalogIOCell extends AnalogIOCell with IsGenericIOCell { - val io = IO(new AnalogIOCellBundle) -} +class GenericAnalogIOCell extends GenericIOCell with AnalogIOCell +class GenericDigitalGPIOCell extends GenericIOCell with DigitalGPIOCell +class GenericDigitalInIOCell extends GenericIOCell with DigitalInIOCell +class GenericDigitalOutIOCell extends GenericIOCell with DigitalOutIOCell -class GenericDigitalGPIOCell extends DigitalGPIOCell with IsGenericIOCell { - val io = IO(new DigitalGPIOCellBundle) +trait IOCellTypeParams { + def analog(): AnalogIOCell + def gpio(): DigitalGPIOCell + def input(): DigitalInIOCell + def output(): DigitalOutIOCell } -class GenericDigitalInIOCell extends DigitalInIOCell with IsGenericIOCell { - val io = IO(new DigitalInIOCellBundle) -} - -class GenericDigitalOutIOCell extends DigitalOutIOCell with IsGenericIOCell { - val io = IO(new DigitalOutIOCellBundle) +case class GenericIOCellParams() extends IOCellTypeParams { + def analog() = Module(new GenericAnalogIOCell) + def gpio() = Module(new GenericDigitalGPIOCell) + def input() = Module(new GenericDigitalInIOCell) + def output() = Module(new GenericDigitalOutIOCell) } object IOCell { - - def genericAnalog() = Module(new GenericAnalogIOCell) - def genericGPIO() = Module(new GenericDigitalGPIOCell) - def genericInput() = Module(new GenericDigitalInIOCell) - def genericOutput() = Module(new GenericDigitalOutIOCell) - /** * From within a RawModule or MultiIOModule context, generate new module IOs from a given * signal and return the new IO and a Seq containing all generated IO cells. * @param coreSignal The signal onto which to add IO cells * @param name An optional name or name prefix to use for naming IO cells - * @param inFn A function to generate a DigitalInIOCell to use for input signals - * @param outFn A function to generate a DigitalOutIOCell to use for output signals - * @param anaFn A function to generate an AnalogIOCell to use for analog signals * @param abstractResetAsAsync When set, will coerce abstract resets to * AsyncReset, and otherwise to Bool (sync reset) * @return A tuple of (the generated IO data node, a Seq of all generated IO cell instances) */ def generateIOFromSignal[T <: Data](coreSignal: T, name: Option[String] = None, - inFn: () => DigitalInIOCell = IOCell.genericInput, - outFn: () => DigitalOutIOCell = IOCell.genericOutput, - anaFn: () => AnalogIOCell = IOCell.genericAnalog, + typeParams: IOCellTypeParams = GenericIOCellParams(), abstractResetAsAsync: Boolean = false): (T, Seq[IOCell]) = { val padSignal = IO(DataMirror.internal.chiselTypeClone[T](coreSignal)) val resetFn = if (abstractResetAsAsync) toAsyncReset else toSyncReset - val iocells = IOCell.generateFromSignal(coreSignal, padSignal, name, inFn, outFn, anaFn, resetFn) + val iocells = IOCell.generateFromSignal(coreSignal, padSignal, name, typeParams, resetFn) (padSignal, iocells) } @@ -142,9 +134,6 @@ object IOCell { * @param coreSignal The core-side (internal) signal onto which to connect/add IO cells * @param padSignal The pad-side (external) signal onto which to connect IO cells * @param name An optional name or name prefix to use for naming IO cells - * @param inFn A function to generate a DigitalInIOCell to use for input signals - * @param outFn A function to generate a DigitalOutIOCell to use for output signals - * @param anaFn A 
function to generate an AnalogIOCell to use for analog signals * @return A Seq of all generated IO cell instances */ val toSyncReset: (Reset) => Bool = _.toBool @@ -153,9 +142,7 @@ object IOCell { coreSignal: T, padSignal: T, name: Option[String] = None, - inFn: () => DigitalInIOCell = IOCell.genericInput, - outFn: () => DigitalOutIOCell = IOCell.genericOutput, - anaFn: () => AnalogIOCell = IOCell.genericAnalog, + typeParams: IOCellTypeParams = GenericIOCellParams(), concretizeResetFn : (Reset) => R = toSyncReset): Seq[IOCell] = { def genCell[T <: Data]( @@ -165,7 +152,7 @@ object IOCell { padSignal: T): Seq[IOCell] = { DataMirror.directionOf(coreSignal) match { case ActualDirection.Input => { - val iocell = inFn() + val iocell = typeParams.input() name.foreach(n => iocell.suggestName(n)) coreSignal := castFromBool(iocell.io.i) iocell.io.ie := true.B @@ -173,7 +160,7 @@ object IOCell { Seq(iocell) } case ActualDirection.Output => { - val iocell = outFn() + val iocell = typeParams.output() name.foreach(n => iocell.suggestName(n)) iocell.io.o := castToBool(coreSignal) iocell.io.oe := true.B @@ -193,7 +180,7 @@ object IOCell { Seq() } else { require(coreSignal.getWidth == 1, "Analogs wider than 1 bit are not supported because we can't bit-select Analogs (https://github.com/freechipsproject/chisel3/issues/536)") - val iocell = anaFn() + val iocell = typeParams.analog() name.foreach(n => iocell.suggestName(n)) iocell.io.core <> coreSignal padSignal <> iocell.io.pad @@ -215,7 +202,7 @@ object IOCell { DataMirror.directionOf(coreSignal) match { case ActualDirection.Input => { val iocells = padSignal.asBools.zipWithIndex.map { case (sig, i) => - val iocell = inFn() + val iocell = typeParams.input() // Note that we are relying on chisel deterministically naming this in the index order (which it does) // This has the side-effect of naming index 0 with no _0 suffix, which is how chisel names other signals // An alternative solution would be to suggestName(n + "_" + i) @@ -230,7 +217,7 @@ object IOCell { } case ActualDirection.Output => { val iocells = coreSignal.asBools.zipWithIndex.map { case (sig, i) => - val iocell = outFn() + val iocell = typeParams.output() // Note that we are relying on chisel deterministically naming this in the index order (which it does) // This has the side-effect of naming index 0 with no _0 suffix, which is how chisel names other signals // An alternative solution would be to suggestName(n + "_" + i) @@ -251,14 +238,14 @@ object IOCell { case (coreSignal: Vec[_], padSignal: Vec[_]) => { require(padSignal.size == coreSignal.size, "size of Vec for padSignal and coreSignal must be the same") coreSignal.zip(padSignal).zipWithIndex.foldLeft(Seq.empty[IOCell]) { case (total, ((core, pad), i)) => - val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + i), inFn, outFn, anaFn) + val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + i), typeParams) total ++ ios } } case (coreSignal: Record, padSignal: Record) => { coreSignal.elements.foldLeft(Seq.empty[IOCell]) { case (total, (eltName, core)) => val pad = padSignal.elements(eltName) - val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + eltName), inFn, outFn, anaFn) + val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + eltName), typeParams) total ++ ios } } From e4cd2b01fe5e4e89770a81443cf609657b683a67 Mon Sep 17 00:00:00 2001 From: chick Date: Thu, 10 Sep 2020 14:35:10 -0700 Subject: [PATCH 173/273] This is mess clean it up --- .scalafmt.conf | 26 ++ build.sbt | 4 +- 
macros/src/test/scala/MacroCompilerSpec.scala | 3 +- .../transforms/.clkgen/ClkAnnotations.scala | 247 ------------------ .../scala/transforms/.clkgen/ClkDivider.scala | 130 --------- .../transforms/.clkgen/ClkSrcTransform.scala | 34 --- .../.clkgen/CreateClkConstraints.scala | 152 ----------- .../transforms/.pads/ChiselTopModule.scala | 60 +++-- .../transforms/.pads/PadAnnotations.scala | 79 ++++-- .../scala/transforms/.clkgen/ClkGenSpec.scala | 181 ------------- 10 files changed, 120 insertions(+), 796 deletions(-) create mode 100644 .scalafmt.conf delete mode 100644 tapeout/src/main/scala/transforms/.clkgen/ClkAnnotations.scala delete mode 100644 tapeout/src/main/scala/transforms/.clkgen/ClkDivider.scala delete mode 100644 tapeout/src/main/scala/transforms/.clkgen/ClkSrcTransform.scala delete mode 100644 tapeout/src/main/scala/transforms/.clkgen/CreateClkConstraints.scala delete mode 100644 tapeout/src/test/scala/transforms/.clkgen/ClkGenSpec.scala diff --git a/.scalafmt.conf b/.scalafmt.conf new file mode 100644 index 000000000..f74e55047 --- /dev/null +++ b/.scalafmt.conf @@ -0,0 +1,26 @@ +version = 2.6.4 + +maxColumn = 120 +align = most +continuationIndent.defnSite = 2 +assumeStandardLibraryStripMargin = true +docstrings = ScalaDoc +lineEndings = preserve +includeCurlyBraceInSelectChains = false +danglingParentheses = true + +align.tokens.add = [ + { + code = ":" + } +] + +newlines.alwaysBeforeCurlyBraceLambdaParams = false +newlines.alwaysBeforeMultilineDef = false +newlines.implicitParamListModifierForce = [before] + +verticalMultiline.atDefnSite = true + +optIn.annotationNewlines = true + +rewrite.rules = [SortImports, PreferCurlyFors, AvoidInfix] diff --git a/build.sbt b/build.sbt index 9ec44bf7c..c01a71e65 100644 --- a/build.sbt +++ b/build.sbt @@ -1,8 +1,8 @@ // See LICENSE for license details. val defaultVersions = Map( - "chisel3" -> "3.2-SNAPSHOT", - "chisel-iotesters" -> "1.3-SNAPSHOT" + "chisel3" -> "3.4-SNAPSHOT", + "chisel-iotesters" -> "1.5-SNAPSHOT" ) lazy val commonSettings = Seq( diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index dfecc0c1c..0bc0f486a 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -1,9 +1,10 @@ +// See LICENSE for license details. 
+ package barstools.macros import firrtl.ir.{Circuit, NoInfo} import firrtl.passes.RemoveEmpty import firrtl.Parser.parse -import firrtl.Utils.ceilLog2 import java.io.{File, StringWriter} import mdf.macrolib.SRAMMacro diff --git a/tapeout/src/main/scala/transforms/.clkgen/ClkAnnotations.scala b/tapeout/src/main/scala/transforms/.clkgen/ClkAnnotations.scala deleted file mode 100644 index 3a2f9ba3c..000000000 --- a/tapeout/src/main/scala/transforms/.clkgen/ClkAnnotations.scala +++ /dev/null @@ -1,247 +0,0 @@ -package barstools.tapeout.transforms.clkgen - -import net.jcazevedo.moultingyaml._ -import firrtl.annotations._ -import chisel3.experimental._ -import chisel3._ -import firrtl._ -import firrtl.transforms.DedupModules - -object ClkAnnotationsYaml extends DefaultYamlProtocol { - implicit val _clksrc = yamlFormat3(ClkSrc) - implicit val _sink = yamlFormat1(Sink) - implicit val _clkport = yamlFormat2(ClkPortAnnotation) - implicit val _genclk = yamlFormat4(GeneratedClk) - implicit val _clkmod = yamlFormat2(ClkModAnnotation) -} -case class ClkSrc(period: Double, waveform: Seq[Double] = Seq(), async: Seq[String] = Seq()) { - def getWaveform = if (waveform == Seq.empty) Seq(0, period/2) else waveform - // async = ids of top level clocks that are async with this clk - // Default is 50% duty cycle, period units is default - require(getWaveform.sorted == getWaveform, "Waveform edges must be in order") - require(getWaveform.length == 2, "Must specify time for rising edge, then time for falling edge") -} - -case class Sink(src: Option[ClkSrc] = None) - -case class ClkPortAnnotation(tag: Option[Sink] = None, id: String) { - import ClkAnnotationsYaml._ - def serialize: String = this.toYaml.prettyPrint -} - -abstract class ClkModType { - def serialize: String -} -case object ClkMux extends ClkModType { - def serialize: String = "mux" -} -case object ClkDiv extends ClkModType { - def serialize: String = "div" -} -case object ClkGen extends ClkModType { - def serialize: String = "gen" -} - -// Unlike typical SDC, starts at 0. -// Otherwise, see pg. 63 of "Constraining Designs for Synthesis and Timing Analysis" -// by S. Gangadharan -// original clk: |-----|_____|-----|_____| -// edges: 0 1 2 3 4 -// div. 
by 4, 50% duty cycle --> edges = 0, 2, 4 -// ---> |-----------|___________| -// sources = source id's -case class GeneratedClk( - id: String, - sources: Seq[String] = Seq(), - referenceEdges: Seq[Int] = Seq(), - period: Option[Double] = None) { - require(referenceEdges.sorted == referenceEdges, "Edges must be in order for generated clk") - if (referenceEdges.nonEmpty) require(referenceEdges.length % 2 == 1, "# of reference edges must be odd!") -} - -case class ClkModAnnotation(tpe: String, generatedClks: Seq[GeneratedClk]) { - - def modType: ClkModType = HasClkAnnotation.modType(tpe) - - modType match { - case ClkDiv => - generatedClks foreach { c => - require(c.referenceEdges.nonEmpty, "Reference edges must be defined for clk divider!") - require(c.sources.length == 1, "Clk divider output can only have 1 source") - require(c.period.isEmpty, "No period should be specified for clk divider output") - } - case ClkMux => - generatedClks foreach { c => - require(c.referenceEdges.isEmpty, "Reference edges must not be defined for clk mux!") - require(c.period.isEmpty, "No period should be specified for clk mux output") - require(c.sources.nonEmpty, "Clk muxes must have sources!") - } - case ClkGen => - generatedClks foreach { c => - require(c.referenceEdges.isEmpty, "Reference edges must not be defined for clk gen!") - require(c.sources.isEmpty, "Clk generators shouldn't have constrained sources") - require(c.period.nonEmpty, "Clk generator output period should be specified!") - } - } - import ClkAnnotationsYaml._ - def serialize: String = this.toYaml.prettyPrint -} - -abstract class FirrtlClkTransformAnnotation { - def targetName: String -} - -// Firrtl version -case class TargetClkModAnnoF(target: ModuleName, anno: ClkModAnnotation) extends FirrtlClkTransformAnnotation with SingleTargetAnnotation[ModuleName] { - def duplicate(n: ModuleName): TargetClkModAnnoF = this.copy(target = n) - def getAnno = Annotation(target, classOf[ClkSrcTransform], anno.serialize) - def targetName = target.name - def modType = anno.modType - def generatedClks = anno.generatedClks - def getAllClkPorts = anno.generatedClks.map(x => - List(List(x.id), x.sources).flatten).flatten.distinct.map(Seq(targetName, _).mkString(".")) -} - -// Chisel version -case class TargetClkModAnnoC(target: Module, anno: ClkModAnnotation) extends ChiselAnnotation { - def toFirrtl = TargetClkModAnnoF(target.toNamed, anno) -} - -// Firrtl version -case class TargetClkPortAnnoF(target: ComponentName, anno: ClkPortAnnotation) extends FirrtlClkTransformAnnotation with SingleTargetAnnotation[ComponentName] { - def duplicate(n: ComponentName): TargetClkPortAnnoF = this.copy(target = n) - def getAnno = Annotation(target, classOf[ClkSrcTransform], anno.serialize) - def targetName = Seq(target.module.name, target.name).mkString(".") - def modId = Seq(target.module.name, anno.id).mkString(".") - def sink = anno.tag -} - -// Chisel version -case class TargetClkPortAnnoC(target: Element, anno: ClkPortAnnotation) extends ChiselAnnotation { - def toFirrtl = TargetClkPortAnnoF(target.toNamed, anno) -} - -object HasClkAnnotation { - - import ClkAnnotationsYaml._ - - def modType(tpe: String): ClkModType = tpe match { - case s: String if s == ClkMux.serialize => ClkMux - case s: String if s == ClkDiv.serialize => ClkDiv - case s: String if s == ClkGen.serialize => ClkGen - case _ => throw new Exception("Clock module annotaiton type invalid") - } - - def unapply(a: Annotation): Option[FirrtlClkTransformAnnotation] = a match { - case Annotation(f, t, s) if t == 
classOf[ClkSrcTransform] => f match { - case m: ModuleName => - Some(TargetClkModAnnoF(m, s.parseYaml.convertTo[ClkModAnnotation])) - case c: ComponentName => - Some(TargetClkPortAnnoF(c, s.parseYaml.convertTo[ClkPortAnnotation])) - case _ => throw new Exception("Clk source annotation only valid on module or component!") - } - case _ => None - } - - def apply(annos: Seq[Annotation]): Option[(Seq[TargetClkModAnnoF],Seq[TargetClkPortAnnoF])] = { - // Get all clk-related annotations - val clkAnnos = annos.map(x => unapply(x)).flatten - val targets = clkAnnos.map(x => x.targetName) - require(targets.distinct.length == targets.length, "Only 1 clk related annotation is allowed per component/module") - if (clkAnnos.length == 0) None - else { - val componentAnnos = clkAnnos.filter { - case TargetClkPortAnnoF(ComponentName(_, ModuleName(_, _)), _) => true - case _ => false - }.map(x => x.asInstanceOf[TargetClkPortAnnoF]) - val associatedMods = componentAnnos.map(x => x.target.module.name) - val moduleAnnos = clkAnnos.filter { - case TargetClkModAnnoF(ModuleName(m, _), _) => - require(associatedMods contains m, "Clk modules should always have clk port annotations!") - true - case _ => false - }.map(x => x.asInstanceOf[TargetClkModAnnoF]) - Some((moduleAnnos, componentAnnos)) - } - } - -} - -// Applies to both black box + normal module -trait IsClkModule { - - self: chisel3.Module => - - doNotDedup(this) - - private def extractElementNames(signal: Data): Seq[String] = { - val names = signal match { - case elt: Record => - elt.elements.map { case (key, value) => extractElementNames(value).map(x => key + "_" + x) }.toSeq.flatten - case elt: Vec[_] => - elt.zipWithIndex.map { case (elt, i) => extractElementNames(elt).map(x => i + "_" + x) }.toSeq.flatten - case elt: Element => Seq("") - case elt => throw new Exception(s"Cannot extractElementNames for type ${elt.getClass}") - } - names.map(s => s.stripSuffix("_")) - } - - // TODO: Replace! 
- def extractElements(signal: Data): Seq[Element] = { - signal match { - case elt: Record => - elt.elements.map { case (key, value) => extractElements(value) }.toSeq.flatten - case elt: Vec[_] => - elt.map { elt => extractElements(elt) }.toSeq.flatten - case elt: Element => Seq(elt) - case elt => throw new Exception(s"Cannot extractElements for type ${elt.getClass}") - } - } - - def getIOName(signal: Element): String = { - val possibleNames = extractElements(io).zip(extractElementNames(io)).map { - case (sig, name) if sig == signal => Some(name) - case _ => None - }.flatten - if (possibleNames.length == 1) possibleNames.head - else throw new Exception("You can only get the name of an io port!") - } - - def annotateDerivedClks(tpe: ClkModType, generatedClks: Seq[GeneratedClk]): Unit = - annotateDerivedClks(ClkModAnnotation(tpe.serialize, generatedClks)) - def annotateDerivedClks(anno: ClkModAnnotation): Unit = annotateDerivedClks(this, anno) - def annotateDerivedClks(m: Module, anno: ClkModAnnotation): Unit = - annotate(TargetClkModAnnoC(m, anno)) - - def annotateClkPort(p: Element): Unit = annotateClkPort(p, None, "") - def annotateClkPort(p: Element, sink: Sink): Unit = annotateClkPort(p, Some(sink), "") - def annotateClkPort(p: Element, id: String): Unit = annotateClkPort(p, None, id) - def annotateClkPort(p: Element, sink: Sink, id: String): Unit = annotateClkPort(p, Some(sink), id) - def annotateClkPort(p: Element, sink: Option[Sink], id: String): Unit = { - // If no id is specified, it'll try to figure out a name, assuming p is an io port - val newId = id match { - case "" => - getIOName(p) - case _ => id - } - annotateClkPort(p, ClkPortAnnotation(sink, newId)) - } - - def annotateClkPort(p: Element, anno: ClkPortAnnotation): Unit = { - DataMirror.directionOf(p) match { - case chisel3.core.ActualDirection.Input => - require(anno.tag.nonEmpty, "Module inputs must be clk sinks") - require(anno.tag.get.src.isEmpty, - "Clock module (not top) input clks should not have clk period, etc. specified") - case chisel3.core.ActualDirection.Output => - require(anno.tag.isEmpty, "Module outputs must not be clk sinks (they're sources!)") - case _ => - throw new Exception("Clk port direction must be specified!") - } - p match { - case _: chisel3.core.Clock => - case _ => throw new Exception("Clock port must be of type Clock") - } - annotate(TargetClkPortAnnoC(p, anno)) - } -} diff --git a/tapeout/src/main/scala/transforms/.clkgen/ClkDivider.scala b/tapeout/src/main/scala/transforms/.clkgen/ClkDivider.scala deleted file mode 100644 index 23402982f..000000000 --- a/tapeout/src/main/scala/transforms/.clkgen/ClkDivider.scala +++ /dev/null @@ -1,130 +0,0 @@ -package barstools.tapeout.transforms.clkgen - -import chisel3.experimental.{withClockAndReset, withClock, withReset} -import chisel3._ -import barstools.tapeout.transforms._ -import chisel3.util.HasBlackBoxInline - -// WARNING: ONLY WORKS WITH VERILATOR B/C YOU NEED ASYNC RESET! 
- -class SEClkDividerIO(phases: Seq[Int]) extends Bundle { - val reset = Input(Bool()) - val inClk = Input(Clock()) - val outClks = Output(CustomIndexedBundle(Clock(), phases)) - override def cloneType = (new SEClkDividerIO(phases)).asInstanceOf[this.type] -} - -class SEClkDividerBB(phases: Seq[Int], f: String) extends BlackBox with HasBlackBoxInline { - val verilog = scala.io.Source.fromFile(f).getLines.mkString("\n") - // names without io - val io = IO(new SEClkDividerIO(phases)) - val modName = this.getClass.getSimpleName - require(verilog contains modName, "Clk divider Verilog module must be named ClkDividerBB") - io.elements foreach { case (field, elt) => - require(verilog contains field, s"Verilog file should contain io ${field}")} - setInline(s"${modName}.v", verilog) -} - -class AsyncRegInit extends BlackBox with HasBlackBoxInline { - val io = IO(new Bundle { - val clk = Input(Clock()) - val reset = Input(Bool()) - val init = Input(Bool()) - val in = Input(Bool()) - val out = Output(Bool()) - }) - - setInline("AsyncRegInit.v", - s""" - |module AsyncRegInit( - | input clk, - | input reset, - | input init, - | input in, - | output reg out - |); - | always @ (posedge clk or posedge reset) begin - | if (reset) begin - | out <= init; - | end else begin - | out <= in; - | end - | end - |endmodule - """.stripMargin) -} - -object AsyncRegInit { - def apply(clk: Clock, reset: Bool, init: Bool): AsyncRegInit = { - val asyncRegInit = Module(new AsyncRegInit) - asyncRegInit.io.clk := clk - asyncRegInit.io.reset := reset - asyncRegInit.io.init := init - asyncRegInit - } -} - -// TODO: Convert analogFile into implicit? -// If syncReset = false, it's implied that reset is strobed before any clk rising edge happens -// i.e. when this is a clkgen fed by another clkgen --> need to adjust the indexing b/c -// you're already shifting on the first clk rising edge -class SEClkDivider(divBy: Int, phases: Seq[Int], analogFile: String = "", syncReset: Boolean = true) - extends Module with IsClkModule { - - require(phases.distinct.length == phases.length, "Phases should be distinct!") - phases foreach { p => - require(p < divBy, "Phases must be < divBy") - } - - val io = IO(new SEClkDividerIO(phases)) - - annotateClkPort(io.inClk, Sink()) - - val referenceEdges = phases.map(p => Seq(2 * p, 2 * (p + 1), 2 * (p + divBy))) - - val generatedClks = io.outClks.elements.zip(referenceEdges).map { case ((field, eltx), edges) => - val elt = eltx.asInstanceOf[Element] - annotateClkPort(elt) - GeneratedClk(getIOName(elt), sources = Seq(getIOName(io.inClk)), edges) - }.toSeq - - annotateDerivedClks(ClkDiv, generatedClks) - - require(divBy >= 1, "Clk division factor must be >= 1") - - divBy match { - case i: Int if i == 1 => - require(phases == Seq(0), "Clk division by 1 shouldn't generate new phases") - io.outClks(0) := io.inClk - case i: Int if i > 1 && analogFile == "" => - // Shift register based clock divider (duty cycle is NOT 50%) - val initVals = Seq(true.B) ++ Seq.fill(divBy - 1)(false.B) - - /************ Real design assumes asnyc reset!!! 
- withClockAndReset(io.inClk, io.reset) { - val regs = initVals.map(i => RegInit(i)) - // Close the loop - regs.head := regs.last - // Shift register - regs.tail.zip(regs.init) foreach { case (lhs, rhs) => lhs := rhs } - // Assign register output to correct clk out - phases foreach { idx => io.outClks(idx) := regs(idx).asClock } - } - *************/ - - val regs = initVals.map(i => AsyncRegInit(io.inClk, io.reset, i)) - regs.head.io.in := regs.last.io.out - regs.tail.zip(regs.init) foreach { case (lhs, rhs) => lhs.io.in := rhs.io.out } - phases foreach { idx => - val regIdx = if (syncReset) idx else (idx + 1) % divBy - io.outClks(idx) := regs(regIdx).io.out.asClock - } - - case _ => - if (new java.io.File(analogFile).exists) { - val bb = Module(new SEClkDividerBB(phases, analogFile)) - io <> bb.io - } - else throw new Exception("Clock divider Verilog file invalid!") - } -} diff --git a/tapeout/src/main/scala/transforms/.clkgen/ClkSrcTransform.scala b/tapeout/src/main/scala/transforms/.clkgen/ClkSrcTransform.scala deleted file mode 100644 index 0f5dc5712..000000000 --- a/tapeout/src/main/scala/transforms/.clkgen/ClkSrcTransform.scala +++ /dev/null @@ -1,34 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms.clkgen - -import firrtl._ -import firrtl.passes._ - -import scala.collection.mutable - -class ClkSrcTransform extends Transform with SeqTransformBased { - - override def inputForm: CircuitForm = LowForm - override def outputForm: CircuitForm = LowForm - - val transformList = new mutable.ArrayBuffer[Transform] - def transforms = transformList - - override def execute(state: CircuitState): CircuitState = { - val collectedAnnos = HasClkAnnotation(getMyAnnotations(state)) - collectedAnnos match { - // Transform not used - case None => CircuitState(state.circuit, LowForm) - case Some((clkModAnnos, clkPortAnnos)) => - val targetDir = barstools.tapeout.transforms.GetTargetDir(state) - - transformList ++= Seq( - // InferTypes, - new CreateClkConstraints(clkModAnnos, clkPortAnnos, targetDir) - ) - val ret = runTransforms(state) - CircuitState(ret.circuit, outputForm, ret.annotations, ret.renames) - } - } -} diff --git a/tapeout/src/main/scala/transforms/.clkgen/CreateClkConstraints.scala b/tapeout/src/main/scala/transforms/.clkgen/CreateClkConstraints.scala deleted file mode 100644 index 6975eb735..000000000 --- a/tapeout/src/main/scala/transforms/.clkgen/CreateClkConstraints.scala +++ /dev/null @@ -1,152 +0,0 @@ -// See license file for details - -package barstools.tapeout.transforms.clkgen - -import firrtl.passes.clocklist._ -import firrtl.annotations._ -import firrtl.ir._ -import firrtl.Utils._ -import barstools.tapeout.transforms._ -import scala.collection.immutable.ListMap - -// TODO: Really should be moved out of memlib -import firrtl.passes.memlib.AnalysisUtils._ -import firrtl.passes._ - -// TODO: Wait until Albert merges into firrtl -import firrtl.analyses._ - -class CreateClkConstraints( - clkModAnnos: Seq[TargetClkModAnnoF], - clkPortAnnos: Seq[TargetClkPortAnnoF], - targetDir: String) extends Pass { - - // TODO: Are annotations only valid on ports? - - def run(c: Circuit): Circuit = { -/* - val top = c.main - - // Remove everything from the circuit, unless it has a clock type - // This simplifies the circuit drastically so InlineInstances doesn't take forever. 
- val onlyClockCircuit = RemoveAllButClocks.run(c) - - val instanceGraph = new InstanceGraph(onlyClockCircuit) - - val clkModNames = clkModAnnos.map(x => x.targetName) - // ** Module name -> Absolute path of (unique) instance - val clkMods = clkModNames.map { x => - // NoDeDup was run so only 1 instance of each module should exist - val inst = instanceGraph.findInstancesInHierarchy(x) - require(inst.length == 1, "Clk modules should have not ben dedup-ed") - // Return map of module name to absolute path as a string - // Note: absolute path doesn't contain top module + to work with inlineInstances, - // delimit with $ - x -> inst.head.tail.map(y => y.name).mkString("$") - }.toMap - - val clkPortIds = clkPortAnnos.map { a => a.modId } - require(clkPortIds.distinct.length == clkPortIds.length, "All clk port IDs must be unique!") - - val allModClkPorts = clkModAnnos.map { x => - val modClkPorts = x.getAllClkPorts - require(modClkPorts.intersect(clkPortIds).length == modClkPorts.length, - "Clks given relationships via clk modules must have been annotated as clk ports") - modClkPorts - }.flatten.distinct - - val clkPortMap = clkPortIds.zip(clkPortAnnos).toMap - val clkModMap = clkModNames.zip(clkModAnnos).toMap - - val (clkSinksTemp, clkSrcsTemp) = clkPortAnnos.partition { - case TargetClkPortAnnoF(_, ClkPortAnnotation(tag, _)) if tag.nonEmpty => true - case _ => false - } - - def convertClkPortAnnoToMap(annos: Seq[TargetClkPortAnnoF]): ListMap[String, String] = - ListMap(annos.map { x => - val target = x.target - val absPath = { - if (top == target.module.name) LowerName(target.name) - else Seq(clkMods(target.module.name), LowerName(target.name)).mkString(".") - } - x.modId -> absPath - }.sortBy(_._1): _*) - - // ** clk port -> absolute path - val clkSinks = convertClkPortAnnoToMap(clkSinksTemp) - val clkSrcs = convertClkPortAnnoToMap(clkSrcsTemp) - - clkSrcs foreach { case (id, path) => - require(allModClkPorts contains id, "All clock source properties must be defined by their respective modules") } - - // Don't inline clock modules - val modulesToInline = (c.modules.collect { - case Module(_, n, _, _) if n != top && !clkModNames.contains(n) => - ModuleName(n, CircuitName(top)) - }).toSet - - val inlineTransform = new InlineInstances - val inlinedCircuit = inlineTransform.run(onlyClockCircuit, modulesToInline, Set(), None).circuit - - val topModule = inlinedCircuit.modules.find(_.name == top).getOrElse(throwInternalError) - - // Build a hashmap of connections to use for getOrigins - val connects = getConnects(topModule) - - // Clk sinks are either inputs to clock modules or top clk inputs --> separate - // ** clk port -> absolute path - val (topClks, clkModSinks) = clkSinks.partition { - case (modId, absPath) if modId.split("\\.").head == top => true - case _ => false - } - - // Must be 1:1 originally! 
- def flipMapping(m: ListMap[String, String]): ListMap[String, String] = - m.map { case (a, b) => b -> a } - - val clkSrcsFlip = flipMapping(clkSrcs) - val topClksFlip = flipMapping(topClks) - - // Find origins of clk mod sinks - val clkModSinkToSourceMap = clkModSinks.map { case (sinkId, sinkAbsPath) => - val sourceAbsPath = getOrigin(connects, sinkAbsPath).serialize - val sourceId = { - // sources of sinks are generated clks or top level clk inputs - if (clkSrcsFlip.contains(sourceAbsPath)) clkSrcsFlip(sourceAbsPath) - else if (topClksFlip.contains(sourceAbsPath)) topClksFlip(sourceAbsPath) - else throw new Exception(s"Absolute path $sourceAbsPath of clk source for $sinkId not found!") - } - sinkId -> sourceId - } - - c.modules.foreach { - case mod: DefModule => - mod.ports.foreach { - case Port(_, n, dir, tpe) - if tpe == ClockType && - ((dir == Input && mod.name == top) || (dir == Output && clkModNames.contains(mod.name))) => - clkPortAnnos.find(x => - // TODO: Not sufficiently general for output clks? Might have forgotten to label a clk module... - LowerName(x.target.name) == n && x.target.module.name == mod.name).getOrElse( - throw new Exception( - s"All top module input clks/clk module output clocks must be sinks/sources! $n not annotated!")) - case _ => - } - } - - // Find sinks used to derive clk mod sources - val clkModSourceToSinkMap: Seq[(String, Seq[String])] = clkModAnnos.map(x => { - val modName = x.targetName - x.generatedClks.map(y => Seq(modName, y.id).mkString(".") -> y.sources.map(z => Seq(modName, z).mkString("."))) - } ).flatten - - topClks.foreach {x => println(s"top clk: $x")} - clkModSinks.foreach { x => println(s"clk sink: $x")} - clkSrcs.foreach { x => println(s"gen clk: $x")} - clkModSinkToSourceMap.foreach { x => println(s"sink -> src: $x")} - clkModSourceToSinkMap.foreach { x => println(s"src -> dependent sinks: $x")} -*/ - c - } -} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala b/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala index 36979ce51..a348ff728 100644 --- a/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala +++ b/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala @@ -1,9 +1,11 @@ +// See LICENSE for license details. + package barstools.tapeout.transforms.pads import chisel3._ -import barstools.tapeout.transforms.clkgen._ import chisel3.experimental._ -import firrtl.transforms.DedupModules +import firrtl.Transform +import firrtl.annotations.Annotation // TODO: Move out of pads @@ -20,29 +22,12 @@ abstract class TopModule( coreHeight: Int = 0, usePads: Boolean = true, override_clock: Option[Clock] = None, - override_reset: Option[Bool] = None) extends Module with IsClkModule { + override_reset: Option[Bool] = None) extends Module { override_clock.foreach(clock := _) override_reset.foreach(reset := _) - override def annotateClkPort(p: Element, anno: ClkPortAnnotation): Unit = { - DataMirror.directionOf(p) match { - case chisel3.core.ActualDirection.Input => - require(anno.tag.nonEmpty, "Top Module input clks must be clk sinks") - require(anno.tag.get.src.nonEmpty, - "Top module input clks must have clk period, etc. 
specified") - case _ => - throw new Exception("Clk port direction must be specified!") - } - p match { - case _: chisel3.core.Clock => - case _ => throw new Exception("Clock port must be of type Clock") - } - annotate(TargetClkPortAnnoC(p, anno)) - } - - override def annotateDerivedClks(m: Module, anno: ClkModAnnotation): Unit = - throw new Exception("Top module cannot be pure clock module!") + private val mySelf = this // Annotate module as top module (that requires pad transform) // Specify the yaml file that indicates how pads are templated, @@ -55,7 +40,38 @@ abstract class TopModule( coreHeight = coreHeight, supplyAnnos = supplyAnnos ) - annotate(TargetModulePadAnnoC(this, modulePadAnnotation)) + //TODO: PORT-1.4: Remove commented code + // annotate(TargetModulePadAnnoC(this, modulePadAnnotation)) + annotate(new ChiselAnnotation with RunFirrtlTransform { + override def toFirrtl: Annotation = { + TargetModulePadAnnoF(mySelf.toNamed, modulePadAnnotation) + } + def transformClass: Class[_ <: Transform] = classOf[AddIOPadsTransform] + }) + } + + private def extractElementNames(signal: Data): Seq[String] = { + val names = signal match { + case elt: Record => + elt.elements.map { case (key, value) => extractElementNames(value).map(x => key + "_" + x) }.toSeq.flatten + case elt: Vec[_] => + elt.zipWithIndex.map { case (elt, i) => extractElementNames(elt).map(x => i + "_" + x) }.toSeq.flatten + case elt: Element => Seq("") + case elt => throw new Exception(s"Cannot extractElementNames for type ${elt.getClass}") + } + names.map(s => s.stripSuffix("_")) + } + + // TODO: Replace! + def extractElements(signal: Data): Seq[Element] = { + signal match { + case elt: Record => + elt.elements.map { case (key, value) => extractElements(value) }.toSeq.flatten + case elt: Vec[_] => + elt.map { elt => extractElements(elt) }.toSeq.flatten + case elt: Element => Seq(elt) + case elt => throw new Exception(s"Cannot extractElements for type ${elt.getClass}") + } } // Annotate IO with side + pad name diff --git a/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala b/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala index ed870092d..c1f2d7835 100644 --- a/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala +++ b/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala @@ -1,10 +1,10 @@ +// See LICENSE for license details. 
+ package barstools.tapeout.transforms.pads import firrtl.annotations._ import chisel3.experimental._ import chisel3._ -import barstools.tapeout.transforms._ -import firrtl._ import net.jcazevedo.moultingyaml._ @@ -23,26 +23,32 @@ abstract class FirrtlPadTransformAnnotation { abstract class IOAnnotation { def serialize: String } + case class IOPadAnnotation(padSide: String, padName: String) extends IOAnnotation { import PadAnnotationsYaml._ def serialize: String = this.toYaml.prettyPrint def getPadSide: PadSide = HasPadAnnotation.getSide(padSide) } + case class NoIOPadAnnotation(noPad: String = "") extends IOAnnotation { import PadAnnotationsYaml._ def serialize: String = this.toYaml.prettyPrint - def field = "noPad:" + def field: String = "noPad:" } + // Firrtl version -case class TargetIOPadAnnoF(target: ComponentName, anno: IOAnnotation) extends FirrtlPadTransformAnnotation with SingleTargetAnnotation[ComponentName] { +case class TargetIOPadAnnoF(target: ComponentName, anno: IOAnnotation) + extends FirrtlPadTransformAnnotation with SingleTargetAnnotation[ComponentName] { + def duplicate(n: ComponentName): TargetIOPadAnnoF = this.copy(target = n) - def getAnno = Annotation(target, classOf[AddIOPadsTransform], anno.serialize) - def targetName = target.name + def targetName: String = target.name } + +//TODO: PORT-1.4: Remove commented code // Chisel version -case class TargetIOPadAnnoC(target: Element, anno: IOAnnotation) extends ChiselAnnotation { - def toFirrtl = TargetIOPadAnnoF(target.toNamed, anno) -} +//case class TargetIOPadAnnoC(target: Element, anno: IOAnnotation) extends ChiselAnnotation { +// def toFirrtl = TargetIOPadAnnoF(target.toNamed, anno) +//} // A bunch of supply pads (designated by name, # on each chip side) can be associated with the top module case class SupplyAnnotation( @@ -51,6 +57,7 @@ case class SupplyAnnotation( rightSide: Int = 0, topSide: Int = 0, bottomSide: Int = 0) + // The chip top should have a default pad side, a pad template file, and supply annotations case class ModulePadAnnotation( defaultPadSide: String = Top.serialize, @@ -63,17 +70,16 @@ case class ModulePadAnnotation( require(supplyPadNames.distinct.length == supplyPadNames.length, "Supply pads should only be specified once!") def getDefaultPadSide: PadSide = HasPadAnnotation.getSide(defaultPadSide) } + // Firrtl version -case class TargetModulePadAnnoF(target: ModuleName, anno: ModulePadAnnotation) extends FirrtlPadTransformAnnotation with SingleTargetAnnotation[ModuleName] { +case class TargetModulePadAnnoF(target: ModuleName, anno: ModulePadAnnotation) + extends FirrtlPadTransformAnnotation with SingleTargetAnnotation[ModuleName] { + def duplicate(n: ModuleName): TargetModulePadAnnoF = this.copy(target = n) - def getAnno = Annotation(target, classOf[AddIOPadsTransform], anno.serialize) - def targetName = target.name -} -// Chisel version -case class TargetModulePadAnnoC(target: Module, anno: ModulePadAnnotation) extends ChiselAnnotation { - def toFirrtl = TargetModulePadAnnoF(target.toNamed, anno) + def targetName: String = target.name } + case class CollectedAnnos( componentAnnos: Seq[TargetIOPadAnnoF], moduleAnnos: TargetModulePadAnnoF) { @@ -95,16 +101,34 @@ object HasPadAnnotation { case _ => throw new Exception(s" $a not a valid pad side annotation!") } + //TODO: PORT-1.4: Remove commented code +// def unapply(a: Annotation): Option[FirrtlPadTransformAnnotation] = a match { +// case Annotation(f, t, s) if t == classOf[AddIOPadsTransform] => f match { +// case m: ModuleName => +// 
Some(TargetModulePadAnnoF(m, s.parseYaml.convertTo[ModulePadAnnotation])) +// case c: ComponentName if s.contains(NoIOPadAnnotation().field) => +// Some(TargetIOPadAnnoF(c, s.parseYaml.convertTo[NoIOPadAnnotation])) +// case c: ComponentName => +// Some(TargetIOPadAnnoF(c, s.parseYaml.convertTo[IOPadAnnotation])) +// case _ => throw new Exception("Annotation only valid on module or component") +// } +// case _ => None +// } + + //scalastyle:off cyclomatic.complexity def unapply(a: Annotation): Option[FirrtlPadTransformAnnotation] = a match { - case Annotation(f, t, s) if t == classOf[AddIOPadsTransform] => f match { - case m: ModuleName => - Some(TargetModulePadAnnoF(m, s.parseYaml.convertTo[ModulePadAnnotation])) - case c: ComponentName if s.contains(NoIOPadAnnotation().field) => - Some(TargetIOPadAnnoF(c, s.parseYaml.convertTo[NoIOPadAnnotation])) - case c: ComponentName => - Some(TargetIOPadAnnoF(c, s.parseYaml.convertTo[IOPadAnnotation])) - case _ => throw new Exception("Annotation only valid on module or component") - } + case hasTransform: RunFirrtlTransform if hasTransform.transformClass == classOf[AddIOPadsTransform] => + hasTransform match { + case hasTarget: SingleTargetAnnotation[_] => + hasTarget.target match { + case m: ModuleName => + Some(TargetModulePadAnnoF(m, s.parseYaml.convertTo[ModulePadAnnotation])) + hasTarget match { + case _ => None + } + + + } case _ => None } @@ -113,8 +137,9 @@ object HasPadAnnotation { val padAnnos = annos.map(x => unapply(x)).flatten val targets = padAnnos.map(x => x.targetName) require(targets.distinct.length == targets.length, "Only 1 pad related annotation is allowed per component/module") - if (padAnnos.length == 0) None - else { + if (padAnnos.length == 0) { + None + } else { val moduleAnnosTemp = padAnnos.filter { case TargetModulePadAnnoF(_, _) => true case _ => false diff --git a/tapeout/src/test/scala/transforms/.clkgen/ClkGenSpec.scala b/tapeout/src/test/scala/transforms/.clkgen/ClkGenSpec.scala deleted file mode 100644 index 17ae1c764..000000000 --- a/tapeout/src/test/scala/transforms/.clkgen/ClkGenSpec.scala +++ /dev/null @@ -1,181 +0,0 @@ -// See LICENSE for license details. 
- -package barstools.tapeout.transforms.clkgen - -import chisel3._ -import firrtl._ -import org.scalatest.{FlatSpec, Matchers} -import chisel3.experimental._ -import chisel3.iotesters._ -import chisel3.util.HasBlackBoxInline -import barstools.tapeout.transforms.pads.TopModule - -// Purely to see that clk src tagging works with BBs -class FakeBBClk extends BlackBox with HasBlackBoxInline with IsClkModule { - val io = IO(new Bundle { - val inClk = Input(Clock()) - val outClk = Output(Vec(3, Clock())) - }) - - annotateClkPort(io.inClk, Sink()) - val generatedClks = io.outClk.map { case elt => - val id = getIOName(elt) - val srcId = getIOName(io.inClk) - annotateClkPort(elt.asInstanceOf[Element]) - GeneratedClk(id, Seq(srcId), Seq(0, 1, 2)) - }.toSeq - - annotateDerivedClks(ClkDiv, generatedClks) - - // Generates a "FakeBB.v" file with the following Verilog module - setInline("FakeBBClk.v", - s""" - |module FakeBBClk( - | input inClk, - | output outClk_0, - | output outClk_1, - | output outClk_2 - |); - | always @* begin - | outClk_0 = inClk; - | outClk_1 = inClk; - | outClk_2 = inClk; - | end - |endmodule - """.stripMargin) -} - -class ModWithNestedClkIO(numPhases: Int) extends Bundle { - val inClk = Input(Clock()) - val bbOutClk = Output(Vec(3, Clock())) - val clkDivOut = Output(Vec(numPhases, Clock())) -} - -class TestModWithNestedClkIO(numPhases: Int) extends Bundle { - val bbOutClk = Output(Vec(3, Bool())) - val clkDivOut = Output(Vec(numPhases, Bool())) -} - -class ModWithNestedClk(divBy: Int, phases: Seq[Int], syncReset: Boolean) extends Module { - - val io = IO(new ModWithNestedClkIO(phases.length)) - - val bb = Module(new FakeBBClk) - bb.io.inClk := io.inClk - io.bbOutClk := bb.io.outClk - val clkDiv = Module(new SEClkDivider(divBy, phases, syncReset = syncReset)) - clkDiv.io.reset := reset - clkDiv.io.inClk := io.inClk - phases.zipWithIndex.foreach { case (phase, idx) => io.clkDivOut(idx) := clkDiv.io.outClks(phase) } - -} - -class TopModuleWithClks(val divBy: Int, val phases: Seq[Int]) extends TopModule(usePads = false) { - val io = IO(new Bundle { - val gen1 = new TestModWithNestedClkIO(phases.length) - val gen2 = new TestModWithNestedClkIO(phases.length) - val gen3 = new TestModWithNestedClkIO(phases.length) - val fakeClk1 = Input(Clock()) - val fakeClk2 = Input(Clock()) - }) - - // TODO: Don't have to type Some - annotateClkPort(clock, - id = "clock", // not in io bundle - sink = Sink(Some(ClkSrc(period = 5.0, async = Seq(getIOName(io.fakeClk1))))) - ) - annotateClkPort(io.fakeClk1, Sink(Some(ClkSrc(period = 4.0)))) - annotateClkPort(io.fakeClk2, Sink(Some(ClkSrc(period = 3.0)))) - - // Most complicated: test chain of clock generators - val gen1 = Module(new ModWithNestedClk(divBy, phases, syncReset = true)) - io.gen1.bbOutClk := Vec(gen1.io.bbOutClk.map(x => x.asUInt)) - io.gen1.clkDivOut := Vec(gen1.io.clkDivOut.map(x => x.asUInt)) - gen1.io.inClk := clock - // ClkDiv on generated clk -> reset occurs before first input clk edge - val gen2 = Module(new ModWithNestedClk(divBy, phases, syncReset = false)) - io.gen2.bbOutClk := Vec(gen2.io.bbOutClk.map(x => x.asUInt)) - io.gen2.clkDivOut := Vec(gen2.io.clkDivOut.map(x => x.asUInt)) - gen2.io.inClk := gen1.io.clkDivOut.last - val gen3 = Module(new ModWithNestedClk(divBy, phases, syncReset = false)) - io.gen3.bbOutClk := Vec(gen3.io.bbOutClk.map(x => x.asUInt)) - io.gen3.clkDivOut := Vec(gen3.io.clkDivOut.map(x => x.asUInt)) - gen3.io.inClk := gen1.io.clkDivOut.last -} - -class TopModuleWithClksTester(c: TopModuleWithClks) extends 
PeekPokeTester(c) {
-  val maxT = c.divBy * c.divBy * 4
-  val numSubClkOutputs = c.io.gen1.clkDivOut.length
-  val gen1Out = Seq.fill(numSubClkOutputs)(scala.collection.mutable.ArrayBuffer[Int]())
-  val gen2Out = Seq.fill(numSubClkOutputs)(scala.collection.mutable.ArrayBuffer[Int]())
-  val gen3Out = Seq.fill(numSubClkOutputs)(scala.collection.mutable.ArrayBuffer[Int]())
-  reset(10)
-  for (t <- 0 until maxT) {
-    for (k <- 0 until numSubClkOutputs) {
-      gen1Out(k) += peek(c.io.gen1.clkDivOut(k)).intValue
-      gen2Out(k) += peek(c.io.gen2.clkDivOut(k)).intValue
-      gen3Out(k) += peek(c.io.gen3.clkDivOut(k)).intValue
-    }
-    step(1)
-  }
-
-  val clkCounts = (0 until maxT)
-  val clkCountsModDiv = clkCounts.map(_ % c.divBy)
-  for (k <- 0 until numSubClkOutputs) {
-    val expected = clkCountsModDiv.map(x => if (x == c.phases(k)) 1 else 0)
-    expect(gen1Out(k) == expected, s"gen1Out($k) incorrect!")
-    println(s"gen1Out($k): \t${gen1Out(k).mkString("")}")
-  }
-
-  val gen1ClkCounts = (0 until maxT/c.divBy).map(i => Seq.fill(c.divBy)(i)).flatten
-  val gen1ClkCountsModDiv = gen1ClkCounts.map(_ % c.divBy)
-
-  for (k <- 0 until numSubClkOutputs) {
-    // Handle initial transient
-    val fillVal = if (c.phases.last == c.divBy - 1 && k == numSubClkOutputs - 1) 1 else 0
-    val expected = Seq.fill(c.phases.last)(fillVal) ++
-      gen1ClkCountsModDiv.map(x => if (x == c.phases(k)) 1 else 0).dropRight(c.phases.last)
-    expect(gen2Out(k) == expected, s"gen1Out($k) incorrect!")
-    println(s"gen2Out($k): \t${gen2Out(k).mkString("")}")
-    println(s"expected: \t${expected.mkString("")}")
-  }
-
-  expect(gen2Out == gen3Out, "gen2Out should equal gen3Out")
-
-}
-
-class ClkGenSpec extends FlatSpec with Matchers {
-
-  def readOutputFile(dir: String, f: String): String =
-    scala.io.Source.fromFile(Seq(dir, f).mkString("/")).getLines.mkString("\n")
-  def readResource(resource: String): String = {
-    val stream = getClass.getResourceAsStream(resource)
-    scala.io.Source.fromInputStream(stream).mkString
-  }
-
-  def checkOutputs(dir: String) = {
-  }
-
-  behavior of "top module with clk gens"
-
-  it should "pass simple testbench" in {
-    val optionsManager = new TesterOptionsManager {
-      firrtlOptions = firrtlOptions.copy(
-        compilerName = "verilog"
-        /*annotations = List(passes.clocklist.ClockListAnnotation(
-          s"-c:TopModuleWithClks:-m:TopModuleWithClks:-o:test.clk"
-        )),
-        customTransforms = Seq(new passes.clocklist.ClockListTransform())*/
-      )
-      testerOptions = testerOptions.copy(isVerbose = false, backendName = "verilator", displayBase = 10)
-      commonOptions = commonOptions.copy(targetDirName = "test_run_dir/ClkTB")
-    }
-    // WARNING: TB requires that phase divBy - 1 should be at the end of the Seq to be OK during initial transient
-    iotesters.Driver.execute(() => new TopModuleWithClks(4, Seq(0, 1, 3)), optionsManager) { c =>
-      val dir = optionsManager.commonOptions.targetDirName
-      checkOutputs(dir)
-      new TopModuleWithClksTester(c)
-    } should be (true)
-  }
-
-}
\ No newline at end of file

From 67de39e9579867574734de154ae1672d27976ee5 Mon Sep 17 00:00:00 2001
From: chick
Date: Fri, 11 Sep 2020 17:06:19 -0700
Subject: [PATCH 174/273] Refactor tapeout for Chisel 3.4, Firrtl 1.4

- Remove clk package based on discussion with Colin
- Annotations need to be refactored to use the latest API
- Generally that involves making annos generated by an anonymous ChiselAnnotation
- The Chisel annotations will use RunFirrtlTransform to queue up their associated transforms
- Chisel annotations provide toFirrtl to generate the Firrtl form of the annotation
- Usages of unapply on firrtl
annotations cannot use generic unapply(target, transform, data) which has been eliminated - Have transforms use with DependencyAPIMigration to avoid deprecated `form`s - Added some 'see License comments - TechnologyLocation section of AddIOPadsSpec does not currently run because there is no content for it. - Added some tests for annotation serialization here --- .../transforms/.pads/AddIOPadsTransform.scala | 9 +- .../transforms/.pads/ChiselTopModule.scala | 18 +- .../transforms/.pads/FoundryPadsYaml.scala | 7 +- .../transforms/.pads/PadAnnotations.scala | 56 ++--- .../transforms/AddSuffixToModuleNames.scala | 5 +- .../scala/transforms/EnumerateModules.scala | 8 +- .../src/main/scala/transforms/Generate.scala | 9 +- .../main/scala/transforms/ResetInverter.scala | 26 +-- .../main/scala/transforms/retime/Retime.scala | 28 +-- .../scala/transforms/utils/FileUtils.scala | 45 ++-- .../transforms/.pads/AddIOPadsSpec.scala | 203 +++++++++++------- .../scala/transforms/ResetInverterSpec.scala | 27 +-- .../scala/transforms/retime/RetimeSpec.scala | 50 +++-- 13 files changed, 274 insertions(+), 217 deletions(-) diff --git a/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala b/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala index b586e8bea..1d12adb8f 100644 --- a/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala +++ b/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala @@ -3,24 +3,19 @@ package barstools.tapeout.transforms.pads import firrtl._ -import firrtl.annotations._ import firrtl.passes._ -import firrtl.ir._ import barstools.tapeout.transforms._ import scala.collection.mutable // Main Add IO Pad transform operates on low Firrtl -class AddIOPadsTransform extends Transform with SeqTransformBased { - - override def inputForm: CircuitForm = LowForm - override def outputForm: CircuitForm = LowForm +class AddIOPadsTransform extends Transform with SeqTransformBased with DependencyAPIMigration { val transformList = new mutable.ArrayBuffer[Transform] def transforms: Seq[Transform] = transformList override def execute(state: CircuitState): CircuitState = { - val collectedAnnos = HasPadAnnotation(getMyAnnotations(state)) + val collectedAnnos = HasPadAnnotation(state.annotations) collectedAnnos match { // Transform not used case None => state diff --git a/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala b/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala index a348ff728..5b2ed28a0 100644 --- a/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala +++ b/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala @@ -77,7 +77,12 @@ abstract class TopModule( // Annotate IO with side + pad name def annotatePad(sig: Element, side: PadSide = defaultPadSide, name: String = ""): Unit = if (usePads) { val anno = IOPadAnnotation(side.serialize, name) - annotate(TargetIOPadAnnoC(sig, anno)) + annotate(new ChiselAnnotation with RunFirrtlTransform { + override def toFirrtl: Annotation = { + TargetIOPadAnnoF(sig.toTarget, anno) + } + def transformClass: Class[_ <: Transform] = classOf[AddIOPadsTransform] + }) } def annotatePad(sig: Aggregate, name: String): Unit = annotatePad(sig, side = defaultPadSide, name) def annotatePad(sig: Aggregate, side: PadSide): Unit = annotatePad(sig, side, name = "") @@ -86,7 +91,16 @@ abstract class TopModule( // There may be cases where pads were inserted elsewhere. If that's the case, allow certain IO to // not have pads auto added. Note that annotatePad and noPad are mutually exclusive! 
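The change above captures the core idiom of this refactor: each FIRRTL annotation is now emitted from Chisel through an anonymous ChiselAnnotation with RunFirrtlTransform, and transforms look annotations up by their case class rather than the removed generic Annotation(target, transform, value) unapply. A minimal, self-contained sketch of that idiom follows; MyAnnotation, MyTransform, and Example are illustrative placeholders, not classes from this patch.

import chisel3._
import chisel3.experimental.{annotate, ChiselAnnotation, RunFirrtlTransform}
import firrtl.{CircuitState, DependencyAPIMigration, Transform}
import firrtl.annotations.{Annotation, ModuleTarget, SingleTargetAnnotation}

// Plain case-class annotation carrying a module target (firrtl 1.4 style).
case class MyAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] {
  def duplicate(n: ModuleTarget): MyAnnotation = this.copy(target = n)
}

// Transform that consumes the annotation; note the direct case-class match.
class MyTransform extends Transform with DependencyAPIMigration {
  def execute(state: CircuitState): CircuitState = {
    val mine = state.annotations.collect { case a: MyAnnotation => a }
    require(mine.nonEmpty, "expected at least one MyAnnotation")
    state
  }
}

class Example extends Module {
  val io = IO(new Bundle { val out = Output(Bool()) })
  io.out := true.B
  // Emit the FIRRTL annotation and queue its transform from the Chisel side.
  annotate(new ChiselAnnotation with RunFirrtlTransform {
    def toFirrtl: Annotation = MyAnnotation(Example.this.toTarget)
    def transformClass: Class[_ <: Transform] = classOf[MyTransform]
  })
}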
- def noPad(sig: Element): Unit = if (usePads) annotate(TargetIOPadAnnoC(sig, NoIOPadAnnotation())) + def noPad(sig: Element): Unit = { + if (usePads) { + annotate(new ChiselAnnotation with RunFirrtlTransform { + override def toFirrtl: Annotation = { + TargetIOPadAnnoF(sig.toTarget, NoIOPadAnnotation()) + } + def transformClass: Class[_ <: Transform] = classOf[AddIOPadsTransform] + }) + } + } def noPad(sig: Aggregate): Unit = extractElements(sig) foreach { x => noPad(x) } // Since this is a super class, this should be the first thing that gets run diff --git a/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala b/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala index 2d372a51f..ef6fdde73 100644 --- a/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala +++ b/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala @@ -1,3 +1,5 @@ +// See LICENSE for license details. + package barstools.tapeout.transforms.pads import net.jcazevedo.moultingyaml._ @@ -85,8 +87,9 @@ object FoundryPadsYaml extends DefaultYamlProtocol { implicit val _pad = yamlFormat6(FoundryPad) def parse(techDir: String): Seq[FoundryPad] = { val file = techDir + exampleResource - if(techDir != "" && !(new java.io.File(file)).exists()) - throw new Exception("Technology directory must contain FoundryPads.yaml!") + if(techDir != "" && !(new java.io.File(file)).exists()) { + throw new Exception(s"Technology directory $techDir must contain FoundryPads.yaml!") + } val out = (new YamlFileReader(exampleResource)).parse[FoundryPad](if (techDir == "") "" else file) val padNames = out.map(x => x.correctedName) require(padNames.distinct.length == padNames.length, "Pad names must be unique!") diff --git a/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala b/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala index c1f2d7835..7ca497995 100644 --- a/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala +++ b/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala @@ -3,9 +3,6 @@ package barstools.tapeout.transforms.pads import firrtl.annotations._ -import chisel3.experimental._ -import chisel3._ - import net.jcazevedo.moultingyaml._ object PadAnnotationsYaml extends DefaultYamlProtocol { @@ -13,6 +10,17 @@ object PadAnnotationsYaml extends DefaultYamlProtocol { implicit val _noiopad = yamlFormat1(NoIOPadAnnotation) implicit val _supplyanno = yamlFormat5(SupplyAnnotation) implicit val _modulepadanno = yamlFormat4(ModulePadAnnotation) + + // Putting these serialize methods here seems to fix warnings about missing implicits for the toYaml + def serialize(noIOPad: NoIOPadAnnotation): String = { + noIOPad.toYaml.prettyPrint + } + def serialize(ioPadAnnotation: IOPadAnnotation): String = { + ioPadAnnotation.toYaml.prettyPrint + } + def serialize(modulePadAnnotation: ModulePadAnnotation): String = { + modulePadAnnotation.toYaml.prettyPrint + } } abstract class FirrtlPadTransformAnnotation { @@ -25,14 +33,12 @@ abstract class IOAnnotation { } case class IOPadAnnotation(padSide: String, padName: String) extends IOAnnotation { - import PadAnnotationsYaml._ - def serialize: String = this.toYaml.prettyPrint + def serialize: String = PadAnnotationsYaml.serialize(this) def getPadSide: PadSide = HasPadAnnotation.getSide(padSide) } case class NoIOPadAnnotation(noPad: String = "") extends IOAnnotation { - import PadAnnotationsYaml._ - def serialize: String = this.toYaml.prettyPrint + def serialize: String = PadAnnotationsYaml.serialize(this) def field: String = "noPad:" } @@ -44,12 +50,6 @@ 
case class TargetIOPadAnnoF(target: ComponentName, anno: IOAnnotation) def targetName: String = target.name } -//TODO: PORT-1.4: Remove commented code -// Chisel version -//case class TargetIOPadAnnoC(target: Element, anno: IOAnnotation) extends ChiselAnnotation { -// def toFirrtl = TargetIOPadAnnoF(target.toNamed, anno) -//} - // A bunch of supply pads (designated by name, # on each chip side) can be associated with the top module case class SupplyAnnotation( padName: String, @@ -64,9 +64,9 @@ case class ModulePadAnnotation( coreWidth: Int = 0, coreHeight: Int = 0, supplyAnnos: Seq[SupplyAnnotation] = Seq.empty) { - import PadAnnotationsYaml._ - def serialize: String = this.toYaml.prettyPrint - val supplyPadNames = supplyAnnos.map(_.padName) + + def serialize: String = PadAnnotationsYaml.serialize(this) + def supplyPadNames: Seq[String] = supplyAnnos.map(_.padName) require(supplyPadNames.distinct.length == supplyPadNames.length, "Supply pads should only be specified once!") def getDefaultPadSide: PadSide = HasPadAnnotation.getSide(defaultPadSide) } @@ -91,7 +91,6 @@ case class CollectedAnnos( } object HasPadAnnotation { - import PadAnnotationsYaml._ def getSide(a: String): PadSide = a match { case i if i == Left.serialize => Left @@ -115,26 +114,13 @@ object HasPadAnnotation { // case _ => None // } - //scalastyle:off cyclomatic.complexity - def unapply(a: Annotation): Option[FirrtlPadTransformAnnotation] = a match { - case hasTransform: RunFirrtlTransform if hasTransform.transformClass == classOf[AddIOPadsTransform] => - hasTransform match { - case hasTarget: SingleTargetAnnotation[_] => - hasTarget.target match { - case m: ModuleName => - Some(TargetModulePadAnnoF(m, s.parseYaml.convertTo[ModulePadAnnotation])) - hasTarget match { - case _ => None - } - - - } - case _ => None - } - def apply(annos: Seq[Annotation]): Option[CollectedAnnos] = { // Get all pad-related annotations (config files, pad sides, pad names, etc.) - val padAnnos = annos.map(x => unapply(x)).flatten + val padAnnos = annos.flatMap { + case a: TargetModulePadAnnoF => Some(a) + case a: TargetIOPadAnnoF => Some(a) + case _ => None + } val targets = padAnnos.map(x => x.targetName) require(targets.distinct.length == targets.length, "Only 1 pad related annotation is allowed per component/module") if (padAnnos.length == 0) { diff --git a/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala b/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala index 0e1a3739d..ff8c18578 100644 --- a/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala +++ b/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala @@ -7,7 +7,6 @@ import firrtl.ir._ import firrtl.annotations._ import firrtl.Mappers._ - case class KeepNameAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { def duplicate(n: ModuleTarget) = this.copy(n) @@ -22,9 +21,7 @@ case class ModuleNameSuffixAnnotation(target: CircuitTarget, suffix: String) // Verilog black box and therefore can't be renamed. Since the point is to // allow FIRRTL to be linked together using "cat" and ExtModules don't get // emitted, this should be safe. 
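Several transforms below switch from the deprecated inputForm/outputForm declarations to the DependencyAPIMigration mixin mentioned in the commit message. A transform that still wants to be scheduled against low-form FIRRTL can say so explicitly through the firrtl 1.4 dependency API; the following is only a sketch under that assumption (the transforms in this patch rely on the mixin's defaults and do not override these members).

import firrtl.{CircuitState, DependencyAPIMigration, Transform}
import firrtl.stage.Forms

// Sketch: pin a transform to low-form FIRRTL via the dependency API
// instead of the removed inputForm/outputForm members.
class LowFormOnlyTransform extends Transform with DependencyAPIMigration {
  override def prerequisites = Forms.LowForm
  override def optionalPrerequisiteOf = Forms.LowEmitters
  override def invalidates(a: Transform): Boolean = false

  def execute(state: CircuitState): CircuitState = state // no-op placeholder body
}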
-class AddSuffixToModuleNames extends Transform { - def inputForm = LowForm - def outputForm = LowForm +class AddSuffixToModuleNames extends Transform with DependencyAPIMigration { def processAnnos(annos: AnnotationSeq): (AnnotationSeq, (String) => String) = { val whitelist = annos.collect({ case KeepNameAnnotation(tgt) => tgt.module }).toSet diff --git a/tapeout/src/main/scala/transforms/EnumerateModules.scala b/tapeout/src/main/scala/transforms/EnumerateModules.scala index 11da911e3..4bd2855f1 100644 --- a/tapeout/src/main/scala/transforms/EnumerateModules.scala +++ b/tapeout/src/main/scala/transforms/EnumerateModules.scala @@ -20,10 +20,10 @@ class EnumerateModulesPass(enumerate: (Module) => Unit) extends Pass { } } -class EnumerateModules(enumerate: (Module) => Unit) extends Transform with SeqTransformBased { - def inputForm = LowForm - def outputForm = LowForm - def transforms = Seq(new EnumerateModulesPass(enumerate)) +class EnumerateModules(enumerate: (Module) => Unit) + extends Transform with SeqTransformBased with DependencyAPIMigration { + + def transforms: Seq[Transform] = Seq(new EnumerateModulesPass(enumerate)) def execute(state: CircuitState): CircuitState = { val ret = runTransforms(state) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 89df8b556..b48f47e15 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -1,16 +1,11 @@ package barstools.tapeout.transforms import firrtl._ -import firrtl.ir._ import firrtl.annotations._ -import firrtl.stage.FirrtlCircuitAnnotation -import firrtl.passes.Pass - -import java.io.File -import firrtl.annotations.AnnotationYamlProtocol._ +import firrtl.ir._ import firrtl.passes.memlib.ReplSeqMemAnnotation +import firrtl.stage.FirrtlCircuitAnnotation import firrtl.transforms.BlackBoxResourceFileNameAnno -import net.jcazevedo.moultingyaml._ import logger.LazyLogging trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => diff --git a/tapeout/src/main/scala/transforms/ResetInverter.scala b/tapeout/src/main/scala/transforms/ResetInverter.scala index 08d849835..e6bdad458 100644 --- a/tapeout/src/main/scala/transforms/ResetInverter.scala +++ b/tapeout/src/main/scala/transforms/ResetInverter.scala @@ -2,19 +2,15 @@ package barstools.tapeout.transforms -import chisel3.internal.InstanceId +import chisel3.experimental.RunFirrtlTransform import firrtl.PrimOps.Not -import firrtl.annotations.{Annotation, CircuitName, ModuleName, Named} -import firrtl.ir.{Input, UIntType, IntWidth, Module, Port, DefNode, NoInfo, Reference, DoPrim, Block, Circuit} +import firrtl.annotations.{Annotation, CircuitName, ModuleName, SingleTargetAnnotation} +import firrtl.ir._ import firrtl.passes.Pass -import firrtl.{CircuitForm, CircuitState, LowForm, Transform} +import firrtl.{CircuitState, DependencyAPIMigration, Transform} -object ResetInverterAnnotation { - def apply(target: ModuleName): Annotation = Annotation(target, classOf[ResetInverterTransform], "invert") - def unapply(a: Annotation): Option[Named] = a match { - case Annotation(m, t, "invert") if t == classOf[ResetInverterTransform] => Some(m) - case _ => None - } +case class ResetInverterAnnotation(target: ModuleName) extends SingleTargetAnnotation[ModuleName] { + override def duplicate(n: ModuleName): Annotation = ResetInverterAnnotation(n) } object ResetN extends Pass { @@ -42,12 +38,9 @@ object ResetN extends Pass { } } -class ResetInverterTransform extends 
Transform { - override def inputForm: CircuitForm = LowForm - override def outputForm: CircuitForm = LowForm - +class ResetInverterTransform extends Transform with DependencyAPIMigration { override def execute(state: CircuitState): CircuitState = { - getMyAnnotations(state) match { + state.annotations.filter(_.isInstanceOf[ResetInverterAnnotation]) match { case Nil => state case Seq(ResetInverterAnnotation(ModuleName(state.circuit.main, CircuitName(_)))) => state.copy(circuit = ResetN.run(state.circuit)) @@ -60,7 +53,8 @@ class ResetInverterTransform extends Transform { trait ResetInverter { self: chisel3.Module => def invert[T <: chisel3.internal.LegacyModule](module: T): Unit = { - chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation{ + chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation with RunFirrtlTransform { + def transformClass: Class[_ <: Transform] = classOf[ResetInverterTransform] def toFirrtl: Annotation = ResetInverterAnnotation(module.toNamed) }) } diff --git a/tapeout/src/main/scala/transforms/retime/Retime.scala b/tapeout/src/main/scala/transforms/retime/Retime.scala index 231687bf4..f790f7b3c 100644 --- a/tapeout/src/main/scala/transforms/retime/Retime.scala +++ b/tapeout/src/main/scala/transforms/retime/Retime.scala @@ -2,27 +2,17 @@ package barstools.tapeout.transforms.retime -import chisel3.internal.InstanceId -import firrtl.PrimOps.Not -import firrtl.annotations.{Annotation, CircuitName, ModuleName, Named, ComponentName} -import firrtl.ir.{Input, UIntType, IntWidth, Module, Port, DefNode, NoInfo, Reference, DoPrim, Block, Circuit} -import firrtl.passes.Pass -import firrtl.{CircuitForm, CircuitState, LowForm, Transform} +import chisel3.experimental.RunFirrtlTransform +import firrtl.annotations._ +import firrtl.{CircuitState, DependencyAPIMigration, Transform} -object RetimeAnnotation { - def apply(target: ModuleName): Annotation = Annotation(target, classOf[RetimeTransform], "retime") - def unapply(a: Annotation): Option[Named] = a match { - case Annotation(m, t, "retime") if t == classOf[RetimeTransform] => Some(m) - case _ => None - } +case class RetimeAnnotation(target: Named) extends SingleTargetAnnotation[Named] { + override def duplicate(n: Named): Annotation = RetimeAnnotation(n) } -class RetimeTransform extends Transform { - override def inputForm: CircuitForm = LowForm - override def outputForm: CircuitForm = LowForm - +class RetimeTransform extends Transform with DependencyAPIMigration { override def execute(state: CircuitState): CircuitState = { - getMyAnnotations(state) match { + state.annotations.filter(_.isInstanceOf[RetimeAnnotation]) match { case Nil => state case seq => seq.foreach { case RetimeAnnotation(ModuleName(module, CircuitName(_))) => @@ -39,8 +29,10 @@ class RetimeTransform extends Transform { trait RetimeLib { self: chisel3.Module => + def retime[T <: chisel3.internal.LegacyModule](module: T): Unit = { - chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation{ + chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation with RunFirrtlTransform { + def transformClass: Class[_ <: Transform] = classOf[RetimeTransform] def toFirrtl: Annotation = RetimeAnnotation(module.toNamed) }) } diff --git a/tapeout/src/main/scala/transforms/utils/FileUtils.scala b/tapeout/src/main/scala/transforms/utils/FileUtils.scala index 7baf3e146..76ee45ec1 100644 --- a/tapeout/src/main/scala/transforms/utils/FileUtils.scala +++ b/tapeout/src/main/scala/transforms/utils/FileUtils.scala @@ -1,9 +1,11 @@ 
+// See LICENSE for license details. + package barstools.tapeout.transforms +import chisel3.experimental.{ChiselAnnotation, annotate} import firrtl._ import firrtl.annotations._ -import firrtl.passes._ -import firrtl.ir._ +import firrtl.transforms.BlackBoxTargetDirAnno object WriteConfig { def apply(dir: String, file: String, contents: String): Unit = { @@ -17,8 +19,7 @@ object GetTargetDir { def apply(state: CircuitState): String = { val annos = state.annotations val destDir = annos.map { - case Annotation(f, t, s) if t == classOf[firrtl.transforms.BlackBoxTargetDirAnno] => - Some(s) + case BlackBoxTargetDirAnno(s) => Some(s) case _ => None }.flatten val loc = { @@ -31,27 +32,39 @@ object GetTargetDir { } } -// Fake transform just to track Technology information directory -object TechnologyLocation { - def apply(dir: String): Annotation = { - Annotation(CircuitName("All"), classOf[TechnologyLocation], dir) +trait HasSetTechnologyLocation { + self: chisel3.Module => + + def setTechnologyLocation(dir: String) { + annotate(new ChiselAnnotation { + override def toFirrtl: Annotation = { + TechnologyLocationAnnotation(dir) + } + }) } } -class TechnologyLocation extends Transform { - def inputForm: CircuitForm = LowForm - def outputForm: CircuitForm = LowForm - def execute(state: CircuitState) = throw new Exception("Technology Location transform execution doesn't work!") + +case class TechnologyLocationAnnotation(dir: String) extends SingleTargetAnnotation[CircuitName] { + val target: CircuitName = CircuitName("All") + override def duplicate(n: CircuitName): Annotation = TechnologyLocationAnnotation(dir) +} + +class TechnologyLocation extends Transform with DependencyAPIMigration { + def execute(state: CircuitState): CircuitState = { + throw new Exception("Technology Location transform execution doesn't work!") + } + def get(state: CircuitState): String = { val annos = state.annotations - val dir = annos.map { - case Annotation(f, t, s) if t == classOf[TechnologyLocation] => Some(s) + val dir = annos.flatMap { + case TechnologyLocationAnnotation(dir) => Some(dir) case _ => None - }.flatten + } dir.length match { case 0 => "" case 1 => val targetDir = new java.io.File(dir.head) - if(!targetDir.exists()) throw new Exception("Technology yaml directory doesn't exist!") + if(!targetDir.exists()) throw new Exception(s"Technology yaml directory $targetDir doesn't exist!") dir.head case _ => throw new Exception("Only 1 tech directory annotation allowed!") } diff --git a/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala b/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala index b578be974..438895395 100644 --- a/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala +++ b/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala @@ -2,64 +2,74 @@ package barstools.tapeout.transforms.pads +import java.io.File + +import barstools.tapeout.transforms.HasSetTechnologyLocation import chisel3._ -import firrtl._ -import org.scalatest.{FlatSpec, Matchers} import chisel3.experimental._ -import chisel3.util.HasBlackBoxInline import chisel3.iotesters._ +import chisel3.util.HasBlackBoxInline +import firrtl._ +import org.scalatest.{FlatSpec, Matchers} class BB extends BlackBox with HasBlackBoxInline { val io = IO(new Bundle { val c = Input(SInt(14.W)) val z = Output(SInt(16.W)) val analog1 = Analog(3.W) - val analog2 = analog1.chiselCloneType + val analog2 = Analog(3.W) }) // Generates a "FakeBB.v" file with the following Verilog module - setInline("FakeBB.v", + setInline( + "FakeBB.v", s""" - 
|module BB( - | input [15:0] c, - | output [15:0] z, - | inout [2:0] analog1, - | inout [2:0] analog2 - |); - | always @* begin - | z = 2 * c; - | analog2 = analog1 + 1; - | end - |endmodule - """.stripMargin) + |module BB( + | input [15:0] c, + | output [15:0] z, + | inout [2:0] analog1, + | inout [2:0] analog2 + |); + | always @* begin + | z = 2 * c; + | analog2 = analog1 + 1; + | end + |endmodule + """.stripMargin + ) } // If no template file is provided, it'll use the default one (example) in the resource folder // Default pad side is Top if no side is specified for a given IO // You can designate the number of different supply pads on each chip side -class ExampleTopModuleWithBB extends TopModule( - supplyAnnos = Seq( - SupplyAnnotation(padName = "vdd", leftSide = 3, bottomSide = 2), - SupplyAnnotation(padName = "vss", rightSide = 1) - )) { +class ExampleTopModuleWithBB + extends TopModule( + supplyAnnos = Seq( + SupplyAnnotation(padName = "vdd", leftSide = 3, bottomSide = 2), + SupplyAnnotation(padName = "vss", rightSide = 1) + ) + ) + with HasSetTechnologyLocation { val io = IO(new Bundle { val a = Input(UInt(15.W)) - val b = a.chiselCloneType + val b = Input(a.cloneType) val c = Input(SInt(14.W)) val x = Output(UInt(16.W)) - val y = x.chiselCloneType + val y = Output(x.cloneType) val z = Output(SInt(16.W)) val analog1 = Analog(3.W) - val analog2 = analog1.chiselCloneType + val analog2 = analog1.cloneType val v = Output(Vec(3, UInt(5.W))) }) + setTechnologyLocation("./RealTech") + // Can annotate aggregates with pad side location + pad name (should be a name in the yaml template) annotatePad(io.v, Right, "from_tristate_foundry") // Can annotate individual elements annotatePad(io.analog1, Left, "fast_custom") annotatePad(io.analog2, Bottom, "slow_foundry") // Looks for a pad that matches the IO type (digital in, digital out, analog) if no name is specified - Seq(io.a, io.b, io.c, io.x) foreach { x => annotatePad(x, Left) } + Seq(io.a, io.b, io.c, io.x).foreach { x => annotatePad(x, Left) } // Some signals might not want pads associated with them noPad(io.y) // Clk might come directly from bump @@ -74,8 +84,8 @@ class ExampleTopModuleWithBB extends TopModule( io.x := io.a + 1.U io.y := io.b - 1.U - io.v foreach { lhs => lhs := io.a } - + io.v.foreach { lhs => lhs := io.a } + } class SimpleTopModuleTester(c: ExampleTopModuleWithBB) extends PeekPokeTester(c) { @@ -89,10 +99,10 @@ class SimpleTopModuleTester(c: ExampleTopModuleWithBB) extends PeekPokeTester(c) expect(c.io.x, ax(i) + 1) expect(c.io.y, bx(i) - 1) expect(c.io.z, 2 * cx(i)) - c.io.v foreach { out => expect(out, ax(i)) } + c.io.v.foreach { out => expect(out, ax(i)) } } - // Analog can't be peeked + poked -} + // Analog can't be peeked + poked +} // Notes: Annotations // a in 15: left, default digital @@ -110,7 +120,7 @@ class SimpleTopModuleTester(c: ExampleTopModuleWithBB) extends PeekPokeTester(c) // vdd, bottom: 2, group of 1 // vss, right: 1, group of 2 // Notes: Used pads -// digital horizontal (from_tristate_foundry) +// digital horizontal (from_tristate_foundry) // in + out // analog fast_custom horizontal // analog slow_foundry vertical @@ -122,72 +132,111 @@ class SimpleTopModuleTester(c: ExampleTopModuleWithBB) extends PeekPokeTester(c) class IOPadSpec extends FlatSpec with Matchers { - def readOutputFile(dir: String, f: String): String = - scala.io.Source.fromFile(Seq(dir, f).mkString("/")).getLines.mkString("\n") + def readOutputFile(dir: String, f: String): String = { + FileUtils.getText(dir + File.separator + f) 
+ } def readResource(resource: String): String = { val stream = getClass.getResourceAsStream(resource) scala.io.Source.fromInputStream(stream).mkString } - def checkOutputs(dir: String) = { + def checkOutputs(dir: String): Unit = { // Show that black box source helper is run //readOutputFile(dir, "black_box_verilog_files.f") should include ("pad_supply_vdd_horizontal.v") val padBBEx = s"""// Digital Pad Example - |// Signal Direction: Input - |// Pad Orientation: Horizontal - |// Call your instance PAD - |module pad_digital_from_tristate_foundry_horizontal_input( - | input in, - | output reg out - |); - | // Where you would normally dump your pad instance - | always @* begin - | out = in; - | end - |endmodule - | - |module pad_digital_from_tristate_foundry_horizontal_input_array #( - | parameter int WIDTH=1 - |)( - | input [WIDTH-1:0] in, - | output reg [WIDTH-1:0] out - |); - | pad_digital_from_tristate_foundry_horizontal_input pad_digital_from_tristate_foundry_horizontal_input[WIDTH-1:0]( - | .in(in), - | .out(out) - | );""".stripMargin + |// Signal Direction: Input + |// Pad Orientation: Horizontal + |// Call your instance PAD + |module pad_digital_from_tristate_foundry_horizontal_input( + | input in, + | output reg out + |); + | // Where you would normally dump your pad instance + | always @* begin + | out = in; + | end + |endmodule + | + |module pad_digital_from_tristate_foundry_horizontal_input_array #( + | parameter int WIDTH=1 + |)( + | input [WIDTH-1:0] in, + | output reg [WIDTH-1:0] out + |); + | pad_digital_from_tristate_foundry_horizontal_input pad_digital_from_tristate_foundry_horizontal_input[WIDTH-1:0]( + | .in(in), + | .out(out) + | );""".stripMargin // Make sure black box templating is OK - readOutputFile(dir, "pad_digital_from_tristate_foundry_horizontal_input_array.v") should include (padBBEx) + readOutputFile(dir, "pad_digital_from_tristate_foundry_horizontal_input_array.v") should include(padBBEx) - val verilog = readOutputFile(dir, "ExampleTopModuleWithBB.v") + val verilog = readOutputFile(dir, "ExampleTopModuleWithBB.v") // Pad frame + top should be exact - verilog should include (readResource("/PadAnnotationVerilogPart.v")) + verilog should include(readResource("/PadAnnotationVerilogPart.v")) // Pad Placement IO file should be exact val padIO = readOutputFile(dir, "pads.io") padIO should include(readResource("/PadPlacement.io")) } - behavior of "top module with blackbox" + behavior.of("Pad Annotations") - import barstools.tapeout.transforms._ + it should "serialize pad annotations" in { + val noIOPadAnnotation = NoIOPadAnnotation("dog") + noIOPadAnnotation.serialize should include("noPad: dog") + + val ioPadAnnotation = IOPadAnnotation("left", "oliver") + ioPadAnnotation.serialize should include( + """padSide: left + |padName: oliver + |""".stripMargin) + + val modulePadAnnotation = ModulePadAnnotation( + "top", + 11, + 42, + Seq( + SupplyAnnotation("mypad, 1, 2 ,3 , 4"), + SupplyAnnotation("yourpad, 9, 8, 7, 6") + ) + ) + + modulePadAnnotation.serialize should be( + """defaultPadSide: top + |coreWidth: 11 + |coreHeight: 42 + |supplyAnnos: + |- rightSide: 0 + | padName: mypad, 1, 2 ,3 , 4 + | leftSide: 0 + | bottomSide: 0 + | topSide: 0 + |- rightSide: 0 + | padName: yourpad, 9, 8, 7, 6 + | leftSide: 0 + | bottomSide: 0 + | topSide: 0 + |""".stripMargin + ) + } + + behavior.of("top module with blackbox") it should "pass simple testbench" in { val optionsManager = new TesterOptionsManager { firrtlOptions = firrtlOptions.copy( compilerName = "verilog" - // annotations 
= List(TechnologyLocation("./RealTech")) ) testerOptions = testerOptions.copy(isVerbose = true, backendName = "verilator", displayBase = 10) commonOptions = commonOptions.copy(targetDirName = "test_run_dir/PadsTB") } iotesters.Driver.execute(() => new ExampleTopModuleWithBB, optionsManager) { c => val dir = optionsManager.commonOptions.targetDirName - checkOutputs(dir) + checkOutputs(dir) new SimpleTopModuleTester(c) - } should be (true) + } should be(true) } -/* + /* it should "create proper IO pads + black box in low firrtl" in { val optionsManager = new ExecutionOptionsManager("barstools") with HasChiselExecutionOptions with HasFirrtlOptions { firrtlOptions = firrtlOptions.copy(compilerName = "low") @@ -196,15 +245,15 @@ class IOPadSpec extends FlatSpec with Matchers { } val success = chisel3.Driver.execute(optionsManager, () => new ExampleTopModuleWithBB) match { case ChiselExecutionSuccess(_, chirrtl, Some(FirrtlExecutionSuccess(_, firrtl))) => - firrtl should include ("ExampleTopModuleWithBB_PadFrame") + firrtl should include ("ExampleTopModuleWithBB_PadFrame") firrtl should include ("ExampleTopModuleWithBB_Internal") - firrtl should not include ("FakeBBPlaceholder") + firrtl should not include ("FakeBBPlaceholder") true case _ => false - } + } success should be (true) - } -*/ + } + */ it should "create proper IO pads + black box in verilog" in { val optionsManager = new ExecutionOptionsManager("barstools") with HasChiselExecutionOptions with HasFirrtlOptions { firrtlOptions = firrtlOptions.copy( @@ -214,13 +263,13 @@ class IOPadSpec extends FlatSpec with Matchers { //commonOptions = commonOptions.copy(globalLogLevel = logger.LogLevel.Info) } val success = chisel3.Driver.execute(optionsManager, () => new ExampleTopModuleWithBB) match { - case ChiselExecutionSuccess(_, chirrtl, Some(FirrtlExecutionSuccess(_, verilog))) => + case ChiselExecutionSuccess(_, chirrtl, Some(FirrtlExecutionSuccess(_, verilog))) => true case _ => false - } - success should be (true) + } + success should be(true) val dir = optionsManager.commonOptions.targetDirName checkOutputs(dir) - } + } -} \ No newline at end of file +} diff --git a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala index 07fca3028..4b5de9677 100644 --- a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala +++ b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala @@ -3,7 +3,7 @@ package barstools.tapeout.transforms import chisel3._ -import firrtl._ +import chisel3.stage.ChiselStage import org.scalatest.{FreeSpec, Matchers} class ExampleModuleNeedsResetInverted extends Module with ResetInverter { @@ -19,22 +19,15 @@ class ExampleModuleNeedsResetInverted extends Module with ResetInverter { } class ResetNSpec extends FreeSpec with Matchers { - "Inverting reset needs to be done throughout module" in { - val optionsManager = new ExecutionOptionsManager("dsptools") with HasChiselExecutionOptions with HasFirrtlOptions { - firrtlOptions = firrtlOptions.copy(compilerName = "low", customTransforms = List(new ResetInverterTransform)), - } - chisel3.Driver.execute(optionsManager, () => new ExampleModuleNeedsResetInverted) match { - case ChiselExecutionSuccess(_, chirrtl, Some(FirrtlExecutionSuccess(_, firrtl))) => - chirrtl should include ("input reset :") - chirrtl should not include "input reset_n :" - chirrtl should not include "node reset = not(reset_n)" - - firrtl should include ("input reset_n :") - firrtl should include ("node reset = not(reset_n)") - firrtl should 
not include "input reset :" - case _ => - // bad - } + val chirrtl = (new ChiselStage).emitChirrtl(new ExampleModuleNeedsResetInverted, Array()) + chirrtl should include("input reset :") + (chirrtl should not).include("input reset_n :") + (chirrtl should not).include("node reset = not(reset_n)") + + val firrtl = (new ChiselStage).emitFirrtl(new ExampleModuleNeedsResetInverted, Array("-X", "low")) + firrtl should include("input reset_n :") + firrtl should include("node reset = not(reset_n)") + (firrtl should not).include("input reset :") } } diff --git a/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala b/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala index 76223b717..1f2de5a88 100644 --- a/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala +++ b/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala @@ -2,13 +2,12 @@ package barstools.tapeout.transforms.retime.test +import barstools.tapeout.transforms.retime._ import chisel3._ +import chisel3.stage.ChiselStage import firrtl._ +import logger.Logger import org.scalatest.{FlatSpec, Matchers} -import chisel3.experimental._ -import chisel3.util.HasBlackBoxInline -import chisel3.iotesters._ -import barstools.tapeout.transforms.retime._ class RetimeSpec extends FlatSpec with Matchers { def normalized(s: String): String = { @@ -25,20 +24,47 @@ class RetimeSpec extends FlatSpec with Matchers { it should "pass simple retime module annotation" in { val gen = () => new RetimeModule() val dir = uniqueDirName(gen, "RetimeModule") - chisel3.Driver.execute(Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final"), gen) shouldBe a [ChiselExecutionSuccess] - val lines = io.Source.fromFile(s"test_run_dir/$dir/test_run_dir/$dir/final.anno.json").getLines().map(normalized).mkString("\n") - lines should include("barstools.tapeout.transforms.retime.RetimeTransform") + Logger.makeScope(Seq.empty) { + val captor = new Logger.OutputCaptor + Logger.setOutput(captor.printStream) + val firrtl = (new ChiselStage).emitFirrtl( + new RetimeModule(), + Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final", "--log-level", "info") + ) + firrtl.nonEmpty should be(true) + //Make sure we got the RetimeTransform scheduled + captor.getOutputAsString should include ("barstools.tapeout.transforms.retime.RetimeTransform") + } + + val lines = FileUtils.getLines(s"test_run_dir/$dir/test_run_dir/$dir/final.anno.json") + .map(normalized) + .mkString("\n") + lines should include("barstools.tapeout.transforms.retime.RetimeAnnotation") + lines should include(""""target":"RetimeModule.RetimeModule"""") } - // TODO(azidar): need to fix/add instance annotations - ignore should "pass simple retime instance annotation" in { + it should "pass simple retime instance annotation" in { val gen = () => new RetimeInstance() val dir = uniqueDirName(gen, "RetimeInstance") - chisel3.Driver.execute(Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final.anno"), gen) shouldBe a [ChiselExecutionSuccess] - val lines = io.Source.fromFile(s"test_run_dir/$dir/final.anno").getLines().map(normalized).toSeq - lines should contain ("Annotation(ComponentName(instance, ModuleName(RetimeInstance,CircuitName(RetimeInstance))),class barstools.tapeout.transforms.retime.RetimeTransform,retime)") + Logger.makeScope(Seq.empty) { + val captor = new Logger.OutputCaptor + Logger.setOutput(captor.printStream) + val firrtl = (new ChiselStage).emitFirrtl( + new RetimeInstance(), + Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final", 
"--log-level", "info") + ) + firrtl.nonEmpty should be(true) + //Make sure we got the RetimeTransform scheduled + captor.getOutputAsString should include ("barstools.tapeout.transforms.retime.RetimeTransform") + } + + val lines = FileUtils.getLines(s"test_run_dir/$dir/test_run_dir/$dir/final.anno.json") + .map(normalized) + .mkString("\n") + lines should include("barstools.tapeout.transforms.retime.RetimeAnnotation") + lines should include(""""target":"RetimeInstance.MyModule"""") } } From d06d8cc16ce1132867359774bb2fb30d4ab676e3 Mon Sep 17 00:00:00 2001 From: chick Date: Mon, 14 Sep 2020 09:32:18 -0700 Subject: [PATCH 175/273] - FoundryPadsYaml would not parse yaml - Made separate case class for data - Now parses - Fails later with UnknownType in firrt compiler - Fixed similar parsing problem with PadPlacement --- .../transforms/.pads/FoundryPadsYaml.scala | 63 ++++++++++++++----- .../transforms/.pads/AddIOPadsSpec.scala | 20 ++---- 2 files changed, 53 insertions(+), 30 deletions(-) diff --git a/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala b/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala index ef6fdde73..ce19a6d7f 100644 --- a/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala +++ b/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala @@ -8,13 +8,32 @@ import firrtl._ import firrtl.ir._ import barstools.tapeout.transforms._ +trait HasFoundryPadFields { + val tpe: String + val name: String + val width: Int + val height: Int + val supplySetNum: Option[Int] + val verilog: String +} + +case class FoundryPadFields( + tpe: String, + name: String, + width: Int, + height: Int, + supplySetNum: Option[Int], + verilog: String) + extends HasFoundryPadFields + case class FoundryPad( - tpe: String, - name: String, - width: Int, - height: Int, - supplySetNum: Option[Int], - verilog: String) { + tpe: String, + name: String, + width: Int, + height: Int, + supplySetNum: Option[Int], + verilog: String) + extends HasFoundryPadFields { def padInstName = "PAD" @@ -38,8 +57,10 @@ case class FoundryPad( // Supply pads don't have IO require(!verilog.contains("{{#if isInput}}"), "Supply pad template must not contain '{{#if isInput}}'") require( - verilog.contains(s"${padInstName}["), "All supply pad templates should have instance arrays" + - " called ${padInstName}[n:0], where n = ${getSupplySetNum-1}") + verilog.contains(s"${padInstName}["), + "All supply pad templates should have instance arrays" + + " called ${padInstName}[n:0], where n = ${getSupplySetNum-1}" + ) require(supplySetNum.nonEmpty, "# of grouped supply pads 'supplySetNum' should be specified!") SupplyPad case _ => throw new Exception("Illegal pad type in config!") @@ -53,14 +74,14 @@ case class FoundryPad( private[barstools] val correctedName = name.replace(" ", "_") case class TemplateParams( - // isInput only used with digital pads - isInput: Boolean, - isHorizontal: Boolean) { + // isInput only used with digital pads + isInput: Boolean, + isHorizontal: Boolean) { private val orient = if (isHorizontal) Horizontal.serialize else Vertical.serialize private val dir = padType match { - case AnalogPad => "inout" - case SupplyPad => "none" + case AnalogPad => "inout" + case SupplyPad => "none" case DigitalPad => if (isInput) Input.serialize else Output.serialize } val name = { @@ -84,13 +105,23 @@ case class FoundryPad( object FoundryPadsYaml extends DefaultYamlProtocol { val exampleResource = "/FoundryPads.yaml" - implicit val _pad = yamlFormat6(FoundryPad) + implicit val _pad = 
yamlFormat6(FoundryPadFields) def parse(techDir: String): Seq[FoundryPad] = { val file = techDir + exampleResource - if(techDir != "" && !(new java.io.File(file)).exists()) { + if (techDir != "" && !(new java.io.File(file)).exists()) { throw new Exception(s"Technology directory $techDir must contain FoundryPads.yaml!") } - val out = (new YamlFileReader(exampleResource)).parse[FoundryPad](if (techDir == "") "" else file) + val fieldsArray = (new YamlFileReader(exampleResource)).parse[FoundryPadFields](if (techDir == "") "" else file) + val out = fieldsArray.map { fields => + FoundryPad( + tpe = fields.tpe, + name = fields.name, + width = fields.width, + height = fields.height, + supplySetNum = fields.supplySetNum, + verilog = fields.verilog + ) + } val padNames = out.map(x => x.correctedName) require(padNames.distinct.length == padNames.length, "Pad names must be unique!") out diff --git a/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala b/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala index 438895395..7fe423422 100644 --- a/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala +++ b/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala @@ -8,6 +8,7 @@ import barstools.tapeout.transforms.HasSetTechnologyLocation import chisel3._ import chisel3.experimental._ import chisel3.iotesters._ +import chisel3.stage.ChiselStage import chisel3.util.HasBlackBoxInline import firrtl._ import org.scalatest.{FlatSpec, Matchers} @@ -255,20 +256,11 @@ class IOPadSpec extends FlatSpec with Matchers { } */ it should "create proper IO pads + black box in verilog" in { - val optionsManager = new ExecutionOptionsManager("barstools") with HasChiselExecutionOptions with HasFirrtlOptions { - firrtlOptions = firrtlOptions.copy( - compilerName = "verilog" - ) - commonOptions = commonOptions.copy(targetDirName = "test_run_dir/PadsVerilog") - //commonOptions = commonOptions.copy(globalLogLevel = logger.LogLevel.Info) - } - val success = chisel3.Driver.execute(optionsManager, () => new ExampleTopModuleWithBB) match { - case ChiselExecutionSuccess(_, chirrtl, Some(FirrtlExecutionSuccess(_, verilog))) => - true - case _ => false - } - success should be(true) - val dir = optionsManager.commonOptions.targetDirName + val dir = "test_run_dir/PadsVerilog" + (new ChiselStage).emitFirrtl( + new ExampleTopModuleWithBB, + Array("-td", dir, "-X", "verilog") + ) checkOutputs(dir) } From 31590a7948db47fd16beed266c4833579acc305b Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Mon, 14 Sep 2020 13:24:44 -0700 Subject: [PATCH 176/273] Undo regression in iocell flexibility --- iocell/src/main/scala/chisel/IOCell.scala | 25 +++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/chisel/IOCell.scala index 452aadedb..93dfac2f4 100644 --- a/iocell/src/main/scala/chisel/IOCell.scala +++ b/iocell/src/main/scala/chisel/IOCell.scala @@ -66,19 +66,19 @@ class DigitalInIOCellBundle extends Bundle { trait IOCell extends BaseModule trait AnalogIOCell extends IOCell { - val io = IO(new AnalogIOCellBundle) + val io: AnalogIOCellBundle } trait DigitalGPIOCell extends IOCell { - val io = IO(new DigitalGPIOCellBundle) + val io: DigitalGPIOCellBundle } trait DigitalInIOCell extends IOCell { - val io = IO(new DigitalInIOCellBundle) + val io: DigitalInIOCellBundle } trait DigitalOutIOCell extends IOCell { - val io = IO(new DigitalOutIOCellBundle) + val io: DigitalOutIOCellBundle } // The following Generic IO cell black boxes have 
verilog models that mimic a very simple @@ -89,10 +89,19 @@ abstract class GenericIOCell extends BlackBox with HasBlackBoxResource { addResource("/barstools/iocell/vsrc/IOCell.v") } -class GenericAnalogIOCell extends GenericIOCell with AnalogIOCell -class GenericDigitalGPIOCell extends GenericIOCell with DigitalGPIOCell -class GenericDigitalInIOCell extends GenericIOCell with DigitalInIOCell -class GenericDigitalOutIOCell extends GenericIOCell with DigitalOutIOCell +class GenericAnalogIOCell extends GenericIOCell with AnalogIOCell { + val io = IO(new AnalogIOCellBundle) +} +class GenericDigitalGPIOCell extends GenericIOCell with DigitalGPIOCell { + val io = IO(new DigitalGPIOCellBundle) +} +class GenericDigitalInIOCell extends GenericIOCell with DigitalInIOCell { + val io = IO(new DigitalInIOCellBundle) +} +class GenericDigitalOutIOCell extends GenericIOCell with DigitalOutIOCell { + val io = IO(new DigitalOutIOCellBundle) +} + trait IOCellTypeParams { def analog(): AnalogIOCell From 847f72eca0fa3207ab7140c07e980ac9f8cf1251 Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Mon, 14 Sep 2020 19:39:44 -0700 Subject: [PATCH 177/273] Support plusarg_reader blackbox in the harness --- tapeout/src/main/scala/transforms/Generate.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 89df8b556..3ed105fea 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -221,7 +221,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => // Execute top and get list of ExtModules to avoid collisions val topExtModules = executeTop() - val externals = Seq("SimSerial", "SimDTM") ++ harnessTop ++ synTop + val externals = Seq("SimSerial", "SimDTM", "plusarg_reader") ++ harnessTop ++ synTop val harnessAnnos = tapeoutOptions.harnessDotfOut.map(BlackBoxResourceFileNameAnno(_)).toSeq ++ From 4a5c75fcf85f03af858f1d7db04303d4b0733de7 Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Thu, 17 Sep 2020 13:21:32 -0700 Subject: [PATCH 178/273] Add explicit naming of IOs generated by generateIOFromSignal --- iocell/src/main/scala/chisel/IOCell.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/chisel/IOCell.scala index 93dfac2f4..a5926030b 100644 --- a/iocell/src/main/scala/chisel/IOCell.scala +++ b/iocell/src/main/scala/chisel/IOCell.scala @@ -127,13 +127,13 @@ object IOCell { * AsyncReset, and otherwise to Bool (sync reset) * @return A tuple of (the generated IO data node, a Seq of all generated IO cell instances) */ - def generateIOFromSignal[T <: Data](coreSignal: T, name: Option[String] = None, + def generateIOFromSignal[T <: Data](coreSignal: T, name: String, typeParams: IOCellTypeParams = GenericIOCellParams(), abstractResetAsAsync: Boolean = false): (T, Seq[IOCell]) = { - val padSignal = IO(DataMirror.internal.chiselTypeClone[T](coreSignal)) + val padSignal = IO(DataMirror.internal.chiselTypeClone[T](coreSignal)).suggestName(name) val resetFn = if (abstractResetAsAsync) toAsyncReset else toSyncReset - val iocells = IOCell.generateFromSignal(coreSignal, padSignal, name, typeParams, resetFn) + val iocells = IOCell.generateFromSignal(coreSignal, padSignal, Some(s"iocell_$name"), typeParams, resetFn) (padSignal, iocells) } From 0430403920144e9c4b3ccc1d8c7fe5cfeac60ebf Mon Sep 17 00:00:00 2001 From: chick Date: Mon, 28 Sep 2020 
15:20:42 -0700 Subject: [PATCH 179/273] - Simplest way to make custom transforms run in same place as they did prior to Dependency API --- .../main/scala/transforms/.pads/AddIOPadsTransform.scala | 6 ++++++ .../src/main/scala/transforms/AddSuffixToModuleNames.scala | 5 +++++ tapeout/src/main/scala/transforms/EnumerateModules.scala | 5 +++++ tapeout/src/main/scala/transforms/ResetInverter.scala | 6 ++++++ tapeout/src/main/scala/transforms/retime/Retime.scala | 6 ++++++ tapeout/src/main/scala/transforms/utils/FileUtils.scala | 6 ++++++ 6 files changed, 34 insertions(+) diff --git a/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala b/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala index 1d12adb8f..b79c1093a 100644 --- a/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala +++ b/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala @@ -5,12 +5,18 @@ package barstools.tapeout.transforms.pads import firrtl._ import firrtl.passes._ import barstools.tapeout.transforms._ +import firrtl.options.Dependency +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency import scala.collection.mutable // Main Add IO Pad transform operates on low Firrtl class AddIOPadsTransform extends Transform with SeqTransformBased with DependencyAPIMigration { + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters + val transformList = new mutable.ArrayBuffer[Transform] def transforms: Seq[Transform] = transformList diff --git a/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala b/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala index ff8c18578..ada3a719b 100644 --- a/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala +++ b/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala @@ -6,6 +6,8 @@ import firrtl._ import firrtl.ir._ import firrtl.annotations._ import firrtl.Mappers._ +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency case class KeepNameAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { @@ -23,6 +25,9 @@ case class ModuleNameSuffixAnnotation(target: CircuitTarget, suffix: String) // emitted, this should be safe. 
class AddSuffixToModuleNames extends Transform with DependencyAPIMigration { + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters + def processAnnos(annos: AnnotationSeq): (AnnotationSeq, (String) => String) = { val whitelist = annos.collect({ case KeepNameAnnotation(tgt) => tgt.module }).toSet val newAnnos = annos.filterNot(_.isInstanceOf[ModuleNameSuffixAnnotation]) diff --git a/tapeout/src/main/scala/transforms/EnumerateModules.scala b/tapeout/src/main/scala/transforms/EnumerateModules.scala index 4bd2855f1..f1f66033b 100644 --- a/tapeout/src/main/scala/transforms/EnumerateModules.scala +++ b/tapeout/src/main/scala/transforms/EnumerateModules.scala @@ -5,6 +5,8 @@ package barstools.tapeout.transforms import firrtl._ import firrtl.ir._ import firrtl.passes.Pass +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency class EnumerateModulesPass(enumerate: (Module) => Unit) extends Pass { @@ -23,6 +25,9 @@ class EnumerateModulesPass(enumerate: (Module) => Unit) extends Pass { class EnumerateModules(enumerate: (Module) => Unit) extends Transform with SeqTransformBased with DependencyAPIMigration { + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters + def transforms: Seq[Transform] = Seq(new EnumerateModulesPass(enumerate)) def execute(state: CircuitState): CircuitState = { diff --git a/tapeout/src/main/scala/transforms/ResetInverter.scala b/tapeout/src/main/scala/transforms/ResetInverter.scala index e6bdad458..2cbbd45a4 100644 --- a/tapeout/src/main/scala/transforms/ResetInverter.scala +++ b/tapeout/src/main/scala/transforms/ResetInverter.scala @@ -7,6 +7,8 @@ import firrtl.PrimOps.Not import firrtl.annotations.{Annotation, CircuitName, ModuleName, SingleTargetAnnotation} import firrtl.ir._ import firrtl.passes.Pass +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency import firrtl.{CircuitState, DependencyAPIMigration, Transform} case class ResetInverterAnnotation(target: ModuleName) extends SingleTargetAnnotation[ModuleName] { @@ -39,6 +41,10 @@ object ResetN extends Pass { } class ResetInverterTransform extends Transform with DependencyAPIMigration { + + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters + override def execute(state: CircuitState): CircuitState = { state.annotations.filter(_.isInstanceOf[ResetInverterAnnotation]) match { case Nil => state diff --git a/tapeout/src/main/scala/transforms/retime/Retime.scala b/tapeout/src/main/scala/transforms/retime/Retime.scala index f790f7b3c..e554beadf 100644 --- a/tapeout/src/main/scala/transforms/retime/Retime.scala +++ b/tapeout/src/main/scala/transforms/retime/Retime.scala @@ -4,6 +4,8 @@ package barstools.tapeout.transforms.retime import chisel3.experimental.RunFirrtlTransform import firrtl.annotations._ +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency import firrtl.{CircuitState, DependencyAPIMigration, Transform} case class RetimeAnnotation(target: Named) extends SingleTargetAnnotation[Named] { @@ -11,6 +13,10 @@ case class RetimeAnnotation(target: Named) extends SingleTargetAnnotation[Named] } class RetimeTransform extends Transform with DependencyAPIMigration { + + override def prerequisites: Seq[TransformDependency] = Forms.LowForm 
+ override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters + override def execute(state: CircuitState): CircuitState = { state.annotations.filter(_.isInstanceOf[RetimeAnnotation]) match { case Nil => state diff --git a/tapeout/src/main/scala/transforms/utils/FileUtils.scala b/tapeout/src/main/scala/transforms/utils/FileUtils.scala index 76ee45ec1..0d38d3769 100644 --- a/tapeout/src/main/scala/transforms/utils/FileUtils.scala +++ b/tapeout/src/main/scala/transforms/utils/FileUtils.scala @@ -5,6 +5,8 @@ package barstools.tapeout.transforms import chisel3.experimental.{ChiselAnnotation, annotate} import firrtl._ import firrtl.annotations._ +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency import firrtl.transforms.BlackBoxTargetDirAnno object WriteConfig { @@ -50,6 +52,10 @@ case class TechnologyLocationAnnotation(dir: String) extends SingleTargetAnnotat } class TechnologyLocation extends Transform with DependencyAPIMigration { + + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters + def execute(state: CircuitState): CircuitState = { throw new Exception("Technology Location transform execution doesn't work!") } From f51156bf1fcebc11d82a9bb357d3134652ff4f39 Mon Sep 17 00:00:00 2001 From: chick Date: Mon, 28 Sep 2020 15:34:36 -0700 Subject: [PATCH 180/273] - Fixed ResetNSpec --- tapeout/src/main/scala/transforms/ResetInverter.scala | 3 ++- tapeout/src/test/scala/transforms/ResetInverterSpec.scala | 8 +++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/tapeout/src/main/scala/transforms/ResetInverter.scala b/tapeout/src/main/scala/transforms/ResetInverter.scala index 2cbbd45a4..22b2a7048 100644 --- a/tapeout/src/main/scala/transforms/ResetInverter.scala +++ b/tapeout/src/main/scala/transforms/ResetInverter.scala @@ -24,7 +24,8 @@ object ResetN extends Pass { "Can only invert reset on a module with reset!") // Rename "reset" to "reset_n" val portsx = mod.ports map { - case Port(info, "reset", Input, Bool) => Port(info, "reset_n", Input, Bool) + case Port(info, "reset", Input, Bool) => + Port(info, "reset_n", Input, Bool) case other => other } val newReset = DefNode(NoInfo, "reset", DoPrim(Not, Seq(Reference("reset_n", Bool)), Seq.empty, Bool)) diff --git a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala index 4b5de9677..fe2042880 100644 --- a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala +++ b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala @@ -19,13 +19,15 @@ class ExampleModuleNeedsResetInverted extends Module with ResetInverter { } class ResetNSpec extends FreeSpec with Matchers { - "Inverting reset needs to be done throughout module" in { - val chirrtl = (new ChiselStage).emitChirrtl(new ExampleModuleNeedsResetInverted, Array()) + "Inverting reset needs to be done throughout module in Chirrtl" in { + val chirrtl = (new ChiselStage).emitChirrtl(new ExampleModuleNeedsResetInverted, Array("--no-run-firrtl")) chirrtl should include("input reset :") (chirrtl should not).include("input reset_n :") (chirrtl should not).include("node reset = not(reset_n)") + } - val firrtl = (new ChiselStage).emitFirrtl(new ExampleModuleNeedsResetInverted, Array("-X", "low")) + "Inverting reset needs to be done throughout module when generating firrtl" in { + val firrtl = (new ChiselStage).emitFirrtl(new ExampleModuleNeedsResetInverted) firrtl should 
include("input reset_n :") firrtl should include("node reset = not(reset_n)") (firrtl should not).include("input reset :") From 1a82c082b30d930ee68dd0f5289c2ac1309e615f Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 29 Sep 2020 10:11:46 -0700 Subject: [PATCH 181/273] - Make transfrorms run in as close to same order as before - Fix parsing of PadPlacement JSON --- .../transforms/.pads/AddIOPadsTransform.scala | 1 + .../scala/transforms/.pads/PadPlacement.scala | 70 +++++++++++++++---- .../transforms/AddSuffixToModuleNames.scala | 1 + .../scala/transforms/EnumerateModules.scala | 1 + .../main/scala/transforms/ResetInverter.scala | 1 + .../main/scala/transforms/retime/Retime.scala | 1 + .../scala/transforms/utils/FileUtils.scala | 1 + .../transforms/.pads/AddIOPadsSpec.scala | 5 +- 8 files changed, 65 insertions(+), 16 deletions(-) diff --git a/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala b/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala index b79c1093a..ba095879b 100644 --- a/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala +++ b/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala @@ -15,6 +15,7 @@ import scala.collection.mutable class AddIOPadsTransform extends Transform with SeqTransformBased with DependencyAPIMigration { override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters val transformList = new mutable.ArrayBuffer[Transform] diff --git a/tapeout/src/main/scala/transforms/.pads/PadPlacement.scala b/tapeout/src/main/scala/transforms/.pads/PadPlacement.scala index 2d774f01e..d3e996266 100644 --- a/tapeout/src/main/scala/transforms/.pads/PadPlacement.scala +++ b/tapeout/src/main/scala/transforms/.pads/PadPlacement.scala @@ -1,20 +1,46 @@ +// See LICENSE for license details. 
+ package barstools.tapeout.transforms.pads +import barstools.tapeout.transforms._ import net.jcazevedo.moultingyaml._ -import firrtl._ -import firrtl.ir._ -import barstools.tapeout.transforms._ +/** This is a hack to get around weird problem with yaml parser + * that without this gives PadPlacement defines additional fields + * + */ +trait HasPadPlacementFields { + def file: String + def left: String + def top: String + def right: String + def bottom: String + def instanceArray: String + def padLine: String + def template: String +} + +case class PadPlacementFields( + file: String, + left: String, + top: String, + right: String, + bottom: String, + instanceArray: String, + padLine: String, + template: String +) extends HasPadPlacementFields case class PadPlacement( - file: String, - left: String, - top: String, - right: String, - bottom: String, - instanceArray: String, - padLine: String, - template: String) { + file: String, + left: String, + top: String, + right: String, + bottom: String, + instanceArray: String, + padLine: String, + template: String +) extends HasPadPlacementFields { require(instanceArray contains "{{signal}}", "Instance Array Template should contain {{signal}}") require(instanceArray contains "{{idx}}", "Instance Array Template should contain {{idx}}") @@ -33,8 +59,8 @@ case class PadPlacement( case Bottom => bottom } - import com.gilt.handlebars.scala.binding.dynamic._ import com.gilt.handlebars.scala.Handlebars + import com.gilt.handlebars.scala.binding.dynamic._ private val instanceArrayTemplate = Handlebars(instanceArray.stripMargin) private val padLineTemplate = Handlebars(padLine.stripMargin) @@ -52,10 +78,26 @@ case class PadPlacementParams(leftPads: String, rightPads: String, topPads: Stri object PadPlacementFile extends DefaultYamlProtocol { val exampleResource = "/PadPlacement.yaml" - implicit val _pad = yamlFormat8(PadPlacement) + implicit val _pad = yamlFormat8(PadPlacementFields) + + def main(args: Array[String]): Unit = { + println(parse("RealTech/PadPlacement.yaml")) + } + def parse(file: String = ""): PadPlacement = { - (new YamlFileReader(exampleResource)).parse[PadPlacement](file).head + val fields = (new YamlFileReader(exampleResource)).parse[PadPlacementFields](file).head + PadPlacement( + file = fields.file, + left = fields.left, + top = fields.top, + right = fields.right, + bottom = fields.bottom, + instanceArray = fields.instanceArray, + padLine = fields.padLine, + template = fields.template + ) } + def generate( techDir: String, targetDir: String, diff --git a/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala b/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala index ada3a719b..ab1dd4873 100644 --- a/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala +++ b/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala @@ -26,6 +26,7 @@ case class ModuleNameSuffixAnnotation(target: CircuitTarget, suffix: String) class AddSuffixToModuleNames extends Transform with DependencyAPIMigration { override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters def processAnnos(annos: AnnotationSeq): (AnnotationSeq, (String) => String) = { diff --git a/tapeout/src/main/scala/transforms/EnumerateModules.scala b/tapeout/src/main/scala/transforms/EnumerateModules.scala index f1f66033b..a2b499fd8 100644 --- 
a/tapeout/src/main/scala/transforms/EnumerateModules.scala +++ b/tapeout/src/main/scala/transforms/EnumerateModules.scala @@ -26,6 +26,7 @@ class EnumerateModules(enumerate: (Module) => Unit) extends Transform with SeqTransformBased with DependencyAPIMigration { override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters def transforms: Seq[Transform] = Seq(new EnumerateModulesPass(enumerate)) diff --git a/tapeout/src/main/scala/transforms/ResetInverter.scala b/tapeout/src/main/scala/transforms/ResetInverter.scala index 22b2a7048..f92822510 100644 --- a/tapeout/src/main/scala/transforms/ResetInverter.scala +++ b/tapeout/src/main/scala/transforms/ResetInverter.scala @@ -44,6 +44,7 @@ object ResetN extends Pass { class ResetInverterTransform extends Transform with DependencyAPIMigration { override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters override def execute(state: CircuitState): CircuitState = { diff --git a/tapeout/src/main/scala/transforms/retime/Retime.scala b/tapeout/src/main/scala/transforms/retime/Retime.scala index e554beadf..d88217c70 100644 --- a/tapeout/src/main/scala/transforms/retime/Retime.scala +++ b/tapeout/src/main/scala/transforms/retime/Retime.scala @@ -15,6 +15,7 @@ case class RetimeAnnotation(target: Named) extends SingleTargetAnnotation[Named] class RetimeTransform extends Transform with DependencyAPIMigration { override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters override def execute(state: CircuitState): CircuitState = { diff --git a/tapeout/src/main/scala/transforms/utils/FileUtils.scala b/tapeout/src/main/scala/transforms/utils/FileUtils.scala index 0d38d3769..ded0474d7 100644 --- a/tapeout/src/main/scala/transforms/utils/FileUtils.scala +++ b/tapeout/src/main/scala/transforms/utils/FileUtils.scala @@ -54,6 +54,7 @@ case class TechnologyLocationAnnotation(dir: String) extends SingleTargetAnnotat class TechnologyLocation extends Transform with DependencyAPIMigration { override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters def execute(state: CircuitState): CircuitState = { diff --git a/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala b/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala index 7fe423422..12fcb4111 100644 --- a/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala +++ b/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala @@ -257,9 +257,10 @@ class IOPadSpec extends FlatSpec with Matchers { */ it should "create proper IO pads + black box in verilog" in { val dir = "test_run_dir/PadsVerilog" - (new ChiselStage).emitFirrtl( + (new ChiselStage).emitVerilog( new ExampleTopModuleWithBB, - Array("-td", dir, "-X", "verilog") +// Array("-td", dir, "-X", "verilog") + Array("-td", dir) ) checkOutputs(dir) } From 8903c04c2d9b084bce5ab7a5930ee2579cce46b0 Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 29 Sep 
2020 10:59:48 -0700 Subject: [PATCH 182/273] - fix call to `ceilLog2` in macros --- macros/src/test/scala/MacroCompilerSpec.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index 0bc0f486a..d8e0d2df4 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -248,10 +248,10 @@ trait HasSimpleTestGenerator { val v = s"${generatorType}${extraTagPrefixed}.v" lazy val mem_name = "target_memory" - val mem_addr_width = ceilLog2(memDepth) + val mem_addr_width = MacroCompilerMath.ceilLog2(memDepth) lazy val lib_name = "awesome_lib_mem" - val lib_addr_width = ceilLog2(libDepth) + val lib_addr_width = MacroCompilerMath.ceilLog2(libDepth) // Override these to change the port prefixes if needed. def libPortPrefix: String = "lib" From a1dfd4f7741d8b5ff2bb4215f17af48b26cb3177 Mon Sep 17 00:00:00 2001 From: chick Date: Wed, 30 Sep 2020 15:04:56 -0700 Subject: [PATCH 183/273] Remove all of the PadStuff --- tapeout/src/main/resources/FoundryPads.yaml | 113 -------- tapeout/src/main/resources/PadPlacement.yaml | 43 --- .../transforms/.pads/AddIOPadsTransform.scala | 65 ----- .../scala/transforms/.pads/AddPadFrame.scala | 133 --------- .../transforms/.pads/AnnotatePortPads.scala | 134 --------- .../transforms/.pads/AnnotateSupplyPads.scala | 56 ---- .../transforms/.pads/ChiselTopModule.scala | 109 ------- .../scala/transforms/.pads/CreatePadBBs.scala | 109 ------- .../transforms/.pads/FoundryPadsYaml.scala | 129 --------- .../transforms/.pads/PadAnnotations.scala | 146 ---------- .../transforms/.pads/PadDescriptors.scala | 49 ---- .../scala/transforms/.pads/PadPlacement.scala | 158 ----------- tapeout/src/test/resources/PadPlacement.io | 236 --------------- .../transforms/.pads/AddIOPadsSpec.scala | 268 ------------------ 14 files changed, 1748 deletions(-) delete mode 100644 tapeout/src/main/resources/FoundryPads.yaml delete mode 100644 tapeout/src/main/resources/PadPlacement.yaml delete mode 100644 tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala delete mode 100644 tapeout/src/main/scala/transforms/.pads/AddPadFrame.scala delete mode 100644 tapeout/src/main/scala/transforms/.pads/AnnotatePortPads.scala delete mode 100644 tapeout/src/main/scala/transforms/.pads/AnnotateSupplyPads.scala delete mode 100644 tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala delete mode 100644 tapeout/src/main/scala/transforms/.pads/CreatePadBBs.scala delete mode 100644 tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala delete mode 100644 tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala delete mode 100644 tapeout/src/main/scala/transforms/.pads/PadDescriptors.scala delete mode 100644 tapeout/src/main/scala/transforms/.pads/PadPlacement.scala delete mode 100644 tapeout/src/test/resources/PadPlacement.io delete mode 100644 tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala diff --git a/tapeout/src/main/resources/FoundryPads.yaml b/tapeout/src/main/resources/FoundryPads.yaml deleted file mode 100644 index a6133be0c..000000000 --- a/tapeout/src/main/resources/FoundryPads.yaml +++ /dev/null @@ -1,113 +0,0 @@ -# Pad types must be one of digital, analog, or supply; pad names must be unique! 
-# This just shows you how you can template things with {{}}, if/else, and the following parameters: -# isInput: Boolean (each digital pad entry should be configurable between both input and output) -# isHorizontal: Boolean (each pad entry should be configurable between both horizontal and vertical) -# NOTE: Expects 1-bit in/out to be named in/out for digital; and 1-bit io for analog (supplies don't have ports) -# Expects module name to be obtained from {{name}} which is derived from yaml name, tpe in the Firrtl pass -# Pipe is used for stripping margins, but indentation is required before the pipe for the yaml reader to work ---- -tpe: analog -name: slow_foundry -width: 0 -height: 0 -verilog: | - |// Foundry Analog Pad Example - |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} - |// Call your instance PAD - |module {{name}}( - | inout io - |); - |endmodule ---- -tpe: analog -name: fast_custom -width: 0 -height: 0 -verilog: | - |// Custom Analog Pad Example - |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} - |// Call your instance PAD - |module {{name}}( - | inout io - |); - |endmodule ---- -tpe: digital -name: from_tristate_foundry -width: 0 -height: 0 -verilog: | - |// Digital Pad Example - |// Signal Direction: {{#if isInput}}Input{{else}}Output{{/if}} - |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} - |// Call your instance PAD - |module {{name}}( - | input in, - | output reg out - |); - | // Where you would normally dump your pad instance - | always @* begin - | out = in; - | end - |endmodule ---- -tpe: digital -name: fake_digital -width: 0 -height: 0 -verilog: | - |// (Fake/Unused) Digital Pad Example - |// Signal Direction: {{#if isInput}}Input{{else}}Output{{/if}} - |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} - |// Call your instance PAD - |module {{name}}( - | input in, - | output reg out - |); - | // Where you would normally dump your pad instance - | always @* begin - | out = in; - | end - |endmodule ---- -tpe: supply -name: vdd -width: 0 -height: 0 -supplySetNum: 1 -verilog: | - |// VDD Pad Example (No IO) - |// Can group some number together as required by the foundry - |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} - |// Call your instance array PAD[0:0], PAD[2:0], etc. - |module {{name}}( - |); - |endmodule ---- -tpe: supply -name: vss -width: 0 -height: 0 -supplySetNum: 2 -verilog: | - |// VSS Pad Example (No IO) - |// Can group some number together as required by the foundry - |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} - |// Call your instance array PAD[0:0], PAD[2:0], etc. - |module {{name}}( - |); - |endmodule ---- -tpe: supply -name: avss -width: 0 -height: 0 -supplySetNum: 1 -verilog: | - |// Analog VSS Pad Example (No IO) - |// Can group some number together as required by the foundry - |// Pad Orientation: {{#if isHorizontal}}Horizontal{{else}}Vertical{{/if}} - |// Call your instance array PAD[0:0], PAD[2:0], etc. 
- |module {{name}}( - |); - |endmodule diff --git a/tapeout/src/main/resources/PadPlacement.yaml b/tapeout/src/main/resources/PadPlacement.yaml deleted file mode 100644 index a8a94f1cd..000000000 --- a/tapeout/src/main/resources/PadPlacement.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# Example for Innovus: https://legacy.iis.ee.ethz.ch/~vlsi2/ex05/ex05.pdf ---- -file: pads.io -left: "1" # Bottom to top -top: "2" # Left to right -right: "3" # Bottom to top -bottom: "4" # Left to right -# Note: In your scripts, you should specify instance array styles -# i.e. hdl_instance_array_naming_style string (For Genus) -instanceArray: "{{signal}}[{{idx}}]" -padLine: | - | (inst name = "{{padInst}}") # Side: {{side}}, Order: {{padIdx}} -template: | - |(globals - | version = 3 - | io_order = default - |) - |(iopad - | (bottomleft - | (inst name="corner_ll" cell="CORNER_EXAMPLE" ) - | ) - | (bottomright - | (inst name="corner_lr" orientation=MY cell="CORNER_EXAMPLE" ) - | ) - | (topleft - | (inst name="corner_ul" orientation=MX cell="CORNER_EXAMPLE" ) - | ) - | (topright - | (inst name="corner_ur" cell="CORNER_EXAMPLE" ) - | ) - | (left - |{{leftPads}} - | ) - | (right - |{{rightPads}} - | ) - | (top - |{{topPads}} - | ) - | (bottom - |{{bottomPads}} - | ) - |) \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala b/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala deleted file mode 100644 index ba095879b..000000000 --- a/tapeout/src/main/scala/transforms/.pads/AddIOPadsTransform.scala +++ /dev/null @@ -1,65 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms.pads - -import firrtl._ -import firrtl.passes._ -import barstools.tapeout.transforms._ -import firrtl.options.Dependency -import firrtl.stage.Forms -import firrtl.stage.TransformManager.TransformDependency - -import scala.collection.mutable - -// Main Add IO Pad transform operates on low Firrtl -class AddIOPadsTransform extends Transform with SeqTransformBased with DependencyAPIMigration { - - override def prerequisites: Seq[TransformDependency] = Forms.LowForm - override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized - override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters - - val transformList = new mutable.ArrayBuffer[Transform] - def transforms: Seq[Transform] = transformList - - override def execute(state: CircuitState): CircuitState = { - val collectedAnnos = HasPadAnnotation(state.annotations) - collectedAnnos match { - // Transform not used - case None => state - case Some(x) => - val techLoc = (new TechnologyLocation).get(state) - // Get foundry pad templates from yaml - val foundryPads = FoundryPadsYaml.parse(techLoc) - val portPads = AnnotatePortPads(state.circuit, x.topModName, foundryPads, x.componentAnnos, - HasPadAnnotation.getSide(x.defaultPadSide)) - val supplyPads = AnnotateSupplyPads(foundryPads, x.supplyAnnos) - val (circuitWithBBs, bbAnnotations) = CreatePadBBs(state.circuit, portPads, supplyPads) - val namespace = Namespace(state.circuit) - val padFrameName = namespace newName s"${x.topModName}_PadFrame" - val topInternalName = namespace newName s"${x.topModName}_Internal" - val targetDir = barstools.tapeout.transforms.GetTargetDir(state) - PadPlacementFile.generate(techLoc, targetDir, padFrameName, portPads, supplyPads) - transformList ++= Seq( - Legalize, - ResolveFlows, - // Types really need to be known... 
- InferTypes, - new AddPadFrame(x.topModName, padFrameName, topInternalName, portPads, supplyPads), - RemoveEmpty, - CheckInitialization, - InferTypes, - Uniquify, - ResolveKinds, - ResolveFlows - ) - // Expects BlackBox helper to be run after to inline pad Verilog! - val ret = runTransforms(state) - val currentAnnos = ret.annotations - val newAnnoMap = AnnotationSeq(currentAnnos ++ bbAnnotations) - val newState = CircuitState(ret.circuit, outputForm, newAnnoMap, ret.renames) - - // TODO: *.f file is overwritten on subsequent executions, but it doesn't seem to be used anywhere? - (new firrtl.transforms.BlackBoxSourceHelper).execute(newState) - } - } -} diff --git a/tapeout/src/main/scala/transforms/.pads/AddPadFrame.scala b/tapeout/src/main/scala/transforms/.pads/AddPadFrame.scala deleted file mode 100644 index 62447bd5b..000000000 --- a/tapeout/src/main/scala/transforms/.pads/AddPadFrame.scala +++ /dev/null @@ -1,133 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms.pads - -import firrtl.annotations._ -import firrtl.ir._ -import firrtl._ -import firrtl.passes.Pass - -// Analog is like UInt, SInt; it's not a direction (which is kind of weird) -// WARNING: Analog type is associated with Verilog InOut! i.e. even if digital pads are tri-statable, b/c tristate -// requires an additional ctrl signal, digital pads must be operated in a single "static" condition here; Analog will -// be paired with analog pads - -class AddPadFrame( - topMod: String, - padFrameName: String, - topInternalName: String, - ioPads: Seq[PortIOPad], - supplyPads: Seq[TopSupplyPad]) extends Pass { - - def run(c: Circuit): Circuit = { - // New modules consist of old modules (with top renamed to internal) + padFrame + newTop - val newMods = c.modules.map { - case mod: Module if mod.name == topMod => - // Original top module is now internal module - mod.copy(name = topInternalName) - case m => m - } ++ Seq(buildPadFrame(), buildTopWrapper()) - - // Reparent so circuit top is whatever uses pads! - // TODO: Can the top level be a blackbox? 
- c.copy(modules = newMods, main = topMod) - } - - def intName(p: PortIOPad) = s"${p.portName}_Int" - def extName(p: PortIOPad) = s"${p.portName}_Ext" - - def buildTopWrapper(): Module = { - // outside -> padframe -> internal - // Top (with same name) contains 1) padframe + 2) internal signals - val padFrameInst = WDefInstance(padFrameName, padFrameName) - val topInternalInst = WDefInstance(topInternalName, topInternalName) - val padFrameRef = WRef(padFrameName) - val topInternalRef = WRef(topInternalName) - val connects = ioPads.map { p => - val io = WRef(p.portName) - val intIo = WSubField(topInternalRef, p.portName) - val padFrameIntIo = WSubField(padFrameRef, intName(p)) - val padFrameExtIo = WSubField(padFrameRef, extName(p)) - p.port.tpe match { - case AnalogType(_) => - // Analog pads only have 1 port - // If Analog port doesn't have associated pad, don't hook it up to the padframe - val analogAttachInt = Seq(Attach(NoInfo, Seq(io, intIo))) - if (p.pad.isEmpty) analogAttachInt - else analogAttachInt :+ Attach(NoInfo, Seq(io, padFrameExtIo)) - case _ => p.portDirection match { - case Input => - // input to padframe ; padframe to internal - Seq(Connect(NoInfo, padFrameExtIo, io), Connect(NoInfo, intIo, padFrameIntIo)) - case Output => - // internal to padframe ; padframe to output - Seq(Connect(NoInfo, padFrameIntIo, intIo), Connect(NoInfo, io, padFrameExtIo)) - } - } - }.flatten - val stmts = Seq(padFrameInst, topInternalInst) ++ connects - val ports = ioPads.map(p => p.port) - Module(NoInfo, topMod, ports = ports, body = Block(stmts)) - } - - def buildPadFrame(): Module = { - // Internal = connection to original RTL; External = connection to outside world - // Note that for analog pads, since there's only 1 port, only _Ext is used - val intPorts = ioPads.map(p => p.port.tpe match { - case AnalogType(_) => None - case _ => Some(p.port.copy(name = intName(p), direction = Utils.swap(p.portDirection))) - }).flatten - val extPorts = ioPads.map(p => p.port.tpe match { - // If an analog port doesn't have a pad associated with it, don't add it to the padframe - case AnalogType(_) if p.pad.isEmpty => None - case _ => Some(p.port.copy(name = extName(p))) - } ).flatten - // Only create pad black boxes for ports that require them - val ioPadInsts = ioPads.filter(x => !x.pad.isEmpty).map(p => WDefInstance(p.firrtlBBName, p.firrtlBBName)) - // Connect to pad only if used ; otherwise leave dangling for Analog - // and just connect through for digital (assumes no supplies) - val connects = ioPads.map { p => - val intRef = WRef(intName(p), p.port.tpe) - val extRef = WRef(extName(p), p.port.tpe) - p.pad match { - // No pad needed -- just connect through - case None => p.port.tpe match { - case AnalogType(_) => - Seq(EmptyStmt) - case _ => - val (lhs, rhs) = p.portDirection match { - case Input => (intRef, extRef) - case Output => (extRef, intRef) - } - Seq(Connect(NoInfo, lhs, rhs)) - } - // Add pad - case Some(x) => - val padRef = WRef(p.firrtlBBName) - p.port.tpe match { - // Analog type has 1:1 mapping to inout - case AnalogType(_) => - val padIORef = WSubField(padRef, AnalogPad.ioName) - Seq(Attach(NoInfo, Seq(padIORef, extRef))) - // Normal verilog in/out can be mapped to uint, sint, or clocktype, so need cast - case _ => - val padBBType = UIntType(getWidth(p.port.tpe)) - val padInRef = WSubField(padRef, DigitalPad.inName, padBBType, UnknownFlow) - val padOutRef = WSubField(padRef, DigitalPad.outName, padBBType, UnknownFlow) - val (rhsPadIn, lhsPadOut) = p.portDirection match { - case Input => 
(extRef, intRef) - case Output => (intRef, extRef) - } - // Pad inputs are treated as UInts, so need to do type conversion - // from type to UInt pad input; from pad output to type - Seq( - Connect(NoInfo, padInRef, castRhs(padBBType, rhsPadIn)), - Connect(NoInfo, lhsPadOut, castRhs(p.port.tpe, padOutRef))) - } - } - }.flatten - val supplyPadInsts = supplyPads.map(p => p.instNames.map(n => WDefInstance(n, p.firrtlBBName))).flatten - Module(NoInfo, padFrameName, ports = intPorts ++ extPorts, body = Block(ioPadInsts ++ connects ++ supplyPadInsts)) - } - -} diff --git a/tapeout/src/main/scala/transforms/.pads/AnnotatePortPads.scala b/tapeout/src/main/scala/transforms/.pads/AnnotatePortPads.scala deleted file mode 100644 index 8164463e2..000000000 --- a/tapeout/src/main/scala/transforms/.pads/AnnotatePortPads.scala +++ /dev/null @@ -1,134 +0,0 @@ -package barstools.tapeout.transforms.pads - -import firrtl.annotations._ -import firrtl._ -import firrtl.ir._ -import barstools.tapeout.transforms._ - -// TODO: Make some trait with commonalities between IO Pad + supply pad - -// Pads associated with IO Ports! (Not supplies!) -case class PortIOPad( - pad: Option[FoundryPad], - padSide: PadSide, - port: Port) { - - def arrayInstNamePrefix(mod: String): String = Seq(mod, firrtlBBName, getPadName).mkString("/") - def arrayInstNameSuffix: String = pad match { - case None => throw new Exception("Port needs to use pad to get array instance name!") - case Some(x) => "/" + x.padInstName - } - - def portName = port.name - def portWidth = bitWidth(port.tpe).intValue - def portDirection = port.direction - def padOrientation = padSide.orientation - def padType = pad match { - case None => NoPad - case Some(x) => x.padType - } - - def widthParamName = "WIDTH" - def getPadName: String = pad match { - case None => throw new Exception("Cannot get pad name when no pad specified!") - case Some(x) => x.getName(portDirection, padOrientation) - } - def getPadArrayName: String = Seq(getPadName, "array").mkString("_") - // Firrtl black box name must be unique, even though the parameterized Verilog modules don't - // need to have separate names - def firrtlBBName = Seq(getPadArrayName, portName).mkString("_") - - // Note: This includes both the pad wrapper + an additional wrapper for n-bit wide to - // multiple pad conversion! 
- def createPadInline(): String = { - // For blackboxing bit extraction/concatenation (with module arrays) - def io(): String = padType match { - case DigitalPad => - s"""| input [${widthParamName}-1:0] ${DigitalPad.inName}, - | output reg [${widthParamName}-1:0] ${DigitalPad.outName}""".stripMargin - case AnalogPad => - s" inout [${widthParamName}-1:0] ${AnalogPad.ioName}" - case _ => throw new Exception("IO pad can only be digital or analog") - } - def assignIO(): String = padType match { - case DigitalPad => - s"""| .${DigitalPad.inName}(${DigitalPad.inName}), - | .${DigitalPad.outName}(${DigitalPad.outName})""".stripMargin - case AnalogPad => - s" .${AnalogPad.ioName}(${AnalogPad.ioName})" - case _ => throw new Exception("IO pad can only be digital or analog") - } - def getPadVerilog(): String = pad match { - case None => throw new Exception("Cannot get Verilog when no pad specified!") - case Some(x) => x.getVerilog(portDirection, padOrientation) - } - s"""inline - |${getPadArrayName}.v - |${getPadVerilog} - |module ${getPadArrayName} #( - | parameter int ${widthParamName}=1 - |)( - |${io} - |); - | ${getPadName} ${getPadName}[${widthParamName}-1:0]( - |${assignIO} - | ); - |endmodule""".stripMargin - } -} - -object AnnotatePortPads { - def apply( - c: Circuit, - topMod: String, - pads: Seq[FoundryPad], - componentAnnos: Seq[TargetIOPadAnnoF], - defaultSide: PadSide): Seq[PortIOPad] = { - - def lowerAnnotations(): Seq[TargetIOPadAnnoF] = { - componentAnnos map { x => x.target match { - case c: ComponentName => x.copy(target = c.copy(name = LowerName(c.name))) - case _ => throw new Exception("Not a component annotation! Can't lower!") - }} - } - - // Make annotations match low form - val annos = lowerAnnotations() - - def getPortIOPad(port: Port): PortIOPad = { - val portAnnos = annos.find(_.targetName == port.name) - // Ports can only be digital or analog - val padTypeRequired = port.tpe match { - case AnalogType(_) => AnalogPad - case _ => DigitalPad - } - val validPads = pads.filter(_.padType == padTypeRequired) - require(validPads.length > 0, s"No ${padTypeRequired.serialize} pads specified in the config yaml file!") - portAnnos match { - case None => - // If no pad-related annotation is found on a port, use defaults based off of port type - PortIOPad(Some(validPads.head), defaultSide, port) - case Some(x) => - x.anno match { - case NoIOPadAnnotation(_) => - // Some ports might not want attached pads - PortIOPad(None, defaultSide, port) - case IOPadAnnotation(padSide, padName) if padName.isEmpty => - // If no pad name is used, select the first valid pad based off of port type - PortIOPad(Some(validPads.head), HasPadAnnotation.getSide(padSide), port) - case IOPadAnnotation(padSide, padName) => - // If name doesn't match any provided -- maybe someone typoed? - validPads.find(_.name == padName) match { - case None => - throw new Exception( - s"Pad name associated with ${port.name} doesn't match valid pad names. 
Did you typo?") - case Some(x) => - PortIOPad(Some(x), HasPadAnnotation.getSide(padSide), port) - } - } - } - } - // Top MUST be internal module - c.modules.filter(_.name == topMod).head.ports.map(x => getPortIOPad(x)) - } -} \ No newline at end of file diff --git a/tapeout/src/main/scala/transforms/.pads/AnnotateSupplyPads.scala b/tapeout/src/main/scala/transforms/.pads/AnnotateSupplyPads.scala deleted file mode 100644 index cda007914..000000000 --- a/tapeout/src/main/scala/transforms/.pads/AnnotateSupplyPads.scala +++ /dev/null @@ -1,56 +0,0 @@ -package barstools.tapeout.transforms.pads - -import firrtl.annotations._ -import firrtl._ -import firrtl.ir._ -import firrtl.passes._ - -case class TopSupplyPad( - pad: FoundryPad, - padSide: PadSide, - num: Int -) { - - // TODO: These should be pulled into some common trait (supply + io)! - - def arrayInstNamePrefix(mod: String): Seq[String] = { - instNames.map(n => Seq(mod, n, pad.padInstName).mkString("/")) - } - def supplySetNum = pad.getSupplySetNum - - def padType = pad.padType - require(pad.padType == SupplyPad) - - def padOrientation = padSide.orientation - def getPadName = pad.getName(Output/*Should be None*/, padOrientation) - def firrtlBBName = getPadName - private def instNamePrefix = Seq(firrtlBBName, padSide.serialize).mkString("_") - def instNames = (0 until num).map(i => Seq(instNamePrefix, i.toString).mkString("_")) - - def createPadInline(): String = { - def getPadVerilog(): String = pad.getVerilog(Output/*Should be None*/, padOrientation) - s"""inline - |${getPadName}.v - |${getPadVerilog}""".stripMargin - } -} - -object AnnotateSupplyPads { - def apply( - pads: Seq[FoundryPad], - supplyAnnos: Seq[SupplyAnnotation] - ): Seq[TopSupplyPad] = { - supplyAnnos.map( a => - pads.find(_.name == a.padName) match { - case None => - throw new Exception(s"Supply pad ${a.padName} not found in Yaml file!") - case Some(x) => - Seq( - TopSupplyPad(x, Left, a.leftSide), - TopSupplyPad(x, Right, a.rightSide), - TopSupplyPad(x, Top, a.topSide), - TopSupplyPad(x, Bottom, a.bottomSide)) - } - ).flatten.filter(_.num > 0) - } -} diff --git a/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala b/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala deleted file mode 100644 index 5b2ed28a0..000000000 --- a/tapeout/src/main/scala/transforms/.pads/ChiselTopModule.scala +++ /dev/null @@ -1,109 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms.pads - -import chisel3._ -import chisel3.experimental._ -import firrtl.Transform -import firrtl.annotations.Annotation - -// TODO: Move out of pads - -// NOTE: You can't really annotate outside of the module itself UNLESS you break up the compile step in 2 i.e. -// annotate post-Chisel but pre-Firrtl (unfortunate non-generator friendly downside). -// It's recommended to have a Tapeout specific TopModule wrapper. 
-// LIMITATION: All signals of a bus must be on the same chip side - -// Chisel-y annotations -abstract class TopModule( - supplyAnnos: Seq[SupplyAnnotation] = Seq.empty, - defaultPadSide: PadSide = Top, - coreWidth: Int = 0, - coreHeight: Int = 0, - usePads: Boolean = true, - override_clock: Option[Clock] = None, - override_reset: Option[Bool] = None) extends Module { - - override_clock.foreach(clock := _) - override_reset.foreach(reset := _) - - private val mySelf = this - - // Annotate module as top module (that requires pad transform) - // Specify the yaml file that indicates how pads are templated, - // the default chip side that pads should be placed (if nothing is specified per IO), - // and supply annotations: supply pad name, location, and # - def createPads(): Unit = if (usePads) { - val modulePadAnnotation = ModulePadAnnotation( - defaultPadSide = defaultPadSide.serialize, - coreWidth = coreWidth, - coreHeight = coreHeight, - supplyAnnos = supplyAnnos - ) - //TODO: PORT-1.4: Remove commented code - // annotate(TargetModulePadAnnoC(this, modulePadAnnotation)) - annotate(new ChiselAnnotation with RunFirrtlTransform { - override def toFirrtl: Annotation = { - TargetModulePadAnnoF(mySelf.toNamed, modulePadAnnotation) - } - def transformClass: Class[_ <: Transform] = classOf[AddIOPadsTransform] - }) - } - - private def extractElementNames(signal: Data): Seq[String] = { - val names = signal match { - case elt: Record => - elt.elements.map { case (key, value) => extractElementNames(value).map(x => key + "_" + x) }.toSeq.flatten - case elt: Vec[_] => - elt.zipWithIndex.map { case (elt, i) => extractElementNames(elt).map(x => i + "_" + x) }.toSeq.flatten - case elt: Element => Seq("") - case elt => throw new Exception(s"Cannot extractElementNames for type ${elt.getClass}") - } - names.map(s => s.stripSuffix("_")) - } - - // TODO: Replace! - def extractElements(signal: Data): Seq[Element] = { - signal match { - case elt: Record => - elt.elements.map { case (key, value) => extractElements(value) }.toSeq.flatten - case elt: Vec[_] => - elt.map { elt => extractElements(elt) }.toSeq.flatten - case elt: Element => Seq(elt) - case elt => throw new Exception(s"Cannot extractElements for type ${elt.getClass}") - } - } - - // Annotate IO with side + pad name - def annotatePad(sig: Element, side: PadSide = defaultPadSide, name: String = ""): Unit = if (usePads) { - val anno = IOPadAnnotation(side.serialize, name) - annotate(new ChiselAnnotation with RunFirrtlTransform { - override def toFirrtl: Annotation = { - TargetIOPadAnnoF(sig.toTarget, anno) - } - def transformClass: Class[_ <: Transform] = classOf[AddIOPadsTransform] - }) - } - def annotatePad(sig: Aggregate, name: String): Unit = annotatePad(sig, side = defaultPadSide, name) - def annotatePad(sig: Aggregate, side: PadSide): Unit = annotatePad(sig, side, name = "") - def annotatePad(sig: Aggregate, side: PadSide, name: String): Unit = - extractElements(sig) foreach { x => annotatePad(x, side, name) } - - // There may be cases where pads were inserted elsewhere. If that's the case, allow certain IO to - // not have pads auto added. Note that annotatePad and noPad are mutually exclusive! 
- def noPad(sig: Element): Unit = { - if (usePads) { - annotate(new ChiselAnnotation with RunFirrtlTransform { - override def toFirrtl: Annotation = { - TargetIOPadAnnoF(sig.toTarget, NoIOPadAnnotation()) - } - def transformClass: Class[_ <: Transform] = classOf[AddIOPadsTransform] - }) - } - } - def noPad(sig: Aggregate): Unit = extractElements(sig) foreach { x => noPad(x) } - - // Since this is a super class, this should be the first thing that gets run - // (at least when the module is actually at the top -- currently no guarantees otherwise :( firrtl limitation) - createPads() -} diff --git a/tapeout/src/main/scala/transforms/.pads/CreatePadBBs.scala b/tapeout/src/main/scala/transforms/.pads/CreatePadBBs.scala deleted file mode 100644 index 5b35fbe4f..000000000 --- a/tapeout/src/main/scala/transforms/.pads/CreatePadBBs.scala +++ /dev/null @@ -1,109 +0,0 @@ -package barstools.tapeout.transforms.pads - -import firrtl.annotations._ -import firrtl._ -import firrtl.ir._ -import firrtl.transforms._ - -object CreatePadBBs { - - private [barstools] case class UsedPadInfo( - // The following are found with both supply + io pads - padInline: String, // Verilog txt - padName: String, // Pad module name - padType: PadType, // Pad type: supply, analog, digital - // The following only affects io pads (due to using parameterized modules for bit extraction / cat) - padArrayName: String, // Name of parameterized pad wrapper (that does bit extract/cat) - firrtlBBName: String, // Unique Firrtl name of each parameterized pad wrapper - portWidth: Int // Port width for analog/digital - ) - - def convertToUsedPad(p: PortIOPad): UsedPadInfo = { - UsedPadInfo( - padInline = p.createPadInline, - padName = p.getPadName, - padType = p.padType, - padArrayName = p.getPadArrayName, - firrtlBBName = p.firrtlBBName, - portWidth = p.portWidth) - } - - def convertToUsedPad(p: TopSupplyPad): UsedPadInfo = { - UsedPadInfo( - padInline = p.createPadInline, - padName = p.getPadName, - padType = p.padType, - // Supply pads don't require bit extraction / cat so don't care - padArrayName = p.getPadName, - firrtlBBName = p.getPadName, - portWidth = 0) - } - - def checkLegalPadName(namespace: Namespace, usedPads: Seq[UsedPadInfo]): Unit = { - usedPads foreach { x => - if (namespace contains x.padName) - throw new Exception(s"Pad name ${x.padName} already used!") - if (namespace contains x.padArrayName) - throw new Exception(s"Pad array ${x.padArrayName} name already used!") - if (namespace contains x.firrtlBBName) - throw new Exception(s"Firrtl black box ${x.firrtlBBName} name already used!") - } - } - - def apply( - c: Circuit, - ioPads: Seq[PortIOPad], - supplyPads: Seq[TopSupplyPad]): (Circuit, Seq[Annotation]) = { - - // Add black boxes for both supply + (used) io pads - val usedPads = ioPads.filter(x => x.pad.nonEmpty).map(convertToUsedPad(_)) ++ supplyPads.map(convertToUsedPad(_)) - checkLegalPadName(Namespace(c), usedPads) - - // Note that we need to check for Firrtl name uniqueness here! 
(due to parameterization) - val uniqueExtMods = scala.collection.mutable.ArrayBuffer[UsedPadInfo]() - usedPads foreach { x => - if (uniqueExtMods.find(_.firrtlBBName == x.firrtlBBName).isEmpty) - uniqueExtMods += x - } - - // Collecting unique parameterized black boxes - // (for io, they're wrapped pads; for supply, they're pad modules directly) - val uniqueParameterizedBBs = scala.collection.mutable.ArrayBuffer[UsedPadInfo]() - uniqueExtMods foreach { x => - if (uniqueParameterizedBBs.find(_.padArrayName == x.padArrayName).isEmpty) - uniqueParameterizedBBs += x - } - - // Note: Firrtl is silly and doesn't implement true parameterization -- each module with - // parameterization that potentially affects # of IO needs to be uniquely identified - // (but only in Firrtl) - val bbs = uniqueExtMods.map(x => { - // Supply pads don't have ports - val ports = x.padType match { - case AnalogPad => Seq(Port(NoInfo, AnalogPad.ioName, Input, AnalogType(IntWidth(x.portWidth)))) - case DigitalPad => Seq( - Port(NoInfo, DigitalPad.inName, Input, UIntType(IntWidth(x.portWidth))), - Port(NoInfo, DigitalPad.outName, Output, UIntType(IntWidth(x.portWidth))) - ) - case SupplyPad => Seq.empty - case _ => throw new Exception("Port pad type invalid!") - } - // Supply black boxes are not parameterized - val params = x.padType match { - case AnalogPad | DigitalPad => Seq(IntParam(ioPads.head.widthParamName, x.portWidth)) - case SupplyPad => Seq() - case _ => throw new Exception("Port pad type invalid!") - } - // Firrtl name is unique - ExtModule(NoInfo, x.firrtlBBName, ports, x.padArrayName, params) - } ).toSeq - - // Add annotations to black boxes to inline Verilog from template - // Again, note the weirdness in parameterization -- just need to hook to one matching Firrtl instance - val annos = uniqueParameterizedBBs.map(x => - BlackBoxInlineAnno(ModuleName(x.firrtlBBName, CircuitName(c.main)), x.firrtlBBName, x.padInline) - ).toSeq - (c.copy(modules = c.modules ++ bbs), annos) - } - -} diff --git a/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala b/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala deleted file mode 100644 index ce19a6d7f..000000000 --- a/tapeout/src/main/scala/transforms/.pads/FoundryPadsYaml.scala +++ /dev/null @@ -1,129 +0,0 @@ -// See LICENSE for license details. 
- -package barstools.tapeout.transforms.pads - -import net.jcazevedo.moultingyaml._ - -import firrtl._ -import firrtl.ir._ -import barstools.tapeout.transforms._ - -trait HasFoundryPadFields { - val tpe: String - val name: String - val width: Int - val height: Int - val supplySetNum: Option[Int] - val verilog: String -} - -case class FoundryPadFields( - tpe: String, - name: String, - width: Int, - height: Int, - supplySetNum: Option[Int], - verilog: String) - extends HasFoundryPadFields - -case class FoundryPad( - tpe: String, - name: String, - width: Int, - height: Int, - supplySetNum: Option[Int], - verilog: String) - extends HasFoundryPadFields { - - def padInstName = "PAD" - - require(verilog.contains("{{#if isHorizontal}}"), "All pad templates must contain '{{#if isHorizontal}}'") - require(verilog.contains("{{name}}"), "All pad templates must contain module name '{{name}}'") - require(verilog.contains(padInstName), s"All pad templates should have instances called ${padInstName}") - - def getSupplySetNum = supplySetNum.getOrElse(1) - - val padType = tpe match { - case "digital" => - require(verilog.contains(DigitalPad.inName), "Digital pad template must contain input called 'in'") - require(verilog.contains(DigitalPad.outName), "Digital pad template must contain output called 'out'") - require(verilog.contains("{{#if isInput}}"), "Digital pad template must contain '{{#if isInput}}'") - DigitalPad - case "analog" => - require(verilog.contains(AnalogPad.ioName), "Analog pad template must contain inout called 'io'") - require(!verilog.contains("{{#if isInput}}"), "Analog pad template must not contain '{{#if isInput}}'") - AnalogPad - case "supply" => - // Supply pads don't have IO - require(!verilog.contains("{{#if isInput}}"), "Supply pad template must not contain '{{#if isInput}}'") - require( - verilog.contains(s"${padInstName}["), - "All supply pad templates should have instance arrays" + - " called ${padInstName}[n:0], where n = ${getSupplySetNum-1}" - ) - require(supplySetNum.nonEmpty, "# of grouped supply pads 'supplySetNum' should be specified!") - SupplyPad - case _ => throw new Exception("Illegal pad type in config!") - } - - import com.gilt.handlebars.scala.binding.dynamic._ - import com.gilt.handlebars.scala.Handlebars - private val template = Handlebars(verilog) - - // Make sure names don't have spaces in Verilog! 
- private[barstools] val correctedName = name.replace(" ", "_") - - case class TemplateParams( - // isInput only used with digital pads - isInput: Boolean, - isHorizontal: Boolean) { - - private val orient = if (isHorizontal) Horizontal.serialize else Vertical.serialize - private val dir = padType match { - case AnalogPad => "inout" - case SupplyPad => "none" - case DigitalPad => if (isInput) Input.serialize else Output.serialize - } - val name = { - val start = Seq("pad", tpe, correctedName, orient) - if (padType == DigitalPad) start :+ dir - else start - }.mkString("_") - } - - // Note: Analog + supply don't use direction - private def getTemplateParams(dir: Direction, orient: PadOrientation): TemplateParams = - TemplateParams(isInput = (dir == Input), isHorizontal = (orient == Horizontal)) - - def getVerilog(dir: Direction, orient: PadOrientation): String = { - val p = getTemplateParams(dir, orient) - template(p).stripMargin - } - - def getName(dir: Direction, orient: PadOrientation): String = getTemplateParams(dir, orient).name -} - -object FoundryPadsYaml extends DefaultYamlProtocol { - val exampleResource = "/FoundryPads.yaml" - implicit val _pad = yamlFormat6(FoundryPadFields) - def parse(techDir: String): Seq[FoundryPad] = { - val file = techDir + exampleResource - if (techDir != "" && !(new java.io.File(file)).exists()) { - throw new Exception(s"Technology directory $techDir must contain FoundryPads.yaml!") - } - val fieldsArray = (new YamlFileReader(exampleResource)).parse[FoundryPadFields](if (techDir == "") "" else file) - val out = fieldsArray.map { fields => - FoundryPad( - tpe = fields.tpe, - name = fields.name, - width = fields.width, - height = fields.height, - supplySetNum = fields.supplySetNum, - verilog = fields.verilog - ) - } - val padNames = out.map(x => x.correctedName) - require(padNames.distinct.length == padNames.length, "Pad names must be unique!") - out - } -} diff --git a/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala b/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala deleted file mode 100644 index 7ca497995..000000000 --- a/tapeout/src/main/scala/transforms/.pads/PadAnnotations.scala +++ /dev/null @@ -1,146 +0,0 @@ -// See LICENSE for license details. 
- -package barstools.tapeout.transforms.pads - -import firrtl.annotations._ -import net.jcazevedo.moultingyaml._ - -object PadAnnotationsYaml extends DefaultYamlProtocol { - implicit val _iopad = yamlFormat2(IOPadAnnotation) - implicit val _noiopad = yamlFormat1(NoIOPadAnnotation) - implicit val _supplyanno = yamlFormat5(SupplyAnnotation) - implicit val _modulepadanno = yamlFormat4(ModulePadAnnotation) - - // Putting these serialize methods here seems to fix warnings about missing implicits for the toYaml - def serialize(noIOPad: NoIOPadAnnotation): String = { - noIOPad.toYaml.prettyPrint - } - def serialize(ioPadAnnotation: IOPadAnnotation): String = { - ioPadAnnotation.toYaml.prettyPrint - } - def serialize(modulePadAnnotation: ModulePadAnnotation): String = { - modulePadAnnotation.toYaml.prettyPrint - } -} - -abstract class FirrtlPadTransformAnnotation { - def targetName: String -} - -// IO Port can either be annotated with padName + padSide OR noPad (mutually exclusive) -abstract class IOAnnotation { - def serialize: String -} - -case class IOPadAnnotation(padSide: String, padName: String) extends IOAnnotation { - def serialize: String = PadAnnotationsYaml.serialize(this) - def getPadSide: PadSide = HasPadAnnotation.getSide(padSide) -} - -case class NoIOPadAnnotation(noPad: String = "") extends IOAnnotation { - def serialize: String = PadAnnotationsYaml.serialize(this) - def field: String = "noPad:" -} - -// Firrtl version -case class TargetIOPadAnnoF(target: ComponentName, anno: IOAnnotation) - extends FirrtlPadTransformAnnotation with SingleTargetAnnotation[ComponentName] { - - def duplicate(n: ComponentName): TargetIOPadAnnoF = this.copy(target = n) - def targetName: String = target.name -} - -// A bunch of supply pads (designated by name, # on each chip side) can be associated with the top module -case class SupplyAnnotation( - padName: String, - leftSide: Int = 0, - rightSide: Int = 0, - topSide: Int = 0, - bottomSide: Int = 0) - -// The chip top should have a default pad side, a pad template file, and supply annotations -case class ModulePadAnnotation( - defaultPadSide: String = Top.serialize, - coreWidth: Int = 0, - coreHeight: Int = 0, - supplyAnnos: Seq[SupplyAnnotation] = Seq.empty) { - - def serialize: String = PadAnnotationsYaml.serialize(this) - def supplyPadNames: Seq[String] = supplyAnnos.map(_.padName) - require(supplyPadNames.distinct.length == supplyPadNames.length, "Supply pads should only be specified once!") - def getDefaultPadSide: PadSide = HasPadAnnotation.getSide(defaultPadSide) -} - -// Firrtl version -case class TargetModulePadAnnoF(target: ModuleName, anno: ModulePadAnnotation) - extends FirrtlPadTransformAnnotation with SingleTargetAnnotation[ModuleName] { - - def duplicate(n: ModuleName): TargetModulePadAnnoF = this.copy(target = n) - def targetName: String = target.name -} - - -case class CollectedAnnos( - componentAnnos: Seq[TargetIOPadAnnoF], - moduleAnnos: TargetModulePadAnnoF) { - def supplyAnnos = moduleAnnos.anno.supplyAnnos - def defaultPadSide = moduleAnnos.anno.defaultPadSide - def topModName = moduleAnnos.targetName - def coreWidth = moduleAnnos.anno.coreWidth - def coreHeight = moduleAnnos.anno.coreHeight -} - -object HasPadAnnotation { - - def getSide(a: String): PadSide = a match { - case i if i == Left.serialize => Left - case i if i == Right.serialize => Right - case i if i == Top.serialize => Top - case i if i == Bottom.serialize => Bottom - case _ => throw new Exception(s" $a not a valid pad side annotation!") - } - - //TODO: PORT-1.4: 
Remove commented code -// def unapply(a: Annotation): Option[FirrtlPadTransformAnnotation] = a match { -// case Annotation(f, t, s) if t == classOf[AddIOPadsTransform] => f match { -// case m: ModuleName => -// Some(TargetModulePadAnnoF(m, s.parseYaml.convertTo[ModulePadAnnotation])) -// case c: ComponentName if s.contains(NoIOPadAnnotation().field) => -// Some(TargetIOPadAnnoF(c, s.parseYaml.convertTo[NoIOPadAnnotation])) -// case c: ComponentName => -// Some(TargetIOPadAnnoF(c, s.parseYaml.convertTo[IOPadAnnotation])) -// case _ => throw new Exception("Annotation only valid on module or component") -// } -// case _ => None -// } - - def apply(annos: Seq[Annotation]): Option[CollectedAnnos] = { - // Get all pad-related annotations (config files, pad sides, pad names, etc.) - val padAnnos = annos.flatMap { - case a: TargetModulePadAnnoF => Some(a) - case a: TargetIOPadAnnoF => Some(a) - case _ => None - } - val targets = padAnnos.map(x => x.targetName) - require(targets.distinct.length == targets.length, "Only 1 pad related annotation is allowed per component/module") - if (padAnnos.length == 0) { - None - } else { - val moduleAnnosTemp = padAnnos.filter { - case TargetModulePadAnnoF(_, _) => true - case _ => false - } - require(moduleAnnosTemp.length == 1, "Only 1 module may be designated 'Top'") - val moduleAnnos = moduleAnnosTemp.head - val topModName = moduleAnnos.targetName - val componentAnnos = padAnnos.filter { - case TargetIOPadAnnoF(ComponentName(_, ModuleName(n, _)), _) if n == topModName => - true - case TargetIOPadAnnoF(ComponentName(_, ModuleName(n, _)), _) if n != topModName => - throw new Exception("Pad related component annotations must all be in the same top module") - case _ => false - }.map(x => x.asInstanceOf[TargetIOPadAnnoF]) - Some(CollectedAnnos(componentAnnos, moduleAnnos.asInstanceOf[TargetModulePadAnnoF])) - } - } -} diff --git a/tapeout/src/main/scala/transforms/.pads/PadDescriptors.scala b/tapeout/src/main/scala/transforms/.pads/PadDescriptors.scala deleted file mode 100644 index cb3420b13..000000000 --- a/tapeout/src/main/scala/transforms/.pads/PadDescriptors.scala +++ /dev/null @@ -1,49 +0,0 @@ -package barstools.tapeout.transforms.pads - -import firrtl._ -import firrtl.ir._ - -abstract class PadOrientation extends FirrtlNode -case object Horizontal extends PadOrientation { - def serialize: String = "horizontal" -} -case object Vertical extends PadOrientation { - def serialize: String = "vertical" -} - -abstract class PadType extends FirrtlNode -case object DigitalPad extends PadType { - def serialize: String = "digital" - def inName: String = "in" - def outName: String = "out" -} -case object AnalogPad extends PadType { - def serialize: String = "analog" - def ioName: String = "io" -} -case object SupplyPad extends PadType { - def serialize: String = "supply" -} -case object NoPad extends PadType { - def serialize: String = "none" -} - -abstract class PadSide extends FirrtlNode { - def orientation: PadOrientation -} -case object Left extends PadSide { - def serialize: String = "left" - def orientation: PadOrientation = Horizontal -} -case object Right extends PadSide { - def serialize: String = "right" - def orientation: PadOrientation = Horizontal -} -case object Top extends PadSide { - def serialize: String = "top" - def orientation: PadOrientation = Vertical -} -case object Bottom extends PadSide { - def serialize: String = "bottom" - def orientation: PadOrientation = Vertical -} diff --git a/tapeout/src/main/scala/transforms/.pads/PadPlacement.scala 
b/tapeout/src/main/scala/transforms/.pads/PadPlacement.scala deleted file mode 100644 index d3e996266..000000000 --- a/tapeout/src/main/scala/transforms/.pads/PadPlacement.scala +++ /dev/null @@ -1,158 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms.pads - -import barstools.tapeout.transforms._ -import net.jcazevedo.moultingyaml._ - -/** This is a hack to get around weird problem with yaml parser - * that without this gives PadPlacement defines additional fields - * - */ -trait HasPadPlacementFields { - def file: String - def left: String - def top: String - def right: String - def bottom: String - def instanceArray: String - def padLine: String - def template: String -} - -case class PadPlacementFields( - file: String, - left: String, - top: String, - right: String, - bottom: String, - instanceArray: String, - padLine: String, - template: String -) extends HasPadPlacementFields - -case class PadPlacement( - file: String, - left: String, - top: String, - right: String, - bottom: String, - instanceArray: String, - padLine: String, - template: String -) extends HasPadPlacementFields { - - require(instanceArray contains "{{signal}}", "Instance Array Template should contain {{signal}}") - require(instanceArray contains "{{idx}}", "Instance Array Template should contain {{idx}}") - require(padLine contains "{{padInst}}", "Pad line should contain {{padInst}}") - require(padLine contains "{{side}}", "Pad line should contain {{side}} (Can be in comments)") - require(padLine contains "{{padIdx}}", "Pad line should contain {{padIdx}} (Can be in comments)") - require(template contains "{{leftPads}}", "Pad line should contain {{leftPads}}") - require(template contains "{{rightPads}}", "Pad line should contain {{rightPads}}") - require(template contains "{{topPads}}", "Pad line should contain {{topPads}}") - require(template contains "{{bottomPads}}", "Pad line should contain {{bottomPads}}") - - def getSideString(s: PadSide): String = s match { - case Left => left - case Right => right - case Top => top - case Bottom => bottom - } - - import com.gilt.handlebars.scala.Handlebars - import com.gilt.handlebars.scala.binding.dynamic._ - - private val instanceArrayTemplate = Handlebars(instanceArray.stripMargin) - private val padLineTemplate = Handlebars(padLine.stripMargin) - private val padPlacementTemplate = Handlebars(template.stripMargin) - - def getInstanceArray(p: InstanceArrayParams): String = instanceArrayTemplate(p).stripMargin - def getPadLine(p: PadLineParams): String = padLineTemplate(p).stripMargin.replace(""", "\"") - def getPadPlacement(p: PadPlacementParams): String = padPlacementTemplate(p).stripMargin.replace(""", "\"") - -} - -case class InstanceArrayParams(signal: String, idx: Int) -case class PadLineParams(padInst: String, side: String, padIdx: Int) -case class PadPlacementParams(leftPads: String, rightPads: String, topPads: String, bottomPads: String) - -object PadPlacementFile extends DefaultYamlProtocol { - val exampleResource = "/PadPlacement.yaml" - implicit val _pad = yamlFormat8(PadPlacementFields) - - def main(args: Array[String]): Unit = { - println(parse("RealTech/PadPlacement.yaml")) - } - - def parse(file: String = ""): PadPlacement = { - val fields = (new YamlFileReader(exampleResource)).parse[PadPlacementFields](file).head - PadPlacement( - file = fields.file, - left = fields.left, - top = fields.top, - right = fields.right, - bottom = fields.bottom, - instanceArray = fields.instanceArray, - padLine = fields.padLine, - template = 
fields.template - ) - } - - def generate( - techDir: String, - targetDir: String, - padFrameName: String, - portPads: Seq[PortIOPad], - supplyPads: Seq[TopSupplyPad]): Unit = { - - val file = techDir + exampleResource - if(techDir != "" && !(new java.io.File(file)).exists()) - throw new Exception("Technology directory must contain PadPlacement.yaml!") - val template = parse(if (techDir == "") "" else file) - - val leftPads = scala.collection.mutable.ArrayBuffer[String]() - val rightPads = scala.collection.mutable.ArrayBuffer[String]() - val topPads = scala.collection.mutable.ArrayBuffer[String]() - val bottomPads = scala.collection.mutable.ArrayBuffer[String]() - - def sort(side: PadSide, inst: String): Unit = side match { - case Left => leftPads += inst - case Right => rightPads += inst - case Top => topPads += inst - case Bottom => bottomPads += inst - } - - // TODO: Be smarter about supply placement (+ grouping?) between signals - // Supply pad instance name: padFrameName/firrtlBBName_padSide_#num/PAD[#supplySetNum] - supplyPads foreach { p => - val prefixes = p.arrayInstNamePrefix(padFrameName) - prefixes foreach { prefix => - (0 until p.supplySetNum) foreach { idx => - sort(p.padSide, template.getInstanceArray(InstanceArrayParams(prefix, idx))) - } - } - } - // IO pad instance name: padFrameName/firrtlBBName/getPadName[#portWidth]/PAD - portPads.filter(_.pad.nonEmpty) foreach { p => - val prefix = p.arrayInstNamePrefix(padFrameName) - (0 until p.portWidth).map(idx => - template.getInstanceArray(InstanceArrayParams(prefix, idx)) + p.arrayInstNameSuffix - ) foreach { x => sort(p.padSide, x) } - } - - def getLines(pads: Seq[String], side: PadSide): String = { - val seq = pads.zipWithIndex.map{ case (p, idx) => - template.getPadLine(PadLineParams(p, template.getSideString(side), idx)) } - seq.mkString("\n") - } - - val fileContents = template.getPadPlacement(PadPlacementParams( - leftPads = getLines(leftPads.toSeq, Left), - rightPads = getLines(rightPads.toSeq, Right), - topPads = getLines(topPads.toSeq, Top), - bottomPads = getLines(bottomPads.toSeq, Bottom) - )) - - WriteConfig(targetDir, template.file, fileContents) - } -} \ No newline at end of file diff --git a/tapeout/src/test/resources/PadPlacement.io b/tapeout/src/test/resources/PadPlacement.io deleted file mode 100644 index 435ce274c..000000000 --- a/tapeout/src/test/resources/PadPlacement.io +++ /dev/null @@ -1,236 +0,0 @@ -(globals - version = 3 - io_order = default -) -(iopad - (bottomleft - (inst name="corner_ll" cell="CORNER_EXAMPLE" ) - ) - (bottomright - (inst name="corner_lr" orientation=MY cell="CORNER_EXAMPLE" ) - ) - (topleft - (inst name="corner_ul" orientation=MX cell="CORNER_EXAMPLE" ) - ) - (topright - (inst name="corner_ur" cell="CORNER_EXAMPLE" ) - ) - (left - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vdd_horizontal_left_0/PAD[0]") # Side: 1, Order: 0 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vdd_horizontal_left_1/PAD[0]") # Side: 1, Order: 1 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vdd_horizontal_left_2/PAD[0]") # Side: 1, Order: 2 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[0]/PAD") # Side: 1, Order: 3 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[1]/PAD") # Side: 1, Order: 4 - - (inst name = 
"ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[2]/PAD") # Side: 1, Order: 5 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[3]/PAD") # Side: 1, Order: 6 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[4]/PAD") # Side: 1, Order: 7 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[5]/PAD") # Side: 1, Order: 8 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[6]/PAD") # Side: 1, Order: 9 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[7]/PAD") # Side: 1, Order: 10 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[8]/PAD") # Side: 1, Order: 11 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[9]/PAD") # Side: 1, Order: 12 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[10]/PAD") # Side: 1, Order: 13 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[11]/PAD") # Side: 1, Order: 14 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[12]/PAD") # Side: 1, Order: 15 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[13]/PAD") # Side: 1, Order: 16 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_a/pad_digital_from_tristate_foundry_horizontal_input[14]/PAD") # Side: 1, Order: 17 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[0]/PAD") # Side: 1, Order: 18 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[1]/PAD") # Side: 1, Order: 19 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[2]/PAD") # Side: 1, Order: 20 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[3]/PAD") # Side: 1, Order: 21 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[4]/PAD") # Side: 1, Order: 22 - - (inst name = 
"ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[5]/PAD") # Side: 1, Order: 23 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[6]/PAD") # Side: 1, Order: 24 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[7]/PAD") # Side: 1, Order: 25 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[8]/PAD") # Side: 1, Order: 26 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[9]/PAD") # Side: 1, Order: 27 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[10]/PAD") # Side: 1, Order: 28 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[11]/PAD") # Side: 1, Order: 29 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[12]/PAD") # Side: 1, Order: 30 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[13]/PAD") # Side: 1, Order: 31 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_b/pad_digital_from_tristate_foundry_horizontal_input[14]/PAD") # Side: 1, Order: 32 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[0]/PAD") # Side: 1, Order: 33 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[1]/PAD") # Side: 1, Order: 34 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[2]/PAD") # Side: 1, Order: 35 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[3]/PAD") # Side: 1, Order: 36 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[4]/PAD") # Side: 1, Order: 37 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[5]/PAD") # Side: 1, Order: 38 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[6]/PAD") # Side: 1, Order: 39 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[7]/PAD") # Side: 1, Order: 40 - - (inst name = 
"ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[8]/PAD") # Side: 1, Order: 41 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[9]/PAD") # Side: 1, Order: 42 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[10]/PAD") # Side: 1, Order: 43 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[11]/PAD") # Side: 1, Order: 44 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[12]/PAD") # Side: 1, Order: 45 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_input_array_io_c/pad_digital_from_tristate_foundry_horizontal_input[13]/PAD") # Side: 1, Order: 46 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[0]/PAD") # Side: 1, Order: 47 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[1]/PAD") # Side: 1, Order: 48 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[2]/PAD") # Side: 1, Order: 49 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[3]/PAD") # Side: 1, Order: 50 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[4]/PAD") # Side: 1, Order: 51 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[5]/PAD") # Side: 1, Order: 52 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[6]/PAD") # Side: 1, Order: 53 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[7]/PAD") # Side: 1, Order: 54 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[8]/PAD") # Side: 1, Order: 55 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[9]/PAD") # Side: 1, Order: 56 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[10]/PAD") # Side: 1, Order: 57 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[11]/PAD") # Side: 1, Order: 58 - - (inst name = 
"ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[12]/PAD") # Side: 1, Order: 59 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[13]/PAD") # Side: 1, Order: 60 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[14]/PAD") # Side: 1, Order: 61 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_x/pad_digital_from_tristate_foundry_horizontal_output[15]/PAD") # Side: 1, Order: 62 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_analog_fast_custom_horizontal_array_io_analog1/pad_analog_fast_custom_horizontal[0]/PAD") # Side: 1, Order: 63 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_analog_fast_custom_horizontal_array_io_analog1/pad_analog_fast_custom_horizontal[1]/PAD") # Side: 1, Order: 64 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_analog_fast_custom_horizontal_array_io_analog1/pad_analog_fast_custom_horizontal[2]/PAD") # Side: 1, Order: 65 - - ) - (right - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vss_horizontal_right_0/PAD[0]") # Side: 3, Order: 0 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vss_horizontal_right_0/PAD[1]") # Side: 3, Order: 1 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0/pad_digital_from_tristate_foundry_horizontal_output[0]/PAD") # Side: 3, Order: 2 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0/pad_digital_from_tristate_foundry_horizontal_output[1]/PAD") # Side: 3, Order: 3 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0/pad_digital_from_tristate_foundry_horizontal_output[2]/PAD") # Side: 3, Order: 4 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0/pad_digital_from_tristate_foundry_horizontal_output[3]/PAD") # Side: 3, Order: 5 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0/pad_digital_from_tristate_foundry_horizontal_output[4]/PAD") # Side: 3, Order: 6 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1/pad_digital_from_tristate_foundry_horizontal_output[0]/PAD") # Side: 3, Order: 7 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1/pad_digital_from_tristate_foundry_horizontal_output[1]/PAD") # Side: 3, Order: 8 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1/pad_digital_from_tristate_foundry_horizontal_output[2]/PAD") # Side: 3, Order: 9 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1/pad_digital_from_tristate_foundry_horizontal_output[3]/PAD") # Side: 3, Order: 10 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1/pad_digital_from_tristate_foundry_horizontal_output[4]/PAD") # Side: 3, Order: 11 - - (inst name = 
"ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2/pad_digital_from_tristate_foundry_horizontal_output[0]/PAD") # Side: 3, Order: 12 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2/pad_digital_from_tristate_foundry_horizontal_output[1]/PAD") # Side: 3, Order: 13 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2/pad_digital_from_tristate_foundry_horizontal_output[2]/PAD") # Side: 3, Order: 14 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2/pad_digital_from_tristate_foundry_horizontal_output[3]/PAD") # Side: 3, Order: 15 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2/pad_digital_from_tristate_foundry_horizontal_output[4]/PAD") # Side: 3, Order: 16 - - ) - (top - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_input_array_reset/pad_digital_from_tristate_foundry_vertical_input[0]/PAD") # Side: 2, Order: 0 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[0]/PAD") # Side: 2, Order: 1 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[1]/PAD") # Side: 2, Order: 2 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[2]/PAD") # Side: 2, Order: 3 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[3]/PAD") # Side: 2, Order: 4 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[4]/PAD") # Side: 2, Order: 5 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[5]/PAD") # Side: 2, Order: 6 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[6]/PAD") # Side: 2, Order: 7 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[7]/PAD") # Side: 2, Order: 8 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[8]/PAD") # Side: 2, Order: 9 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[9]/PAD") # Side: 2, Order: 10 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[10]/PAD") # Side: 2, Order: 11 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[11]/PAD") # Side: 2, Order: 12 - - (inst name = 
"ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[12]/PAD") # Side: 2, Order: 13 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[13]/PAD") # Side: 2, Order: 14 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[14]/PAD") # Side: 2, Order: 15 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_digital_from_tristate_foundry_vertical_output_array_io_z/pad_digital_from_tristate_foundry_vertical_output[15]/PAD") # Side: 2, Order: 16 - - ) - (bottom - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vdd_vertical_bottom_0/PAD[0]") # Side: 4, Order: 0 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_supply_vdd_vertical_bottom_1/PAD[0]") # Side: 4, Order: 1 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_analog_slow_foundry_vertical_array_io_analog2/pad_analog_slow_foundry_vertical[0]/PAD") # Side: 4, Order: 2 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_analog_slow_foundry_vertical_array_io_analog2/pad_analog_slow_foundry_vertical[1]/PAD") # Side: 4, Order: 3 - - (inst name = "ExampleTopModuleWithBB_PadFrame/pad_analog_slow_foundry_vertical_array_io_analog2/pad_analog_slow_foundry_vertical[2]/PAD") # Side: 4, Order: 4 - - ) -) \ No newline at end of file diff --git a/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala b/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala deleted file mode 100644 index 12fcb4111..000000000 --- a/tapeout/src/test/scala/transforms/.pads/AddIOPadsSpec.scala +++ /dev/null @@ -1,268 +0,0 @@ -// See LICENSE for license details. 
- -package barstools.tapeout.transforms.pads - -import java.io.File - -import barstools.tapeout.transforms.HasSetTechnologyLocation -import chisel3._ -import chisel3.experimental._ -import chisel3.iotesters._ -import chisel3.stage.ChiselStage -import chisel3.util.HasBlackBoxInline -import firrtl._ -import org.scalatest.{FlatSpec, Matchers} - -class BB extends BlackBox with HasBlackBoxInline { - val io = IO(new Bundle { - val c = Input(SInt(14.W)) - val z = Output(SInt(16.W)) - val analog1 = Analog(3.W) - val analog2 = Analog(3.W) - }) - // Generates a "FakeBB.v" file with the following Verilog module - setInline( - "FakeBB.v", - s""" - |module BB( - | input [15:0] c, - | output [15:0] z, - | inout [2:0] analog1, - | inout [2:0] analog2 - |); - | always @* begin - | z = 2 * c; - | analog2 = analog1 + 1; - | end - |endmodule - """.stripMargin - ) -} - -// If no template file is provided, it'll use the default one (example) in the resource folder -// Default pad side is Top if no side is specified for a given IO -// You can designate the number of different supply pads on each chip side -class ExampleTopModuleWithBB - extends TopModule( - supplyAnnos = Seq( - SupplyAnnotation(padName = "vdd", leftSide = 3, bottomSide = 2), - SupplyAnnotation(padName = "vss", rightSide = 1) - ) - ) - with HasSetTechnologyLocation { - val io = IO(new Bundle { - val a = Input(UInt(15.W)) - val b = Input(a.cloneType) - val c = Input(SInt(14.W)) - val x = Output(UInt(16.W)) - val y = Output(x.cloneType) - val z = Output(SInt(16.W)) - val analog1 = Analog(3.W) - val analog2 = analog1.cloneType - val v = Output(Vec(3, UInt(5.W))) - }) - - setTechnologyLocation("./RealTech") - - // Can annotate aggregates with pad side location + pad name (should be a name in the yaml template) - annotatePad(io.v, Right, "from_tristate_foundry") - // Can annotate individual elements - annotatePad(io.analog1, Left, "fast_custom") - annotatePad(io.analog2, Bottom, "slow_foundry") - // Looks for a pad that matches the IO type (digital in, digital out, analog) if no name is specified - Seq(io.a, io.b, io.c, io.x).foreach { x => annotatePad(x, Left) } - // Some signals might not want pads associated with them - noPad(io.y) - // Clk might come directly from bump - noPad(clock) - - val bb = Module(new BB()) - bb.io.c := io.c - io.z := bb.io.z - bb.io.analog1 <> io.analog1 - bb.io.analog2 <> io.analog2 - - io.x := io.a + 1.U - io.y := io.b - 1.U - - io.v.foreach { lhs => lhs := io.a } - -} - -class SimpleTopModuleTester(c: ExampleTopModuleWithBB) extends PeekPokeTester(c) { - val ax = Seq(5, 3) - val bx = Seq(8, 2) - val cx = Seq(-11, -9) - for (i <- 0 until ax.length) { - poke(c.io.a, ax(i)) - poke(c.io.b, bx(i)) - poke(c.io.c, cx(i)) - expect(c.io.x, ax(i) + 1) - expect(c.io.y, bx(i) - 1) - expect(c.io.z, 2 * cx(i)) - c.io.v.foreach { out => expect(out, ax(i)) } - } - // Analog can't be peeked + poked -} - -// Notes: Annotations -// a in 15: left, default digital -// b in 15: left, default digital -// c in 14: left, default digital ; signed -// x out 16: left, default digital -// y out: NOPAD -// clk in: NOPAD -// analog1 3: left, fast_custom -// analog2 3: bottom, slow_foundry -// v (vec of 3 with 5, out): right, from_tristate_foundry -// reset in: UNSPECIFIED: top, default digital -// z out 16: UNSPECIFIED: top, default digital ; signed -// vdd, left: 3, group of 1 -// vdd, bottom: 2, group of 1 -// vss, right: 1, group of 2 -// Notes: Used pads -// digital horizontal (from_tristate_foundry) -// in + out -// analog fast_custom horizontal 
-// analog slow_foundry vertical -// digital vertical (from_tristate_foundry) -// in + out -// vdd horizontal -// vdd vertical -// vss horizontal - -class IOPadSpec extends FlatSpec with Matchers { - - def readOutputFile(dir: String, f: String): String = { - FileUtils.getText(dir + File.separator + f) - } - def readResource(resource: String): String = { - val stream = getClass.getResourceAsStream(resource) - scala.io.Source.fromInputStream(stream).mkString - } - - def checkOutputs(dir: String): Unit = { - // Show that black box source helper is run - //readOutputFile(dir, "black_box_verilog_files.f") should include ("pad_supply_vdd_horizontal.v") - - val padBBEx = s"""// Digital Pad Example - |// Signal Direction: Input - |// Pad Orientation: Horizontal - |// Call your instance PAD - |module pad_digital_from_tristate_foundry_horizontal_input( - | input in, - | output reg out - |); - | // Where you would normally dump your pad instance - | always @* begin - | out = in; - | end - |endmodule - | - |module pad_digital_from_tristate_foundry_horizontal_input_array #( - | parameter int WIDTH=1 - |)( - | input [WIDTH-1:0] in, - | output reg [WIDTH-1:0] out - |); - | pad_digital_from_tristate_foundry_horizontal_input pad_digital_from_tristate_foundry_horizontal_input[WIDTH-1:0]( - | .in(in), - | .out(out) - | );""".stripMargin - // Make sure black box templating is OK - readOutputFile(dir, "pad_digital_from_tristate_foundry_horizontal_input_array.v") should include(padBBEx) - - val verilog = readOutputFile(dir, "ExampleTopModuleWithBB.v") - // Pad frame + top should be exact - verilog should include(readResource("/PadAnnotationVerilogPart.v")) - // Pad Placement IO file should be exact - val padIO = readOutputFile(dir, "pads.io") - padIO should include(readResource("/PadPlacement.io")) - } - - behavior.of("Pad Annotations") - - it should "serialize pad annotations" in { - val noIOPadAnnotation = NoIOPadAnnotation("dog") - noIOPadAnnotation.serialize should include("noPad: dog") - - val ioPadAnnotation = IOPadAnnotation("left", "oliver") - ioPadAnnotation.serialize should include( - """padSide: left - |padName: oliver - |""".stripMargin) - - val modulePadAnnotation = ModulePadAnnotation( - "top", - 11, - 42, - Seq( - SupplyAnnotation("mypad, 1, 2 ,3 , 4"), - SupplyAnnotation("yourpad, 9, 8, 7, 6") - ) - ) - - modulePadAnnotation.serialize should be( - """defaultPadSide: top - |coreWidth: 11 - |coreHeight: 42 - |supplyAnnos: - |- rightSide: 0 - | padName: mypad, 1, 2 ,3 , 4 - | leftSide: 0 - | bottomSide: 0 - | topSide: 0 - |- rightSide: 0 - | padName: yourpad, 9, 8, 7, 6 - | leftSide: 0 - | bottomSide: 0 - | topSide: 0 - |""".stripMargin - ) - } - - behavior.of("top module with blackbox") - - it should "pass simple testbench" in { - val optionsManager = new TesterOptionsManager { - firrtlOptions = firrtlOptions.copy( - compilerName = "verilog" - ) - testerOptions = testerOptions.copy(isVerbose = true, backendName = "verilator", displayBase = 10) - commonOptions = commonOptions.copy(targetDirName = "test_run_dir/PadsTB") - } - iotesters.Driver.execute(() => new ExampleTopModuleWithBB, optionsManager) { c => - val dir = optionsManager.commonOptions.targetDirName - checkOutputs(dir) - new SimpleTopModuleTester(c) - } should be(true) - } - /* - it should "create proper IO pads + black box in low firrtl" in { - val optionsManager = new ExecutionOptionsManager("barstools") with HasChiselExecutionOptions with HasFirrtlOptions { - firrtlOptions = firrtlOptions.copy(compilerName = "low") - commonOptions = 
commonOptions.copy(targetDirName = "test_run_dir/LoFirrtl") - //commonOptions = commonOptions.copy(globalLogLevel = logger.LogLevel.Info) - } - val success = chisel3.Driver.execute(optionsManager, () => new ExampleTopModuleWithBB) match { - case ChiselExecutionSuccess(_, chirrtl, Some(FirrtlExecutionSuccess(_, firrtl))) => - firrtl should include ("ExampleTopModuleWithBB_PadFrame") - firrtl should include ("ExampleTopModuleWithBB_Internal") - firrtl should not include ("FakeBBPlaceholder") - true - case _ => false - } - success should be (true) - } - */ - it should "create proper IO pads + black box in verilog" in { - val dir = "test_run_dir/PadsVerilog" - (new ChiselStage).emitVerilog( - new ExampleTopModuleWithBB, -// Array("-td", dir, "-X", "verilog") - Array("-td", dir) - ) - checkOutputs(dir) - } - -} From fc3a3eabff12b28dac417027dc3da62f9838876f Mon Sep 17 00:00:00 2001 From: Tim Snyder Date: Wed, 21 Oct 2020 21:08:51 +0000 Subject: [PATCH 184/273] Update MacroCompiler for Chisel 3.4 --- macros/src/main/scala/MacroCompiler.scala | 26 +++++++++++------------ 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index cb24bc9cf..53d178476 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -23,6 +23,14 @@ import Utils._ case class MacroCompilerException(msg: String) extends Exception(msg) +// TODO The parameters could be unpacked here instead of keeping it in a serialized form +case class MacroCompilerAnnotation(content: String) extends NoTargetAnnotation { + import MacroCompilerAnnotation.Params + + def params: Params = MacroCompilerUtil.objFromString(content).asInstanceOf[Params] +} + + /** * The MacroCompilerAnnotation to trigger the macro compiler. * Note that this annotation does NOT actually target any modules for @@ -32,7 +40,6 @@ case class MacroCompilerException(msg: String) extends Exception(msg) * To use, simply annotate the entire circuit itself with this annotation and * include [[MacroCompilerTransform]]. * - * TODO: make this into a "true" annotation? */ object MacroCompilerAnnotation { /** Macro compiler mode. */ @@ -92,16 +99,9 @@ object MacroCompilerAnnotation { * @param c Top-level circuit name (see class description) * @param p Parameters (see above). 
*/ - def apply(c: String, p: Params): Annotation = - Annotation(CircuitName(c), classOf[MacroCompilerTransform], MacroCompilerUtil.objToString(p)) + def apply(c: String, p: Params): MacroCompilerAnnotation = + MacroCompilerAnnotation(MacroCompilerUtil.objToString(p)) - def unapply(a: Annotation) = a match { - case Annotation(CircuitName(c), t, serialized) if t == classOf[MacroCompilerTransform] => { - val p: Params = MacroCompilerUtil.objFromString(serialized).asInstanceOf[Params] - Some(c, p) - } - case _ => None - } } class MacroCompilerPass(mems: Option[Seq[Macro]], @@ -656,9 +656,9 @@ class MacroCompilerTransform extends Transform { def inputForm = MidForm def outputForm = MidForm - def execute(state: CircuitState) = getMyAnnotations(state) match { - case Seq(MacroCompilerAnnotation(state.circuit.main, - MacroCompilerAnnotation.Params(memFile, memFileFormat, libFile, hammerIR, costMetric, mode, useCompiler, forceCompile, forceSynflops))) => + def execute(state: CircuitState) = state.annotations match { + case Seq(anno: MacroCompilerAnnotation) => + val MacroCompilerAnnotation.Params(memFile, memFileFormat, libFile, hammerIR, costMetric, mode, useCompiler, forceCompile, forceSynflops) = anno.params if (mode == MacroCompilerAnnotation.FallbackSynflops) { throw new UnsupportedOperationException("Not implemented yet") } From aca4bd579f03acf5a06add2de5b005cdfa6c25ec Mon Sep 17 00:00:00 2001 From: Tim Snyder Date: Fri, 23 Oct 2020 18:01:06 +0000 Subject: [PATCH 185/273] update build.sbt for Chisel3.4/FIRRTL1.4 --- build.sbt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build.sbt b/build.sbt index 9ec44bf7c..fa76c3256 100644 --- a/build.sbt +++ b/build.sbt @@ -1,8 +1,8 @@ // See LICENSE for license details. val defaultVersions = Map( - "chisel3" -> "3.2-SNAPSHOT", - "chisel-iotesters" -> "1.3-SNAPSHOT" + "chisel3" -> "3.4.0", + "chisel-iotesters" -> "1.5.0" ) lazy val commonSettings = Seq( @@ -31,7 +31,7 @@ lazy val macros = (project in file("macros")) .settings(commonSettings) .settings(Seq( libraryDependencies ++= Seq( - "edu.berkeley.cs" %% "firrtl-interpreter" % "1.2-SNAPSHOT" % Test + "edu.berkeley.cs" %% "firrtl-interpreter" % "1.4.0" % Test ), mainClass := Some("barstools.macros.MacroCompiler") )) From 446cb84cbfe9b776d62ab4dcef0075c4a645723b Mon Sep 17 00:00:00 2001 From: Tim Snyder Date: Fri, 23 Oct 2020 18:02:35 +0000 Subject: [PATCH 186/273] fixup! Update MacroCompiler for Chisel 3.4 Need to collect the annotations into a Seq. Also updated the macros project tests. 
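As a rough illustration of why the collect is needed (hypothetical stand-in
types below, not the actual firrtl/barstools classes): matching directly on
the full annotation sequence only succeeds when the compiler annotation is
the sole element, whereas collecting first and then matching tolerates any
unrelated annotations that ride along in the state.

    // Hypothetical stand-ins; the real transform matches MacroCompilerAnnotation
    // inside firrtl's annotation sequence.
    sealed trait Anno
    case class CompilerAnno(content: String) extends Anno
    case class OtherAnno(info: String) extends Anno

    // Fragile: breaks as soon as any unrelated annotation is present.
    def fragile(annos: Seq[Anno]): Option[CompilerAnno] = annos match {
      case Seq(a: CompilerAnno) => Some(a)
      case _                    => None
    }

    // Robust: narrow to the annotations of interest, then match.
    def robust(annos: Seq[Anno]): Option[CompilerAnno] =
      annos.collect { case a: CompilerAnno => a } match {
        case Seq(a) => Some(a)
        case _      => None
      }

    // fragile(Seq(CompilerAnno("params"), OtherAnno("x")))  // => None
    // robust(Seq(CompilerAnno("params"), OtherAnno("x")))   // => Some(CompilerAnno("params"))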
--- macros/src/main/scala/MacroCompiler.scala | 2 +- macros/src/test/scala/MacroCompilerSpec.scala | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/MacroCompiler.scala index 53d178476..c057baa60 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/MacroCompiler.scala @@ -656,7 +656,7 @@ class MacroCompilerTransform extends Transform { def inputForm = MidForm def outputForm = MidForm - def execute(state: CircuitState) = state.annotations match { + def execute(state: CircuitState) = state.annotations.collect { case a: MacroCompilerAnnotation => a } match { case Seq(anno: MacroCompilerAnnotation) => val MacroCompilerAnnotation.Params(memFile, memFileFormat, libFile, hammerIR, costMetric, mode, useCompiler, forceCompile, forceSynflops) = anno.params if (mode == MacroCompilerAnnotation.FallbackSynflops) { diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/MacroCompilerSpec.scala index dfecc0c1c..488f68fe7 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/MacroCompilerSpec.scala @@ -3,7 +3,7 @@ package barstools.macros import firrtl.ir.{Circuit, NoInfo} import firrtl.passes.RemoveEmpty import firrtl.Parser.parse -import firrtl.Utils.ceilLog2 +import firrtl.Utils.getUIntWidth import java.io.{File, StringWriter} import mdf.macrolib.SRAMMacro @@ -247,10 +247,10 @@ trait HasSimpleTestGenerator { val v = s"${generatorType}${extraTagPrefixed}.v" lazy val mem_name = "target_memory" - val mem_addr_width = ceilLog2(memDepth) + val mem_addr_width = getUIntWidth(memDepth-1) lazy val lib_name = "awesome_lib_mem" - val lib_addr_width = ceilLog2(libDepth) + val lib_addr_width = getUIntWidth(libDepth-1) // Override these to change the port prefixes if needed. def libPortPrefix: String = "lib" From 9be550e23d2f6a2968f35719ba55edb8aefaf138 Mon Sep 17 00:00:00 2001 From: abejgonzalez Date: Wed, 25 Nov 2020 16:27:52 -0800 Subject: [PATCH 187/273] Bump to new dep. 
API | Automatically avoid renaming ExtMod's and circuit top mod --- .../transforms/AddSuffixToModuleNames.scala | 36 ++++++++++--------- .../transforms/AvoidExtModuleCollisions.scala | 15 +++++--- .../transforms/ConvertToExtModPass.scala | 18 +++++++--- .../scala/transforms/EnumerateModules.scala | 1 + .../src/main/scala/transforms/Generate.scala | 7 ++-- .../scala/transforms/ReParentCircuit.scala | 15 +++++--- .../transforms/RemoveUnusedModules.scala | 27 ++++++++------ .../main/scala/transforms/ResetInverter.scala | 1 + 8 files changed, 77 insertions(+), 43 deletions(-) diff --git a/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala b/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala index ab1dd4873..26de5425d 100644 --- a/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala +++ b/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala @@ -4,10 +4,10 @@ package barstools.tapeout.transforms import firrtl._ import firrtl.ir._ -import firrtl.annotations._ import firrtl.Mappers._ -import firrtl.stage.Forms -import firrtl.stage.TransformManager.TransformDependency +import firrtl.annotations.{ModuleTarget, SingleTargetAnnotation, CircuitTarget} +import firrtl.stage.TransformManager.{TransformDependency} +import firrtl.stage.{Forms} case class KeepNameAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { @@ -19,32 +19,36 @@ case class ModuleNameSuffixAnnotation(target: CircuitTarget, suffix: String) def duplicate(n: CircuitTarget) = this.copy(target = n) } -// This doesn't rename ExtModules under the assumption that they're some -// Verilog black box and therefore can't be renamed. Since the point is to -// allow FIRRTL to be linked together using "cat" and ExtModules don't get -// emitted, this should be safe. 
class AddSuffixToModuleNames extends Transform with DependencyAPIMigration { override def prerequisites: Seq[TransformDependency] = Forms.LowForm override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters + override def invalidates(a: Transform): Boolean = false - def processAnnos(annos: AnnotationSeq): (AnnotationSeq, (String) => String) = { - val whitelist = annos.collect({ case KeepNameAnnotation(tgt) => tgt.module }).toSet - val newAnnos = annos.filterNot(_.isInstanceOf[ModuleNameSuffixAnnotation]) - val suffixes = annos.collect({ case ModuleNameSuffixAnnotation(_, suffix) => suffix }) + def determineRenamerandAnnos(state: CircuitState): (AnnotationSeq, (String) => String) = { + // remove determine suffix annotation + val newAnnos = state.annotations.filterNot(_.isInstanceOf[ModuleNameSuffixAnnotation]) + val suffixes = state.annotations.collect({ case ModuleNameSuffixAnnotation(_, suffix) => suffix }) require(suffixes.length <= 1) - val suffix = suffixes.headOption.getOrElse("") - val renamer = { name: String => if (whitelist(name)) name else name + suffix } + + // skip renaming ExtModules and top-level module + val excludeSet = state.circuit.modules.flatMap { + case e: ExtModule => Some(e.name) + case m if (m.name == state.circuit.main) => Some(m.name) + case _ => None + }.toSet + + val renamer = { (name: String) => if (excludeSet(name)) name else name + suffix } + (newAnnos, renamer) } def renameInstanceModules(renamer: (String) => String)(stmt: Statement): Statement = { stmt match { case m: DefInstance => new DefInstance(m.info, m.name, renamer(m.module)) - case m: WDefInstance => new WDefInstance(m.info, m.name, renamer(m.module), m.tpe) - case s => s map renameInstanceModules(renamer) + case s => s.map(renameInstanceModules(renamer)) // if is statement, recurse } } @@ -61,7 +65,7 @@ class AddSuffixToModuleNames extends Transform with DependencyAPIMigration { } def execute(state: CircuitState): CircuitState = { - val (newAnnos, renamer) = processAnnos(state.annotations) + val (newAnnos, renamer) = determineRenamerandAnnos(state) val (ret, renames) = run(state, renamer) state.copy(circuit = ret, annotations = newAnnos, renames = Some(renames)) } diff --git a/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala b/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala index df1e272e7..74dfda2fd 100644 --- a/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala +++ b/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala @@ -4,13 +4,20 @@ package barstools.tapeout.transforms import firrtl._ import firrtl.ir._ -import firrtl.annotations._ +import firrtl.annotations.{NoTargetAnnotation} +import firrtl.options.{Dependency} +import firrtl.stage.TransformManager.{TransformDependency} +import firrtl.stage.{Forms} case class LinkExtModulesAnnotation(mustLink: Seq[ExtModule]) extends NoTargetAnnotation -class AvoidExtModuleCollisions extends Transform { - def inputForm = HighForm - def outputForm = HighForm +class AvoidExtModuleCollisions extends Transform with DependencyAPIMigration { + + override def prerequisites: Seq[TransformDependency] = Forms.HighForm + override def optionalPrerequisites: Seq[TransformDependency] = Seq(Dependency[RemoveUnusedModules]) + override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.HighEmitters + override def invalidates(a: Transform): Boolean = false + def execute(state: CircuitState): 
CircuitState = { val mustLink = state.annotations.flatMap { case LinkExtModulesAnnotation(mustLink) => mustLink diff --git a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala index 83486fd51..04e645fd6 100644 --- a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala +++ b/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala @@ -3,9 +3,12 @@ package barstools.tapeout.transforms import firrtl._ -import firrtl.annotations._ import firrtl.ir._ -import firrtl.passes.Pass +import firrtl.annotations.{ModuleTarget, SingleTargetAnnotation, ReferenceTarget} +import firrtl.stage.TransformManager.{TransformDependency} +import firrtl.stage.{Forms} +import firrtl.options.{Dependency} +import firrtl.passes.memlib.{ReplSeqMem} case class ConvertToExtModAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { @@ -15,9 +18,14 @@ case class ConvertToExtModAnnotation(target: ModuleTarget) // Converts some modules to external modules, based on a given function. If // that function returns "true" then the module is converted into an ExtModule, // otherwise it's left alone. -class ConvertToExtMod extends Transform { - def inputForm = HighForm - def outputForm = HighForm +class ConvertToExtMod extends Transform with DependencyAPIMigration { + + override def prerequisites: Seq[TransformDependency] = Forms.HighForm + override def optionalPrerequisites: Seq[TransformDependency] = Seq.empty + override def optionalPrerequisiteOf: Seq[TransformDependency] = { + Forms.HighEmitters ++ Seq(Dependency[RemoveUnusedModules], Dependency[ReplSeqMem]) + } + override def invalidates(a: Transform): Boolean = false def run(state: CircuitState, makeExt: Set[String]): (Circuit, RenameMap) = { val renames = RenameMap() diff --git a/tapeout/src/main/scala/transforms/EnumerateModules.scala b/tapeout/src/main/scala/transforms/EnumerateModules.scala index a2b499fd8..6a732d754 100644 --- a/tapeout/src/main/scala/transforms/EnumerateModules.scala +++ b/tapeout/src/main/scala/transforms/EnumerateModules.scala @@ -28,6 +28,7 @@ class EnumerateModules(enumerate: (Module) => Unit) override def prerequisites: Seq[TransformDependency] = Forms.LowForm override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters + override def invalidates(a: Transform): Boolean = false def transforms: Seq[Transform] = Seq(new EnumerateModulesPass(enumerate)) diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/transforms/Generate.scala index 12e222e56..17b8781df 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/transforms/Generate.scala @@ -5,7 +5,7 @@ import firrtl.annotations._ import firrtl.ir._ import firrtl.passes.memlib.ReplSeqMemAnnotation import firrtl.stage.FirrtlCircuitAnnotation -import firrtl.transforms.BlackBoxResourceFileNameAnno +import firrtl.transforms.{BlackBoxResourceFileNameAnno, DedupModules} import logger.LazyLogging trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => @@ -156,6 +156,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => // FIRRTL options lazy val annoFiles = firrtlOptions.annotationFileNames + // order is determined by DependencyAPIMigration val topTransforms = Seq( new ReParentCircuit, new RemoveUnusedModules @@ -171,6 +172,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { 
this: App => annotations = firrtlOptions.annotations ++ topAnnos ) + // order is determined by DependencyAPIMigration val harnessTransforms = Seq( new ConvertToExtMod, new RemoveUnusedModules, @@ -216,11 +218,8 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => // Execute top and get list of ExtModules to avoid collisions val topExtModules = executeTop() - val externals = Seq("SimSerial", "SimDTM", "plusarg_reader") ++ harnessTop ++ synTop - val harnessAnnos = tapeoutOptions.harnessDotfOut.map(BlackBoxResourceFileNameAnno(_)).toSeq ++ - externals.map(ext => KeepNameAnnotation(rootCircuitTarget.module(ext))) ++ harnessTop.map(ht => ModuleNameSuffixAnnotation(rootCircuitTarget, s"_in${ht}")) ++ synTop.map(st => ConvertToExtModAnnotation(rootCircuitTarget.module(st))) :+ LinkExtModulesAnnotation(topExtModules) diff --git a/tapeout/src/main/scala/transforms/ReParentCircuit.scala b/tapeout/src/main/scala/transforms/ReParentCircuit.scala index 574d9dda5..cbf4d2f85 100644 --- a/tapeout/src/main/scala/transforms/ReParentCircuit.scala +++ b/tapeout/src/main/scala/transforms/ReParentCircuit.scala @@ -4,17 +4,24 @@ package barstools.tapeout.transforms import firrtl._ import firrtl.ir._ -import firrtl.passes.Pass import firrtl.annotations._ +import firrtl.options.{Dependency} +import firrtl.stage.TransformManager.{TransformDependency} +import firrtl.stage.{Forms} case class ReParentCircuitAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { def duplicate(n: ModuleTarget) = this.copy(n) } -class ReParentCircuit extends Transform { - def inputForm = HighForm - def outputForm = HighForm +class ReParentCircuit extends Transform with DependencyAPIMigration { + + override def prerequisites: Seq[TransformDependency] = Forms.HighForm + override def optionalPrerequisites: Seq[TransformDependency] = Seq.empty + override def optionalPrerequisiteOf: Seq[TransformDependency] = { + Forms.HighEmitters :+ Dependency[RemoveUnusedModules] + } + override def invalidates(a: Transform): Boolean = false def execute(state: CircuitState): CircuitState = { val c = state.circuit diff --git a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala index 24eb35f64..3feb67363 100644 --- a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala +++ b/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala @@ -4,15 +4,22 @@ package barstools.tapeout.transforms import firrtl._ import firrtl.ir._ -import firrtl.passes.Pass -import firrtl.annotations._ -import firrtl.transforms.DontTouchAnnotation +import firrtl.annotations.{ModuleTarget} +import firrtl.stage.TransformManager.{TransformDependency} +import firrtl.options.{Dependency} +import firrtl.stage.{Forms} +import firrtl.passes.memlib.{ReplSeqMem} // Removes all the unused modules in a circuit by recursing through every // instance (starting at the main module) -class RemoveUnusedModules extends Transform { - def inputForm = HighForm - def outputForm = HighForm +class RemoveUnusedModules extends Transform with DependencyAPIMigration { + + override def prerequisites: Seq[TransformDependency] = Forms.HighForm + override def optionalPrerequisites: Seq[TransformDependency] = Seq.empty + override def optionalPrerequisiteOf: Seq[TransformDependency] = { + Forms.HighEmitters :+ Dependency[ReplSeqMem] + } + override def invalidates(a: Transform): Boolean = false def execute(state: CircuitState): CircuitState = { val modulesByName = state.circuit.modules.map{ @@ 
-25,19 +32,17 @@ class RemoveUnusedModules extends Transform { case Some(m) => { def someStatements(statement: Statement): Seq[Statement] = statement match { - case b: Block => + case b: Block => b.stmts.map{ someStatements(_) } .foldLeft(Seq[Statement]())(_ ++ _) case when: Conditionally => someStatements(when.conseq) ++ someStatements(when.alt) case i: DefInstance => Seq(i) - case w: WDefInstance => Seq(w) case _ => Seq() } someStatements(m.body).map{ case s: DefInstance => Set(s.module) | getUsedModules(modulesByName(s.module)) - case s: WDefInstance => Set(s.module) | getUsedModules(modulesByName(s.module)) case _ => Set[String]() }.foldLeft(Set(m.name))(_ | _) } @@ -52,7 +57,9 @@ class RemoveUnusedModules extends Transform { val renames = state.renames.getOrElse(RenameMap()) - state.circuit.modules.filterNot { usedModuleSet contains _.name } foreach { x => renames.record(ModuleTarget(state.circuit.main, x.name), Nil) } + state.circuit.modules.filterNot { usedModuleSet contains _.name } foreach { x => + renames.record(ModuleTarget(state.circuit.main, x.name), Nil) + } val newCircuit = Circuit(state.circuit.info, usedModuleSeq, state.circuit.main) state.copy(circuit = newCircuit, renames = Some(renames)) diff --git a/tapeout/src/main/scala/transforms/ResetInverter.scala b/tapeout/src/main/scala/transforms/ResetInverter.scala index f92822510..1ccb18888 100644 --- a/tapeout/src/main/scala/transforms/ResetInverter.scala +++ b/tapeout/src/main/scala/transforms/ResetInverter.scala @@ -46,6 +46,7 @@ class ResetInverterTransform extends Transform with DependencyAPIMigration { override def prerequisites: Seq[TransformDependency] = Forms.LowForm override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters + override def invalidates(a: Transform): Boolean = false override def execute(state: CircuitState): CircuitState = { state.annotations.filter(_.isInstanceOf[ResetInverterAnnotation]) match { From fa699af02635681c8af90f2169a6705fe5e3e37a Mon Sep 17 00:00:00 2001 From: abejgonzalez Date: Fri, 27 Nov 2020 17:34:16 -0800 Subject: [PATCH 188/273] Add missing dependency to put AvoidExtModuleCollisions before ReplSeqMem --- .../src/main/scala/transforms/AvoidExtModuleCollisions.scala | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala b/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala index 74dfda2fd..76ca10061 100644 --- a/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala +++ b/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala @@ -8,6 +8,7 @@ import firrtl.annotations.{NoTargetAnnotation} import firrtl.options.{Dependency} import firrtl.stage.TransformManager.{TransformDependency} import firrtl.stage.{Forms} +import firrtl.passes.memlib.{ReplSeqMem} case class LinkExtModulesAnnotation(mustLink: Seq[ExtModule]) extends NoTargetAnnotation @@ -15,7 +16,9 @@ class AvoidExtModuleCollisions extends Transform with DependencyAPIMigration { override def prerequisites: Seq[TransformDependency] = Forms.HighForm override def optionalPrerequisites: Seq[TransformDependency] = Seq(Dependency[RemoveUnusedModules]) - override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.HighEmitters + override def optionalPrerequisiteOf: Seq[TransformDependency] = { + Forms.HighEmitters :+ Dependency[ReplSeqMem] + } override def invalidates(a: Transform): Boolean = false def 
execute(state: CircuitState): CircuitState = { From 3a29f535726a191d09164470eb1ce1a1ddd5bf9a Mon Sep 17 00:00:00 2001 From: abejgonzalez Date: Mon, 30 Nov 2020 21:11:24 -0800 Subject: [PATCH 189/273] Use stable dep. versions | Small bumps/cleanup --- .gitignore | 4 ++-- build.sbt | 20 ++++++++------------ project/build.properties | 2 +- 3 files changed, 11 insertions(+), 15 deletions(-) diff --git a/.gitignore b/.gitignore index f4406576a..12c0f6ad3 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,7 @@ src/main/scala/dsptools/sandbox.sc test_run_dir/ *.fir *.f -*.anno +*.anno.json ### XilinxISE template # intermediate build files @@ -345,4 +345,4 @@ project/plugins/project/ hs_err_pid* # ignore lib from rocket build -lib \ No newline at end of file +lib diff --git a/build.sbt b/build.sbt index e291440d6..fd9638b11 100644 --- a/build.sbt +++ b/build.sbt @@ -1,14 +1,14 @@ // See LICENSE for license details. val defaultVersions = Map( - "chisel3" -> "3.4-SNAPSHOT", - "chisel-iotesters" -> "1.5-SNAPSHOT" + "chisel3" -> "3.4.+", + "chisel-iotesters" -> "1.5.+" ) lazy val commonSettings = Seq( organization := "edu.berkeley.cs", version := "0.1-SNAPSHOT", - scalaVersion := "2.12.8", + scalaVersion := "2.12.10", scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls", "-Xsource:2.11"), libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) @@ -19,7 +19,8 @@ lazy val commonSettings = Seq( ), resolvers ++= Seq( Resolver.sonatypeRepo("snapshots"), - Resolver.sonatypeRepo("releases") + Resolver.sonatypeRepo("releases"), + Resolver.mavenLocal ) ) @@ -29,21 +30,16 @@ lazy val mdf = (project in file("mdf/scalalib")) lazy val macros = (project in file("macros")) .dependsOn(mdf) .settings(commonSettings) - .settings(Seq( + .settings( libraryDependencies ++= Seq( - "edu.berkeley.cs" %% "firrtl-interpreter" % "1.4.0" % Test + "edu.berkeley.cs" %% "firrtl-interpreter" % "1.4.+" % Test ), mainClass := Some("barstools.macros.MacroCompiler") - )) + ) .enablePlugins(sbtassembly.AssemblyPlugin) lazy val tapeout = (project in file("tapeout")) .settings(commonSettings) - .settings(Seq( - libraryDependencies ++= Seq( - "io.github.daviddenton" %% "handlebars-scala-fork" % "2.3.0" - ) - )) .settings(scalacOptions in Test ++= Seq("-language:reflectiveCalls")) lazy val root = (project in file(".")).aggregate(macros, tapeout) diff --git a/project/build.properties b/project/build.properties index 72f902892..0837f7a13 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=1.2.7 +sbt.version=1.3.13 From 15fa68b3a40addc5ac77a78ced37497dbce3f687 Mon Sep 17 00:00:00 2001 From: David Biancolin Date: Fri, 11 Dec 2020 03:55:25 +0000 Subject: [PATCH 190/273] Bump MDF for updated scala version --- mdf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mdf b/mdf index 4281e8f62..4be9b1736 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit 4281e8f621decc10a8cdb878c593e46115c70998 +Subproject commit 4be9b173647c77f990a542f4eb5f69af01d77316 From 62f311654a4b31ccbc2839beaee64cd770ecd4a0 Mon Sep 17 00:00:00 2001 From: abejgonzalez Date: Fri, 11 Dec 2020 14:11:08 -0800 Subject: [PATCH 191/273] Fix ResetInv test --- .../scala/transforms/ResetInverterSpec.scala | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala 
b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala index fe2042880..9f23c3a87 100644 --- a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala +++ b/tapeout/src/test/scala/transforms/ResetInverterSpec.scala @@ -3,7 +3,8 @@ package barstools.tapeout.transforms import chisel3._ -import chisel3.stage.ChiselStage +import chisel3.stage.{ChiselStage, ChiselGeneratorAnnotation} +import firrtl.{EmittedFirrtlCircuitAnnotation, EmittedFirrtlModuleAnnotation} import org.scalatest.{FreeSpec, Matchers} class ExampleModuleNeedsResetInverted extends Module with ResetInverter { @@ -20,14 +21,23 @@ class ExampleModuleNeedsResetInverted extends Module with ResetInverter { class ResetNSpec extends FreeSpec with Matchers { "Inverting reset needs to be done throughout module in Chirrtl" in { - val chirrtl = (new ChiselStage).emitChirrtl(new ExampleModuleNeedsResetInverted, Array("--no-run-firrtl")) + val chirrtl = (new ChiselStage).emitChirrtl(new ExampleModuleNeedsResetInverted) chirrtl should include("input reset :") (chirrtl should not).include("input reset_n :") (chirrtl should not).include("node reset = not(reset_n)") } "Inverting reset needs to be done throughout module when generating firrtl" in { - val firrtl = (new ChiselStage).emitFirrtl(new ExampleModuleNeedsResetInverted) + // generate low-firrtl + val firrtl = (new ChiselStage).execute( + Array("-X", "low"), + Seq(ChiselGeneratorAnnotation(() => new ExampleModuleNeedsResetInverted)) + ).collect { + case EmittedFirrtlCircuitAnnotation(a) => a + case EmittedFirrtlModuleAnnotation(a) => a + }.map(_.value) + .mkString("") + firrtl should include("input reset_n :") firrtl should include("node reset = not(reset_n)") (firrtl should not).include("input reset :") From 26dce446eac2c5c5ab7bd5f062a6fc449f925db8 Mon Sep 17 00:00:00 2001 From: abejgonzalez Date: Fri, 11 Dec 2020 14:51:43 -0800 Subject: [PATCH 192/273] Generate LowFirrtl for Retime tests --- .../scala/transforms/retime/RetimeSpec.scala | 41 +++++++++++++------ 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala b/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala index 1f2de5a88..356789917 100644 --- a/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala +++ b/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala @@ -2,10 +2,11 @@ package barstools.tapeout.transforms.retime.test -import barstools.tapeout.transforms.retime._ import chisel3._ -import chisel3.stage.ChiselStage -import firrtl._ +import chisel3.stage.{ChiselStage, ChiselGeneratorAnnotation} +import firrtl.{EmittedFirrtlCircuitAnnotation, EmittedFirrtlModuleAnnotation} +import barstools.tapeout.transforms.retime.RetimeLib +import firrtl.FileUtils import logger.Logger import org.scalatest.{FlatSpec, Matchers} @@ -18,20 +19,33 @@ class RetimeSpec extends FlatSpec with Matchers { val genClassName = gen.getClass.getName name + genClassName.hashCode.abs } + def getLowFirrtl[T <: RawModule](gen: () => T, extraArgs: Array[String] = Array.empty): String = { + // generate low firrtl + (new ChiselStage).execute( + Array("-X", "low") ++ extraArgs, + Seq(ChiselGeneratorAnnotation(gen)) + ).collect { + case EmittedFirrtlCircuitAnnotation(a) => a + case EmittedFirrtlModuleAnnotation(a) => a + }.map(_.value) + .mkString("") + } + behavior of "retime library" it should "pass simple retime module annotation" in { - val gen = () => new RetimeModule() + val gen = () => new RetimeModule val dir = uniqueDirName(gen, "RetimeModule") 
Logger.makeScope(Seq.empty) { val captor = new Logger.OutputCaptor Logger.setOutput(captor.printStream) - val firrtl = (new ChiselStage).emitFirrtl( - new RetimeModule(), - Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final", "--log-level", "info") - ) + + // generate low firrtl + val firrtl = getLowFirrtl(gen, + Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final", "--log-level", "info")) + firrtl.nonEmpty should be(true) //Make sure we got the RetimeTransform scheduled captor.getOutputAsString should include ("barstools.tapeout.transforms.retime.RetimeTransform") @@ -45,16 +59,17 @@ class RetimeSpec extends FlatSpec with Matchers { } it should "pass simple retime instance annotation" in { - val gen = () => new RetimeInstance() + val gen = () => new RetimeInstance val dir = uniqueDirName(gen, "RetimeInstance") Logger.makeScope(Seq.empty) { val captor = new Logger.OutputCaptor Logger.setOutput(captor.printStream) - val firrtl = (new ChiselStage).emitFirrtl( - new RetimeInstance(), - Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final", "--log-level", "info") - ) + + // generate low firrtl + val firrtl = getLowFirrtl(gen, + Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final", "--log-level", "info")) + firrtl.nonEmpty should be(true) //Make sure we got the RetimeTransform scheduled captor.getOutputAsString should include ("barstools.tapeout.transforms.retime.RetimeTransform") From 689ebdc06e29028861f3282d9af6f2304541c9db Mon Sep 17 00:00:00 2001 From: abejgonzalez Date: Fri, 11 Dec 2020 15:01:17 -0800 Subject: [PATCH 193/273] Add invalidates=false to RetimeTransform --- tapeout/src/main/scala/transforms/retime/Retime.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/tapeout/src/main/scala/transforms/retime/Retime.scala b/tapeout/src/main/scala/transforms/retime/Retime.scala index d88217c70..010ef40b9 100644 --- a/tapeout/src/main/scala/transforms/retime/Retime.scala +++ b/tapeout/src/main/scala/transforms/retime/Retime.scala @@ -17,6 +17,7 @@ class RetimeTransform extends Transform with DependencyAPIMigration { override def prerequisites: Seq[TransformDependency] = Forms.LowForm override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters + override def invalidates(a: Transform): Boolean = false override def execute(state: CircuitState): CircuitState = { state.annotations.filter(_.isInstanceOf[RetimeAnnotation]) match { From 0672411e390ef3507c9ae705ff2185e0bdf8cc7c Mon Sep 17 00:00:00 2001 From: Tim Snyder Date: Thu, 24 Dec 2020 00:45:55 +0000 Subject: [PATCH 194/273] bump mdf for ucb-bar/plsi-mdf#7 --- mdf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mdf b/mdf index 4be9b1736..e588024d7 160000 --- a/mdf +++ b/mdf @@ -1 +1 @@ -Subproject commit 4be9b173647c77f990a542f4eb5f69af01d77316 +Subproject commit e588024d706220b73f2c97ca75d6fec8dd0d41b1 From 1761d500f3238e44ce8a0c395e80ecd2861baf98 Mon Sep 17 00:00:00 2001 From: chick Date: Mon, 1 Feb 2021 10:00:52 -0800 Subject: [PATCH 195/273] Get rid of scalastyle checkers. These are outdated and not used by the rest of the chisel family. Add the scalafmt file that is used to fix code formatting. This will be used to regularize the code. That work will be done by attrition. 
--- .scalafmt.conf | 4 +- scalastyle-config.xml | 110 ------------------------------------- scalastyle-test-config.xml | 109 ------------------------------------ 3 files changed, 2 insertions(+), 221 deletions(-) delete mode 100644 scalastyle-config.xml delete mode 100644 scalastyle-test-config.xml diff --git a/.scalafmt.conf b/.scalafmt.conf index f74e55047..c53cb6086 100644 --- a/.scalafmt.conf +++ b/.scalafmt.conf @@ -1,4 +1,4 @@ -version = 2.6.4 +version = 2.7.5 maxColumn = 120 align = most @@ -23,4 +23,4 @@ verticalMultiline.atDefnSite = true optIn.annotationNewlines = true -rewrite.rules = [SortImports, PreferCurlyFors, AvoidInfix] +rewrite.rules = [SortImports, PreferCurlyFors, AvoidInfix] \ No newline at end of file diff --git a/scalastyle-config.xml b/scalastyle-config.xml deleted file mode 100644 index 57ef60a26..000000000 --- a/scalastyle-config.xml +++ /dev/null @@ -1,110 +0,0 @@ - - Scalastyle standard configuration - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - No lines ending with a ; - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - |\|\||&&|:=|<>|<=|>=|!=|===|<<|>>|##|unary_(~|\-%?|!))$]]> - - - - - - - - - - - diff --git a/scalastyle-test-config.xml b/scalastyle-test-config.xml deleted file mode 100644 index bf32aacd4..000000000 --- a/scalastyle-test-config.xml +++ /dev/null @@ -1,109 +0,0 @@ - - Scalastyle configuration for Chisel3 unit tests - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - No lines ending with a ; - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - |\|\||&&|:=|<>|<=|>=|!=|===|<<|>>|##|unary_(~|\-%?|!))$]]> - - - - - - - - - - - From 19e51f3df507efa9371ab44f49ef6637123f30bc Mon Sep 17 00:00:00 2001 From: chick Date: Wed, 3 Feb 2021 17:46:12 -0800 Subject: [PATCH 196/273] Make the directory structure match the packages All tests run as they did prior to the changes --- .../Analog.scala | 0 .../IOCell.scala | 0 .../{ => barstools/macros}/CostMetric.scala | 3 ++- .../{ => barstools/macros}/MacroCompiler.scala | 17 ++++++++--------- .../scala/{ => barstools/macros}/SynFlops.scala | 6 +++--- .../scala/{ => barstools/macros}/Utils.scala | 12 +++++------- .../{ => barstools/macros}/CostFunction.scala | 0 .../{ => barstools/macros}/Functional.scala | 0 .../macros}/MacroCompilerSpec.scala | 8 ++++---- .../scala/{ => barstools/macros}/Masks.scala | 2 -- .../{ => barstools/macros}/MultiPort.scala | 0 .../{ => barstools/macros}/SRAMCompiler.scala | 2 -- .../macros}/SimpleSplitDepth.scala | 2 -- .../macros}/SimpleSplitWidth.scala | 0 .../macros}/SpecificExamples.scala | 0 .../scala/{ => barstools/macros}/SynFlops.scala | 0 .../transforms/AddSuffixToModuleNames.scala | 8 ++++---- .../transforms/AvoidExtModuleCollisions.scala | 10 +++++----- .../transforms/ConvertToExtModPass.scala | 10 +++++----- .../tapeout}/transforms/EnumerateModules.scala | 0 .../tapeout}/transforms/Generate.scala | 2 +- .../tapeout}/transforms/ReParentCircuit.scala | 7 +++---- .../transforms/RemoveUnusedModules.scala | 10 +++++----- .../tapeout}/transforms/ResetInverter.scala | 0 .../tapeout}/transforms/retime/Retime.scala | 0 .../tapeout}/transforms/utils/FileUtils.scala | 0 .../transforms/utils/LowerAnnotations.scala | 0 .../transforms/utils/ProgrammaticBundle.scala | 0 .../tapeout}/transforms/utils/YamlHelpers.scala | 0 .../tapeout}/transforms/ResetInverterSpec.scala | 2 +- 
.../tapeout}/transforms/retime/RetimeSpec.scala | 8 +++----- 31 files changed, 49 insertions(+), 60 deletions(-) rename iocell/src/main/scala/{chisel => barstools.iocell.chisel}/Analog.scala (100%) rename iocell/src/main/scala/{chisel => barstools.iocell.chisel}/IOCell.scala (100%) rename macros/src/main/scala/{ => barstools/macros}/CostMetric.scala (99%) rename macros/src/main/scala/{ => barstools/macros}/MacroCompiler.scala (99%) rename macros/src/main/scala/{ => barstools/macros}/SynFlops.scala (98%) rename macros/src/main/scala/{ => barstools/macros}/Utils.scala (96%) rename macros/src/test/scala/{ => barstools/macros}/CostFunction.scala (100%) rename macros/src/test/scala/{ => barstools/macros}/Functional.scala (100%) rename macros/src/test/scala/{ => barstools/macros}/MacroCompilerSpec.scala (99%) rename macros/src/test/scala/{ => barstools/macros}/Masks.scala (99%) rename macros/src/test/scala/{ => barstools/macros}/MultiPort.scala (100%) rename macros/src/test/scala/{ => barstools/macros}/SRAMCompiler.scala (96%) rename macros/src/test/scala/{ => barstools/macros}/SimpleSplitDepth.scala (99%) rename macros/src/test/scala/{ => barstools/macros}/SimpleSplitWidth.scala (100%) rename macros/src/test/scala/{ => barstools/macros}/SpecificExamples.scala (100%) rename macros/src/test/scala/{ => barstools/macros}/SynFlops.scala (100%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/AddSuffixToModuleNames.scala (94%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/AvoidExtModuleCollisions.scala (83%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/ConvertToExtModPass.scala (89%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/EnumerateModules.scala (100%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/Generate.scala (99%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/ReParentCircuit.scala (89%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/RemoveUnusedModules.scala (91%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/ResetInverter.scala (100%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/retime/Retime.scala (100%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/utils/FileUtils.scala (100%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/utils/LowerAnnotations.scala (100%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/utils/ProgrammaticBundle.scala (100%) rename tapeout/src/main/scala/{ => barstools/tapeout}/transforms/utils/YamlHelpers.scala (100%) rename tapeout/src/test/scala/{ => barstools/tapeout}/transforms/ResetInverterSpec.scala (95%) rename tapeout/src/test/scala/{ => barstools/tapeout}/transforms/retime/RetimeSpec.scala (94%) diff --git a/iocell/src/main/scala/chisel/Analog.scala b/iocell/src/main/scala/barstools.iocell.chisel/Analog.scala similarity index 100% rename from iocell/src/main/scala/chisel/Analog.scala rename to iocell/src/main/scala/barstools.iocell.chisel/Analog.scala diff --git a/iocell/src/main/scala/chisel/IOCell.scala b/iocell/src/main/scala/barstools.iocell.chisel/IOCell.scala similarity index 100% rename from iocell/src/main/scala/chisel/IOCell.scala rename to iocell/src/main/scala/barstools.iocell.chisel/IOCell.scala diff --git a/macros/src/main/scala/CostMetric.scala b/macros/src/main/scala/barstools/macros/CostMetric.scala similarity index 99% rename from macros/src/main/scala/CostMetric.scala rename to 
macros/src/main/scala/barstools/macros/CostMetric.scala index b80324aa2..450201633 100644 --- a/macros/src/main/scala/CostMetric.scala +++ b/macros/src/main/scala/barstools/macros/CostMetric.scala @@ -67,8 +67,9 @@ object OldMetric extends CostMetric with CostMetricCompanion { */ class ExternalMetric(path: String) extends CostMetric { import mdf.macrolib.Utils.writeMacroToPath + import java.io._ - import scala.language.postfixOps // for !! postfix op + import scala.language.postfixOps import sys.process._ override def cost(mem: Macro, lib: Macro): Option[Double] = { diff --git a/macros/src/main/scala/MacroCompiler.scala b/macros/src/main/scala/barstools/macros/MacroCompiler.scala similarity index 99% rename from macros/src/main/scala/MacroCompiler.scala rename to macros/src/main/scala/barstools/macros/MacroCompiler.scala index c057baa60..416f2d2c3 100644 --- a/macros/src/main/scala/MacroCompiler.scala +++ b/macros/src/main/scala/barstools/macros/MacroCompiler.scala @@ -8,18 +8,17 @@ package barstools.macros -import firrtl._ -import firrtl.ir._ -import firrtl.PrimOps +import barstools.macros.Utils._ +import firrtl.CompilerUtils.getLoweringTransforms import firrtl.Utils._ import firrtl.annotations._ -import firrtl.transforms.{NoDCEAnnotation} -import firrtl.CompilerUtils.getLoweringTransforms -import mdf.macrolib.{PolarizedPort, PortPolarity, SRAMMacro, SRAMGroup, SRAMCompiler} -import scala.collection.mutable.{ArrayBuffer, HashMap} +import firrtl.ir._ +import firrtl.{PrimOps, _} +import mdf.macrolib._ + import java.io.{File, FileWriter} -import scala.io.{Source} -import Utils._ +import scala.collection.mutable.{ArrayBuffer, HashMap} +import scala.io.Source case class MacroCompilerException(msg: String) extends Exception(msg) diff --git a/macros/src/main/scala/SynFlops.scala b/macros/src/main/scala/barstools/macros/SynFlops.scala similarity index 98% rename from macros/src/main/scala/SynFlops.scala rename to macros/src/main/scala/barstools/macros/SynFlops.scala index f815b4cbb..df7390d0a 100644 --- a/macros/src/main/scala/SynFlops.scala +++ b/macros/src/main/scala/barstools/macros/SynFlops.scala @@ -2,11 +2,11 @@ package barstools.macros +import barstools.macros.Utils._ +import firrtl.Utils._ import firrtl._ import firrtl.ir._ -import firrtl.Utils._ -import firrtl.passes.MemPortUtils.{memPortField, memType} -import Utils._ +import firrtl.passes.MemPortUtils.memPortField class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pass { val extraMods = scala.collection.mutable.ArrayBuffer.empty[Module] diff --git a/macros/src/main/scala/Utils.scala b/macros/src/main/scala/barstools/macros/Utils.scala similarity index 96% rename from macros/src/main/scala/Utils.scala rename to macros/src/main/scala/barstools/macros/Utils.scala index c416ca6a0..a65e3a8a0 100644 --- a/macros/src/main/scala/Utils.scala +++ b/macros/src/main/scala/barstools/macros/Utils.scala @@ -2,14 +2,12 @@ package barstools.macros -import firrtl._ -import firrtl.ir._ -import firrtl.PrimOps -import firrtl.passes.memlib.{MemConf, MemPort, ReadPort, WritePort, ReadWritePort, MaskedWritePort, MaskedReadWritePort} import firrtl.Utils.BoolType -import mdf.macrolib.{Constant, MacroPort, SRAMMacro} -import mdf.macrolib.{PolarizedPort, PortPolarity, ActiveLow, ActiveHigh, NegativeEdge, PositiveEdge, MacroExtraPort} -import java.io.File +import firrtl.ir._ +import firrtl.passes.memlib._ +import firrtl.{PrimOps, _} +import mdf.macrolib.{Input => _, Output => _, _} + import scala.language.implicitConversions object 
MacroCompilerMath { diff --git a/macros/src/test/scala/CostFunction.scala b/macros/src/test/scala/barstools/macros/CostFunction.scala similarity index 100% rename from macros/src/test/scala/CostFunction.scala rename to macros/src/test/scala/barstools/macros/CostFunction.scala diff --git a/macros/src/test/scala/Functional.scala b/macros/src/test/scala/barstools/macros/Functional.scala similarity index 100% rename from macros/src/test/scala/Functional.scala rename to macros/src/test/scala/barstools/macros/Functional.scala diff --git a/macros/src/test/scala/MacroCompilerSpec.scala b/macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala similarity index 99% rename from macros/src/test/scala/MacroCompilerSpec.scala rename to macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala index 8cdcf354b..cf84e5004 100644 --- a/macros/src/test/scala/MacroCompilerSpec.scala +++ b/macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala @@ -2,14 +2,13 @@ package barstools.macros +import firrtl.Parser.parse import firrtl.ir.{Circuit, NoInfo} import firrtl.passes.RemoveEmpty -import firrtl.Parser.parse - -import java.io.{File, StringWriter} - import mdf.macrolib.SRAMMacro +import java.io.File + abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalatest.Matchers { import scala.language.implicitConversions implicit def String2SomeString(i: String): Option[String] = Some(i) @@ -122,6 +121,7 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate // A collection of standard SRAM generators. trait HasSRAMGenerator { import mdf.macrolib._ + import scala.language.implicitConversions implicit def Int2SomeInt(i: Int): Option[Int] = Some(i) implicit def BigInt2SomeBigInt(i: BigInt): Option[BigInt] = Some(i) diff --git a/macros/src/test/scala/Masks.scala b/macros/src/test/scala/barstools/macros/Masks.scala similarity index 99% rename from macros/src/test/scala/Masks.scala rename to macros/src/test/scala/barstools/macros/Masks.scala index a091a42af..f104c8f2f 100644 --- a/macros/src/test/scala/Masks.scala +++ b/macros/src/test/scala/barstools/macros/Masks.scala @@ -1,7 +1,5 @@ package barstools.macros -import mdf.macrolib._ - // Test the ability of the compiler to deal with various mask combinations. 
trait MasksTestSettings { diff --git a/macros/src/test/scala/MultiPort.scala b/macros/src/test/scala/barstools/macros/MultiPort.scala similarity index 100% rename from macros/src/test/scala/MultiPort.scala rename to macros/src/test/scala/barstools/macros/MultiPort.scala diff --git a/macros/src/test/scala/SRAMCompiler.scala b/macros/src/test/scala/barstools/macros/SRAMCompiler.scala similarity index 96% rename from macros/src/test/scala/SRAMCompiler.scala rename to macros/src/test/scala/barstools/macros/SRAMCompiler.scala index 5cae4745a..e4e62de7c 100644 --- a/macros/src/test/scala/SRAMCompiler.scala +++ b/macros/src/test/scala/barstools/macros/SRAMCompiler.scala @@ -1,7 +1,5 @@ package barstools.macros -import mdf.macrolib._ - class SRAMCompiler extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { val compiler = generateSRAMCompiler("awesome", "A") val verilog = s"v-SRAMCompiler.v" diff --git a/macros/src/test/scala/SimpleSplitDepth.scala b/macros/src/test/scala/barstools/macros/SimpleSplitDepth.scala similarity index 99% rename from macros/src/test/scala/SimpleSplitDepth.scala rename to macros/src/test/scala/barstools/macros/SimpleSplitDepth.scala index e3560f9af..f016dbc70 100644 --- a/macros/src/test/scala/SimpleSplitDepth.scala +++ b/macros/src/test/scala/barstools/macros/SimpleSplitDepth.scala @@ -1,7 +1,5 @@ package barstools.macros -import mdf.macrolib._ - // Test the depth splitting aspect of the memory compiler. // This file is for simple tests: one read-write port, powers of two sizes, etc. // For example, implementing a 4096x32 memory using four 1024x32 memories. diff --git a/macros/src/test/scala/SimpleSplitWidth.scala b/macros/src/test/scala/barstools/macros/SimpleSplitWidth.scala similarity index 100% rename from macros/src/test/scala/SimpleSplitWidth.scala rename to macros/src/test/scala/barstools/macros/SimpleSplitWidth.scala diff --git a/macros/src/test/scala/SpecificExamples.scala b/macros/src/test/scala/barstools/macros/SpecificExamples.scala similarity index 100% rename from macros/src/test/scala/SpecificExamples.scala rename to macros/src/test/scala/barstools/macros/SpecificExamples.scala diff --git a/macros/src/test/scala/SynFlops.scala b/macros/src/test/scala/barstools/macros/SynFlops.scala similarity index 100% rename from macros/src/test/scala/SynFlops.scala rename to macros/src/test/scala/barstools/macros/SynFlops.scala diff --git a/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala similarity index 94% rename from tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala index 26de5425d..1e7eaa26a 100644 --- a/tapeout/src/main/scala/transforms/AddSuffixToModuleNames.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala @@ -2,12 +2,12 @@ package barstools.tapeout.transforms +import firrtl.Mappers._ import firrtl._ +import firrtl.annotations.{CircuitTarget, ModuleTarget, SingleTargetAnnotation} import firrtl.ir._ -import firrtl.Mappers._ -import firrtl.annotations.{ModuleTarget, SingleTargetAnnotation, CircuitTarget} -import firrtl.stage.TransformManager.{TransformDependency} -import firrtl.stage.{Forms} +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency case class KeepNameAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { diff --git 
a/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala similarity index 83% rename from tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala index 76ca10061..43aea6c13 100644 --- a/tapeout/src/main/scala/transforms/AvoidExtModuleCollisions.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala @@ -3,12 +3,12 @@ package barstools.tapeout.transforms import firrtl._ +import firrtl.annotations.NoTargetAnnotation import firrtl.ir._ -import firrtl.annotations.{NoTargetAnnotation} -import firrtl.options.{Dependency} -import firrtl.stage.TransformManager.{TransformDependency} -import firrtl.stage.{Forms} -import firrtl.passes.memlib.{ReplSeqMem} +import firrtl.options.Dependency +import firrtl.passes.memlib.ReplSeqMem +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency case class LinkExtModulesAnnotation(mustLink: Seq[ExtModule]) extends NoTargetAnnotation diff --git a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala similarity index 89% rename from tapeout/src/main/scala/transforms/ConvertToExtModPass.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala index 04e645fd6..e6d2272a9 100644 --- a/tapeout/src/main/scala/transforms/ConvertToExtModPass.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala @@ -3,12 +3,12 @@ package barstools.tapeout.transforms import firrtl._ +import firrtl.annotations.{ModuleTarget, ReferenceTarget, SingleTargetAnnotation} import firrtl.ir._ -import firrtl.annotations.{ModuleTarget, SingleTargetAnnotation, ReferenceTarget} -import firrtl.stage.TransformManager.{TransformDependency} -import firrtl.stage.{Forms} -import firrtl.options.{Dependency} -import firrtl.passes.memlib.{ReplSeqMem} +import firrtl.options.Dependency +import firrtl.passes.memlib.ReplSeqMem +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency case class ConvertToExtModAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { diff --git a/tapeout/src/main/scala/transforms/EnumerateModules.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala similarity index 100% rename from tapeout/src/main/scala/transforms/EnumerateModules.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala diff --git a/tapeout/src/main/scala/transforms/Generate.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala similarity index 99% rename from tapeout/src/main/scala/transforms/Generate.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala index 17b8781df..5abb71c1d 100644 --- a/tapeout/src/main/scala/transforms/Generate.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala @@ -5,7 +5,7 @@ import firrtl.annotations._ import firrtl.ir._ import firrtl.passes.memlib.ReplSeqMemAnnotation import firrtl.stage.FirrtlCircuitAnnotation -import firrtl.transforms.{BlackBoxResourceFileNameAnno, DedupModules} +import firrtl.transforms.BlackBoxResourceFileNameAnno import logger.LazyLogging trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => diff --git 
a/tapeout/src/main/scala/transforms/ReParentCircuit.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala similarity index 89% rename from tapeout/src/main/scala/transforms/ReParentCircuit.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala index cbf4d2f85..f7929ee65 100644 --- a/tapeout/src/main/scala/transforms/ReParentCircuit.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala @@ -3,11 +3,10 @@ package barstools.tapeout.transforms import firrtl._ -import firrtl.ir._ import firrtl.annotations._ -import firrtl.options.{Dependency} -import firrtl.stage.TransformManager.{TransformDependency} -import firrtl.stage.{Forms} +import firrtl.options.Dependency +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency case class ReParentCircuitAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { diff --git a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala similarity index 91% rename from tapeout/src/main/scala/transforms/RemoveUnusedModules.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala index 3feb67363..7aac89781 100644 --- a/tapeout/src/main/scala/transforms/RemoveUnusedModules.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala @@ -3,12 +3,12 @@ package barstools.tapeout.transforms import firrtl._ +import firrtl.annotations.ModuleTarget import firrtl.ir._ -import firrtl.annotations.{ModuleTarget} -import firrtl.stage.TransformManager.{TransformDependency} -import firrtl.options.{Dependency} -import firrtl.stage.{Forms} -import firrtl.passes.memlib.{ReplSeqMem} +import firrtl.options.Dependency +import firrtl.passes.memlib.ReplSeqMem +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency // Removes all the unused modules in a circuit by recursing through every // instance (starting at the main module) diff --git a/tapeout/src/main/scala/transforms/ResetInverter.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala similarity index 100% rename from tapeout/src/main/scala/transforms/ResetInverter.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala diff --git a/tapeout/src/main/scala/transforms/retime/Retime.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala similarity index 100% rename from tapeout/src/main/scala/transforms/retime/Retime.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala diff --git a/tapeout/src/main/scala/transforms/utils/FileUtils.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala similarity index 100% rename from tapeout/src/main/scala/transforms/utils/FileUtils.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala diff --git a/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala similarity index 100% rename from tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala diff --git a/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala similarity index 
100% rename from tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala diff --git a/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala similarity index 100% rename from tapeout/src/main/scala/transforms/utils/YamlHelpers.scala rename to tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala diff --git a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala similarity index 95% rename from tapeout/src/test/scala/transforms/ResetInverterSpec.scala rename to tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala index 9f23c3a87..9a0a1f6d8 100644 --- a/tapeout/src/test/scala/transforms/ResetInverterSpec.scala +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala @@ -3,7 +3,7 @@ package barstools.tapeout.transforms import chisel3._ -import chisel3.stage.{ChiselStage, ChiselGeneratorAnnotation} +import chisel3.stage.{ChiselGeneratorAnnotation, ChiselStage} import firrtl.{EmittedFirrtlCircuitAnnotation, EmittedFirrtlModuleAnnotation} import org.scalatest.{FreeSpec, Matchers} diff --git a/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala similarity index 94% rename from tapeout/src/test/scala/transforms/retime/RetimeSpec.scala rename to tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala index 356789917..d5168292a 100644 --- a/tapeout/src/test/scala/transforms/retime/RetimeSpec.scala +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala @@ -1,12 +1,10 @@ // See LICENSE for license details. 
-package barstools.tapeout.transforms.retime.test
+package barstools.tapeout.transforms.retime
import chisel3._
-import chisel3.stage.{ChiselStage, ChiselGeneratorAnnotation}
-import firrtl.{EmittedFirrtlCircuitAnnotation, EmittedFirrtlModuleAnnotation}
-import barstools.tapeout.transforms.retime.RetimeLib
-import firrtl.FileUtils
+import chisel3.stage.{ChiselGeneratorAnnotation, ChiselStage}
+import firrtl.{EmittedFirrtlCircuitAnnotation, EmittedFirrtlModuleAnnotation, FileUtils}
import logger.Logger
import org.scalatest.{FlatSpec, Matchers}
From 93f86a5bc67ccc87f72cc1cb71204a718c1f516c Mon Sep 17 00:00:00 2001
From: chick
Date: Wed, 3 Feb 2021 17:49:14 -0800
Subject: [PATCH 197/273] Reformat all scala files in iocells
- Mostly this reformats comments and large argument lists to classes and methods
---
 .../barstools.iocell.chisel/Analog.scala | 6 +-
 .../barstools.iocell.chisel/IOCell.scala | 130 +++++++++---------
 2 files changed, 69 insertions(+), 67 deletions(-)
diff --git a/iocell/src/main/scala/barstools.iocell.chisel/Analog.scala b/iocell/src/main/scala/barstools.iocell.chisel/Analog.scala
index e1b4fc782..0cdfc493b 100644
--- a/iocell/src/main/scala/barstools.iocell.chisel/Analog.scala
+++ b/iocell/src/main/scala/barstools.iocell.chisel/Analog.scala
@@ -6,8 +6,10 @@ import chisel3._
import chisel3.util.{HasBlackBoxResource}
import chisel3.experimental.{Analog, IntParam}
-class AnalogConst(value: Int, width: Int = 1) extends BlackBox(Map("CONST" -> IntParam(value), "WIDTH" -> IntParam(width))) with HasBlackBoxResource{
- val io = IO(new Bundle {val io = Analog(width.W) } )
+class AnalogConst(value: Int, width: Int = 1)
+ extends BlackBox(Map("CONST" -> IntParam(value), "WIDTH" -> IntParam(width)))
+ with HasBlackBoxResource {
+ val io = IO(new Bundle { val io = Analog(width.W) })
addResource("/barstools/iocell/vsrc/Analog.v")
}
diff --git a/iocell/src/main/scala/barstools.iocell.chisel/IOCell.scala b/iocell/src/main/scala/barstools.iocell.chisel/IOCell.scala
index a5926030b..d244d2984 100644
--- a/iocell/src/main/scala/barstools.iocell.chisel/IOCell.scala
+++ b/iocell/src/main/scala/barstools.iocell.chisel/IOCell.scala
@@ -4,7 +4,7 @@ package barstools.iocell.chisel
import chisel3._
import chisel3.util.{Cat, HasBlackBoxResource}
-import chisel3.experimental.{Analog, DataMirror, IO, BaseModule}
+import chisel3.experimental.{Analog, BaseModule, DataMirror, IO}
// The following four IO cell bundle types are bare-minimum functional connections
// for modeling 4 different IO cell scenarios. The intention is that the user
// (https://github.com/sifive/sifive-blocks/blob/master/src/main/scala/devices/pinctrl/PinCtrl.scala),
// but we want to avoid a dependency on an external libraries.
-/** - * The base IO bundle for an analog signal (typically something with no digital buffers inside) - * pad: off-chip (external) connection - * core: internal connection - */ +/** The base IO bundle for an analog signal (typically something with no digital buffers inside) + * pad: off-chip (external) connection + * core: internal connection + */ class AnalogIOCellBundle extends Bundle { - val pad = Analog(1.W) // Pad/bump signal (off-chip) - val core = Analog(1.W) // core signal (on-chip) + val pad = Analog(1.W) // Pad/bump signal (off-chip) + val core = Analog(1.W) // core signal (on-chip) } -/** - * The base IO bundle for a signal with runtime-controllable direction - * pad: off-chip (external) connection - * i: input to chip logic (output from IO cell) - * ie: enable signal for i - * o: output from chip logic (input to IO cell) - * oe: enable signal for o - */ +/** The base IO bundle for a signal with runtime-controllable direction + * pad: off-chip (external) connection + * i: input to chip logic (output from IO cell) + * ie: enable signal for i + * o: output from chip logic (input to IO cell) + * oe: enable signal for o + */ class DigitalGPIOCellBundle extends Bundle { val pad = Analog(1.W) val i = Output(Bool()) @@ -39,24 +37,22 @@ class DigitalGPIOCellBundle extends Bundle { val oe = Input(Bool()) } -/** - * The base IO bundle for a digital output signal - * pad: off-chip (external) connection - * o: output from chip logic (input to IO cell) - * oe: enable signal for o - */ +/** The base IO bundle for a digital output signal + * pad: off-chip (external) connection + * o: output from chip logic (input to IO cell) + * oe: enable signal for o + */ class DigitalOutIOCellBundle extends Bundle { val pad = Output(Bool()) val o = Input(Bool()) val oe = Input(Bool()) } -/** - * The base IO bundle for a digital input signal - * pad: off-chip (external) connection - * i: input to chip logic (output from IO cell) - * ie: enable signal for i - */ +/** The base IO bundle for a digital input signal + * pad: off-chip (external) connection + * i: input to chip logic (output from IO cell) + * ie: enable signal for i + */ class DigitalInIOCellBundle extends Bundle { val pad = Input(Bool()) val i = Output(Bool()) @@ -102,11 +98,10 @@ class GenericDigitalOutIOCell extends GenericIOCell with DigitalOutIOCell { val io = IO(new DigitalOutIOCellBundle) } - trait IOCellTypeParams { def analog(): AnalogIOCell - def gpio(): DigitalGPIOCell - def input(): DigitalInIOCell + def gpio(): DigitalGPIOCell + def input(): DigitalInIOCell def output(): DigitalOutIOCell } @@ -118,47 +113,49 @@ case class GenericIOCellParams() extends IOCellTypeParams { } object IOCell { - /** - * From within a RawModule or MultiIOModule context, generate new module IOs from a given - * signal and return the new IO and a Seq containing all generated IO cells. 
- * @param coreSignal The signal onto which to add IO cells - * @param name An optional name or name prefix to use for naming IO cells - * @param abstractResetAsAsync When set, will coerce abstract resets to - * AsyncReset, and otherwise to Bool (sync reset) - * @return A tuple of (the generated IO data node, a Seq of all generated IO cell instances) - */ - def generateIOFromSignal[T <: Data](coreSignal: T, name: String, - typeParams: IOCellTypeParams = GenericIOCellParams(), - abstractResetAsAsync: Boolean = false): (T, Seq[IOCell]) = - { + + /** From within a RawModule or MultiIOModule context, generate new module IOs from a given + * signal and return the new IO and a Seq containing all generated IO cells. + * @param coreSignal The signal onto which to add IO cells + * @param name An optional name or name prefix to use for naming IO cells + * @param abstractResetAsAsync When set, will coerce abstract resets to + * AsyncReset, and otherwise to Bool (sync reset) + * @return A tuple of (the generated IO data node, a Seq of all generated IO cell instances) + */ + def generateIOFromSignal[T <: Data]( + coreSignal: T, + name: String, + typeParams: IOCellTypeParams = GenericIOCellParams(), + abstractResetAsAsync: Boolean = false + ): (T, Seq[IOCell]) = { val padSignal = IO(DataMirror.internal.chiselTypeClone[T](coreSignal)).suggestName(name) val resetFn = if (abstractResetAsAsync) toAsyncReset else toSyncReset val iocells = IOCell.generateFromSignal(coreSignal, padSignal, Some(s"iocell_$name"), typeParams, resetFn) (padSignal, iocells) } - /** - * Connect two identical signals together by adding IO cells between them and return a Seq - * containing all generated IO cells. - * @param coreSignal The core-side (internal) signal onto which to connect/add IO cells - * @param padSignal The pad-side (external) signal onto which to connect IO cells - * @param name An optional name or name prefix to use for naming IO cells - * @return A Seq of all generated IO cell instances - */ - val toSyncReset: (Reset) => Bool = _.toBool + /** Connect two identical signals together by adding IO cells between them and return a Seq + * containing all generated IO cells. 
+ * @param coreSignal The core-side (internal) signal onto which to connect/add IO cells
+ * @param padSignal The pad-side (external) signal onto which to connect IO cells
+ * @param name An optional name or name prefix to use for naming IO cells
+ * @return A Seq of all generated IO cell instances
+ */
+ val toSyncReset: (Reset) => Bool = _.toBool
val toAsyncReset: (Reset) => AsyncReset = _.asAsyncReset
def generateFromSignal[T <: Data, R <: Reset](
- coreSignal: T,
- padSignal: T,
- name: Option[String] = None,
- typeParams: IOCellTypeParams = GenericIOCellParams(),
- concretizeResetFn : (Reset) => R = toSyncReset): Seq[IOCell] =
- {
+ coreSignal: T,
+ padSignal: T,
+ name: Option[String] = None,
+ typeParams: IOCellTypeParams = GenericIOCellParams(),
+ concretizeResetFn: (Reset) => R = toSyncReset
+ ): Seq[IOCell] = {
def genCell[T <: Data](
- castToBool: (T) => Bool,
- castFromBool: (Bool) => T)(
- coreSignal: T,
- padSignal: T): Seq[IOCell] = {
+ castToBool: (T) => Bool,
+ castFromBool: (Bool) => T
+ )(coreSignal: T,
+ padSignal: T
+ ): Seq[IOCell] = {
DataMirror.directionOf(coreSignal) match {
case ActualDirection.Input => {
val iocell = typeParams.input()
@@ -188,7 +185,10 @@ object IOCell {
if (coreSignal.getWidth == 0) {
Seq()
} else {
- require(coreSignal.getWidth == 1, "Analogs wider than 1 bit are not supported because we can't bit-select Analogs (https://github.com/freechipsproject/chisel3/issues/536)")
+ require(
+ coreSignal.getWidth == 1,
+ "Analogs wider than 1 bit are not supported because we can't bit-select Analogs (https://github.com/freechipsproject/chisel3/issues/536)"
+ )
val iocell = typeParams.analog()
name.foreach(n => iocell.suggestName(n))
iocell.io.core <> coreSignal
@@ -204,7 +204,7 @@ object IOCell {
// This dummy assignment will prevent invalid firrtl from being emitted
DataMirror.directionOf(coreSignal) match {
case ActualDirection.Input => coreSignal := 0.U
- case _ => {}
+ case _ => {}
}
Seq()
} else {
From 68c3425493501b17fac5af62fbf2432263784292 Mon Sep 17 00:00:00 2001
From: chick
Date: Wed, 3 Feb 2021 17:50:36 -0800
Subject: [PATCH 198/273] Reformat all scala files in macros
- Mostly this reformats comments and large argument lists to classes and methods
---
 .../scala/barstools/macros/CostMetric.scala | 84 +--
 .../barstools/macros/MacroCompiler.scala | 537 +++++++++-------
 .../scala/barstools/macros/SynFlops.scala | 236 +++---
 .../main/scala/barstools/macros/Utils.scala | 266 ++++----
 .../src/test/resources/lib-MaskPortTest.json | 48 +-
 .../test/resources/lib-WriteEnableTest.json | 42 +-
 .../scala/barstools/macros/CostFunction.scala | 39 +-
 .../barstools/macros/MacroCompilerSpec.scala | 605 ++++++++++--------
 .../test/scala/barstools/macros/Masks.scala | 165 +++--
 .../scala/barstools/macros/MultiPort.scala | 294 ++++++---
 .../scala/barstools/macros/SRAMCompiler.scala | 3 +-
 .../barstools/macros/SimpleSplitDepth.scala | 188 +++---
 .../barstools/macros/SimpleSplitWidth.scala | 257 +++++---
 .../barstools/macros/SpecificExamples.scala | 106 +--
 .../scala/barstools/macros/SynFlops.scala | 133 ++--
 15 files changed, 1771 insertions(+), 1232 deletions(-)
diff --git a/macros/src/main/scala/barstools/macros/CostMetric.scala b/macros/src/main/scala/barstools/macros/CostMetric.scala
index 450201633..f39303d30 100644
--- a/macros/src/main/scala/barstools/macros/CostMetric.scala
+++ b/macros/src/main/scala/barstools/macros/CostMetric.scala
@@ -2,27 +2,25 @@
package barstools.macros
-/**
- * Trait which can calculate the cost of compiling a memory against a
certain - * library memory macro using a cost function. - */ +/** Trait which can calculate the cost of compiling a memory against a certain + * library memory macro using a cost function. + */ // TODO: eventually explore compiling a single target memory using multiple // different kinds of target memory. trait CostMetric extends Serializable { - /** - * Cost function that returns the cost of compiling a memory using a certain - * macro. - * - * @param mem Memory macro to compile (target memory) - * @param lib Library memory macro to use (library memory) - * @return The cost of this compile, defined by this cost metric, or None if - * it cannot be compiled. - */ + + /** Cost function that returns the cost of compiling a memory using a certain + * macro. + * + * @param mem Memory macro to compile (target memory) + * @param lib Library memory macro to use (library memory) + * @return The cost of this compile, defined by this cost metric, or None if + * it cannot be compiled. + */ def cost(mem: Macro, lib: Macro): Option[Double] - /** - * Helper function to return the map of arguments (or an empty map if there are none). - */ + /** Helper function to return the map of arguments (or an empty map if there are none). + */ def commandLineParams(): Map[String, String] // We also want this to show up for the class itself. @@ -40,8 +38,9 @@ trait CostMetricCompanion { // Some default cost functions. /** Palmer's old metric. - * TODO: figure out what is the difference between this metric and the current - * default metric and either revive or delete this metric. */ + * TODO: figure out what is the difference between this metric and the current + * default metric and either revive or delete this metric. + */ object OldMetric extends CostMetric with CostMetricCompanion { override def cost(mem: Macro, lib: Macro): Option[Double] = { /* Palmer: A quick cost function (that must be kept in sync with @@ -58,13 +57,12 @@ object OldMetric extends CostMetric with CostMetricCompanion { override def construct(m: Map[String, String]) = OldMetric } -/** - * An external cost function. - * Calls the specified path with paths to the JSON MDF representation of the mem - * and lib macros. The external executable should print a Double. - * None will be returned if the external executable does not print a valid - * Double. - */ +/** An external cost function. + * Calls the specified path with paths to the JSON MDF representation of the mem + * and lib macros. The external executable should print a Double. + * None will be returned if the external executable does not print a valid + * Double. 
+ */ class ExternalMetric(path: String) extends CostMetric { import mdf.macrolib.Utils.writeMacroToPath @@ -105,7 +103,7 @@ object ExternalMetric extends CostMetricCompanion { override def construct(m: Map[String, String]) = { val pathOption = m.get("path") pathOption match { - case Some(path:String) => new ExternalMetric(path) + case Some(path: String) => new ExternalMetric(path) case _ => throw new IllegalArgumentException("ExternalMetric missing option 'path'") } } @@ -115,14 +113,17 @@ object ExternalMetric extends CostMetricCompanion { // TODO: write tests for this function to make sure it selects the right things object DefaultMetric extends CostMetric with CostMetricCompanion { override def cost(mem: Macro, lib: Macro): Option[Double] = { - val memMask = mem.src.ports map (_.maskGran) find (_.isDefined) map (_.get) - val libMask = lib.src.ports map (_.maskGran) find (_.isDefined) map (_.get) + val memMask = mem.src.ports.map(_.maskGran).find(_.isDefined).map(_.get) + val libMask = lib.src.ports.map(_.maskGran).find(_.isDefined).map(_.get) val memWidth = (memMask, libMask) match { case (None, _) => mem.src.width - case (Some(p), None) => (mem.src.width/p)*math.ceil(p.toDouble/lib.src.width)*lib.src.width //We map the mask to distinct memories + case (Some(p), None) => + (mem.src.width / p) * math.ceil( + p.toDouble / lib.src.width + ) * lib.src.width //We map the mask to distinct memories case (Some(p), Some(m)) => { - if(m <= p) (mem.src.width/p)*math.ceil(p.toDouble/m)*m //Using multiple m's to create a p (integeraly) - else (mem.src.width/p)*m //Waste the extra maskbits + if (m <= p) (mem.src.width / p) * math.ceil(p.toDouble / m) * m //Using multiple m's to create a p (integeraly) + else (mem.src.width / p) * m //Waste the extra maskbits } } val depthCost = math.ceil(mem.src.depth.toDouble / lib.src.depth.toDouble) @@ -130,10 +131,10 @@ object DefaultMetric extends CostMetric with CostMetricCompanion { val bitsCost = (lib.src.depth * lib.src.width).toDouble // Fraction of wasted bits plus const per mem val requestedBits = (mem.src.depth * mem.src.width).toDouble - val bitsWasted = depthCost*widthCost*bitsCost - requestedBits + val bitsWasted = depthCost * widthCost * bitsCost - requestedBits val wastedConst = 0.05 // 0 means waste as few bits with no regard for instance count - val costPerInst = wastedConst*depthCost*widthCost - Some(1.0*bitsWasted/requestedBits+costPerInst) + val costPerInst = wastedConst * depthCost * widthCost + Some(1.0 * bitsWasted / requestedBits + costPerInst) } override def commandLineParams = Map() @@ -148,10 +149,11 @@ object MacroCompilerUtil { // Adapted from https://stackoverflow.com/a/134918 /** Serialize an arbitrary object to String. - * Used to pass structured values through as an annotation. */ + * Used to pass structured values through as an annotation. + */ def objToString(o: Serializable): String = { val baos: ByteArrayOutputStream = new ByteArrayOutputStream - val oos: ObjectOutputStream = new ObjectOutputStream(baos) + val oos: ObjectOutputStream = new ObjectOutputStream(baos) oos.writeObject(o) oos.close() return Base64.getEncoder.encodeToString(baos.toByteArray) @@ -168,6 +170,7 @@ object MacroCompilerUtil { } object CostMetric { + /** Define some default metric. */ val default: CostMetric = DefaultMetric @@ -178,11 +181,10 @@ object CostMetric { registerCostMetric(ExternalMetric) registerCostMetric(DefaultMetric) - /** - * Register a cost metric. - * @param createFuncHelper Companion object to fetch the name and construct - * the metric. 
- */ + /** Register a cost metric. + * @param createFuncHelper Companion object to fetch the name and construct + * the metric. + */ def registerCostMetric(createFuncHelper: CostMetricCompanion): Unit = { costMetricCreators.update(createFuncHelper.name, createFuncHelper) } diff --git a/macros/src/main/scala/barstools/macros/MacroCompiler.scala b/macros/src/main/scala/barstools/macros/MacroCompiler.scala index 416f2d2c3..5ecfea8f7 100644 --- a/macros/src/main/scala/barstools/macros/MacroCompiler.scala +++ b/macros/src/main/scala/barstools/macros/MacroCompiler.scala @@ -1,10 +1,9 @@ // See LICENSE for license details. -/** - * Terminology note: - * mem - target memory to compile, in design (e.g. Mem() in rocket) - * lib - technology SRAM(s) to use to compile mem - */ +/** Terminology note: + * mem - target memory to compile, in design (e.g. Mem() in rocket) + * lib - technology SRAM(s) to use to compile mem + */ package barstools.macros @@ -29,56 +28,75 @@ case class MacroCompilerAnnotation(content: String) extends NoTargetAnnotation { def params: Params = MacroCompilerUtil.objFromString(content).asInstanceOf[Params] } - -/** - * The MacroCompilerAnnotation to trigger the macro compiler. - * Note that this annotation does NOT actually target any modules for - * compilation. It simply holds all the settings for the memory compiler. The - * actual selection of which memories to compile is set in the Params. - * - * To use, simply annotate the entire circuit itself with this annotation and - * include [[MacroCompilerTransform]]. - * - */ +/** The MacroCompilerAnnotation to trigger the macro compiler. + * Note that this annotation does NOT actually target any modules for + * compilation. It simply holds all the settings for the memory compiler. The + * actual selection of which memories to compile is set in the Params. + * + * To use, simply annotate the entire circuit itself with this annotation and + * include [[MacroCompilerTransform]]. + */ object MacroCompilerAnnotation { + /** Macro compiler mode. */ sealed trait CompilerMode + /** Strict mode - must compile all memories or error out. */ case object Strict extends CompilerMode + /** Synflops mode - compile all memories with synflops (do not map to lib at all). */ case object Synflops extends CompilerMode + /** CompileAndSynflops mode - compile all memories and create mock versions of the target libs with synflops. */ case object CompileAndSynflops extends CompilerMode - /** FallbackSynflops - compile all memories to SRAM when possible and fall back to synflops if a memory fails. **/ + + /** FallbackSynflops - compile all memories to SRAM when possible and fall back to synflops if a memory fails. * */ case object FallbackSynflops extends CompilerMode - /** CompileAvailable - compile what is possible and do nothing with uncompiled memories. **/ + + /** CompileAvailable - compile what is possible and do nothing with uncompiled memories. * */ case object CompileAvailable extends CompilerMode - /** - * The default mode for the macro compiler. - * TODO: Maybe set the default to FallbackSynflops (typical for - * vlsi_mem_gen-like scripts) once it's implemented? - */ + /** The default mode for the macro compiler. + * TODO: Maybe set the default to FallbackSynflops (typical for + * vlsi_mem_gen-like scripts) once it's implemented? 
+ */ val Default = CompileAvailable // Options as list of (CompilerMode, command-line name, description) val options: Seq[(CompilerMode, String, String)] = Seq( (Default, "default", "Select the default option from below."), (Strict, "strict", "Compile all memories to library or return an error."), - (Synflops, "synflops", "Produces synthesizable flop-based memories for all memories (do not map to lib at all); likely useful for simulation purposes."), - (CompileAndSynflops, "compileandsynflops", "Compile all memories and create mock versions of the target libs with synflops; likely also useful for simulation purposes."), - (FallbackSynflops, "fallbacksynflops", "Compile all memories to library when possible and fall back to synthesizable flop-based memories when library synth is not possible."), - (CompileAvailable, "compileavailable", "Compile all memories to library when possible and do nothing in case of errors. (default)") + ( + Synflops, + "synflops", + "Produces synthesizable flop-based memories for all memories (do not map to lib at all); likely useful for simulation purposes." + ), + ( + CompileAndSynflops, + "compileandsynflops", + "Compile all memories and create mock versions of the target libs with synflops; likely also useful for simulation purposes." + ), + ( + FallbackSynflops, + "fallbacksynflops", + "Compile all memories to library when possible and fall back to synthesizable flop-based memories when library synth is not possible." + ), + ( + CompileAvailable, + "compileavailable", + "Compile all memories to library when possible and do nothing in case of errors. (default)" + ) ) /** Helper function to select a compiler mode. */ - def stringToCompilerMode(str: String): CompilerMode = options.collectFirst { case (mode, cmd, _) if cmd == str => mode } match { + def stringToCompilerMode(str: String): CompilerMode = options.collectFirst { + case (mode, cmd, _) if cmd == str => mode + } match { case Some(x) => x - case None => throw new IllegalArgumentException("No such compiler mode " + str) + case None => throw new IllegalArgumentException("No such compiler mode " + str) } - /** - * Parameters associated to this MacroCompilerAnnotation. + /** Parameters associated to this MacroCompilerAnnotation. * * @param mem Path to memory lib * @param memFormat Type of memory lib (Some("conf"), Some("mdf"), or None (defaults to mdf)) @@ -89,26 +107,34 @@ object MacroCompilerAnnotation { * @param forceCompile Set of memories to force compiling to lib regardless of the mode * @param forceSynflops Set of memories to force compiling as flops regardless of the mode */ - case class Params(mem: String, memFormat: Option[String], lib: Option[String], hammerIR: Option[String], - costMetric: CostMetric, mode: CompilerMode, useCompiler: Boolean, - forceCompile: Set[String], forceSynflops: Set[String]) - - /** - * Create a MacroCompilerAnnotation. - * @param c Top-level circuit name (see class description) - * @param p Parameters (see above). - */ + case class Params( + mem: String, + memFormat: Option[String], + lib: Option[String], + hammerIR: Option[String], + costMetric: CostMetric, + mode: CompilerMode, + useCompiler: Boolean, + forceCompile: Set[String], + forceSynflops: Set[String]) + + /** Create a MacroCompilerAnnotation. + * @param c Top-level circuit name (see class description) + * @param p Parameters (see above). 
+ */ def apply(c: String, p: Params): MacroCompilerAnnotation = MacroCompilerAnnotation(MacroCompilerUtil.objToString(p)) } -class MacroCompilerPass(mems: Option[Seq[Macro]], - libs: Option[Seq[Macro]], - compilers: Option[SRAMCompiler], - hammerIR: Option[String], - costMetric: CostMetric = CostMetric.default, - mode: MacroCompilerAnnotation.CompilerMode = MacroCompilerAnnotation.Default) extends firrtl.passes.Pass { +class MacroCompilerPass( + mems: Option[Seq[Macro]], + libs: Option[Seq[Macro]], + compilers: Option[SRAMCompiler], + hammerIR: Option[String], + costMetric: CostMetric = CostMetric.default, + mode: MacroCompilerAnnotation.CompilerMode = MacroCompilerAnnotation.Default) + extends firrtl.passes.Pass { // Helper function to check the legality of bitPairs. // e.g. ((0,21), (22,43)) is legal // ((0,21), (22,21)) is illegal and will throw an assert @@ -120,8 +146,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], }) } - /** - * Calculate bit pairs. + /** Calculate bit pairs. * This is a list of submemories by width. * The tuples are (lsb, msb) inclusive. * Example: (0, 7) and (8, 15) might be a split for a width=16 memory into two width=8 target memories. @@ -132,7 +157,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], * @return Bit pairs or empty list if there was an error. */ private def calculateBitPairs(mem: Macro, lib: Macro): Seq[(BigInt, BigInt)] = { - val pairedPorts = mem.sortedPorts zip lib.sortedPorts + val pairedPorts = mem.sortedPorts.zip(lib.sortedPorts) val bitPairs = ArrayBuffer[(BigInt, BigInt)]() var currentLSB: BigInt = 0 @@ -203,7 +228,9 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], splitMemory(memMask.get) } else { // e.g. mem mask = 13, lib width = 8 - System.err.println(s"Unmasked target memory: unaligned mem maskGran $p with lib (${lib.src.name}) width ${libPort.src.width.get} not supported") + System.err.println( + s"Unmasked target memory: unaligned mem maskGran $p with lib (${lib.src.name}) width ${libPort.src.width.get} not supported" + ) return Seq() } } @@ -266,9 +293,11 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } def compile(mem: Macro, lib: Macro): Option[(Module, Macro)] = { - assert(mem.sortedPorts.lengthCompare(lib.sortedPorts.length) == 0, - "mem and lib should have an equal number of ports") - val pairedPorts = mem.sortedPorts zip lib.sortedPorts + assert( + mem.sortedPorts.lengthCompare(lib.sortedPorts.length) == 0, + "mem and lib should have an equal number of ports" + ) + val pairedPorts = mem.sortedPorts.zip(lib.sortedPorts) // Width mapping. See calculateBitPairs. val bitPairs: Seq[(BigInt, BigInt)] = calculateBitPairs(mem, lib) @@ -287,14 +316,14 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], /* Palmer: If we've got a parallel memory then we've got to take the * address bits into account. 
*/ if (mem.src.depth > lib.src.depth) { - mem.src.ports foreach { port => + mem.src.ports.foreach { port => val high = MacroCompilerMath.ceilLog2(mem.src.depth) val low = MacroCompilerMath.ceilLog2(lib.src.depth) val ref = WRef(port.address.name) val nodeName = s"${ref.name}_sel" - val tpe = UIntType(IntWidth(high-low)) + val tpe = UIntType(IntWidth(high - low)) selects(ref.name) = WRef(nodeName, tpe) - stmts += DefNode(NoInfo, nodeName, bits(ref, high-1, low)) + stmts += DefNode(NoInfo, nodeName, bits(ref, high - 1, low)) // Donggyu: output selection should be piped if (port.output.isDefined) { val regName = s"${ref.name}_sel_reg" @@ -303,7 +332,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], and(WRef(ce.name, BoolType), WRef(re.name, BoolType)) case (Some(ce), None) => WRef(ce.name, BoolType) case (None, Some(re)) => WRef(re.name, BoolType) - case (None, None) => one + case (None, None) => one } selectRegs(ref.name) = WRef(regName, tpe) stmts += DefRegister(NoInfo, regName, tpe, WRef(port.clock.get.name), zero, WRef(regName)) @@ -317,18 +346,18 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Create the instance. stmts += WDefInstance(NoInfo, name, lib.src.name, lib.tpe) // Connect extra ports of the lib. - stmts ++= lib.extraPorts map { case (portName, portValue) => + stmts ++= lib.extraPorts.map { case (portName, portValue) => Connect(NoInfo, WSubField(WRef(name), portName), portValue) } } for ((memPort, libPort) <- pairedPorts) { - val addrMatch = selects get memPort.src.address.name match { + val addrMatch = selects.get(memPort.src.address.name) match { case None => one case Some(addr) => val index = UIntLiteral(i, IntWidth(bitWidth(addr.tpe))) DoPrim(PrimOps.Eq, Seq(addr, index), Nil, index.tpe) } - val addrMatchReg = selectRegs get memPort.src.address.name match { + val addrMatchReg = selectRegs.get(memPort.src.address.name) match { case None => one case Some(reg) => val index = UIntLiteral(i, IntWidth(bitWidth(reg.tpe))) @@ -341,29 +370,22 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], for (((low, high), j) <- bitPairs.zipWithIndex) { val inst = WRef(s"mem_${i}_${j}", lib.tpe) - def connectPorts2(mem: Expression, - lib: String, - polarity: Option[PortPolarity]): Statement = + def connectPorts2(mem: Expression, lib: String, polarity: Option[PortPolarity]): Statement = Connect(NoInfo, WSubField(inst, lib), portToExpression(mem, polarity)) - def connectPorts(mem: Expression, - lib: String, - polarity: PortPolarity): Statement = + def connectPorts(mem: Expression, lib: String, polarity: PortPolarity): Statement = connectPorts2(mem, lib, Some(polarity)) // Clock port mapping /* Palmer: FIXME: I don't handle memories with read/write clocks yet. */ /* Colin not all libPorts have clocks but all memPorts do*/ libPort.src.clock.foreach { cPort => - stmts += connectPorts(WRef(memPort.src.clock.get.name), - cPort.name, - cPort.polarity) } + stmts += connectPorts(WRef(memPort.src.clock.get.name), cPort.name, cPort.polarity) + } // Adress port mapping /* Palmer: The address port to a memory is just the low-order bits of * the top address. */ - stmts += connectPorts(WRef(memPort.src.address.name), - libPort.src.address.name, - libPort.src.address.polarity) + stmts += connectPorts(WRef(memPort.src.address.name), libPort.src.address.name, libPort.src.address.polarity) // Output port mapping (memPort.src.output, libPort.src.output) match { @@ -373,20 +395,20 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], * done after generating all the memories. 
This saves up the * output statements for later. */ val name = s"${mem}_${i}_${j}" // This name is the output from the instance (mem vs ${mem}). - val exp = portToExpression(bits(WSubField(inst, lib), high-low, 0), Some(lib_polarity)) + val exp = portToExpression(bits(WSubField(inst, lib), high - low, 0), Some(lib_polarity)) stmts += DefNode(NoInfo, name, exp) cats += WRef(name) case (None, Some(lib)) => - /* Palmer: If the inner memory has an output port but the outer - * one doesn't then it's safe to just leave the outer - * port floating. */ + /* Palmer: If the inner memory has an output port but the outer + * one doesn't then it's safe to just leave the outer + * port floating. */ case (None, None) => - /* Palmer: If there's no output ports at all (ie, read-only - * port on the memory) then just don't worry about it, - * there's nothing to do. */ + /* Palmer: If there's no output ports at all (ie, read-only + * port on the memory) then just don't worry about it, + * there's nothing to do. */ case (Some(PolarizedPort(mem, _)), None) => - System.err println "WARNING: Unable to match output ports on memory" - System.err println s" outer output port: ${mem}" + System.err.println("WARNING: Unable to match output ports on memory") + System.err.println(s" outer output port: ${mem}") return None } @@ -396,7 +418,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], /* Palmer: The input port to a memory just needs to happen in parallel, * this does a part select to narrow the memory down. */ stmts += connectPorts(bits(WRef(mem), high, low), lib, lib_polarity) - case (None, Some(lib)) => + case (None, Some(lib)) => /* Palmer: If the inner memory has an input port but the other * one doesn't then it's safe to just leave the inner * port floating. This should be handled by the @@ -405,12 +427,12 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], //Firrtl cares about dangling inputs now tie it off stmts += IsInvalid(NoInfo, WSubField(inst, lib.name)) case (None, None) => - /* Palmer: If there's no input ports at all (ie, read-only - * port on the memory) then just don't worry about it, - * there's nothing to do. */ + /* Palmer: If there's no input ports at all (ie, read-only + * port on the memory) then just don't worry about it, + * there's nothing to do. */ case (Some(PolarizedPort(mem, _)), None) => - System.err println "WARNING: Unable to match input ports on memory" - System.err println s" outer input port: ${mem}" + System.err.println("WARNING: Unable to match input ports on memory") + System.err.println(s" outer input port: ${mem}") return None } @@ -429,26 +451,33 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Example: if we have a lib whose maskGran is 8 but our mem's maskGran is 4. // The other case is if we're using a larger lib than mem. val usingLessThanLibMaskGran = (memPort.src.maskGran.get < libPort.src.effectiveMaskGran) - val effectiveLibWidth = if (usingLessThanLibMaskGran) - memPort.src.maskGran.get - else - libPort.src.width.get - - cat(((0 until libPort.src.width.get by libPort.src.effectiveMaskGran) map (i => { - if (usingLessThanLibMaskGran && i >= effectiveLibWidth) { - // If the memMaskGran is smaller than the lib's gran, then - // zero out the upper bits. - zero - } else { - if ((low + i) >= memPort.src.width.get) { - // If our bit is larger than the whole width of the mem, just zero out the upper bits. - zero - } else { - // Pick the appropriate bit from the mem mask. 
- bits(WRef(mem), (low + i) / memPort.src.effectiveMaskGran) - } - } - })).reverse) + val effectiveLibWidth = + if (usingLessThanLibMaskGran) + memPort.src.maskGran.get + else + libPort.src.width.get + + cat( + ( + (0 until libPort.src.width.get by libPort.src.effectiveMaskGran) + .map(i => { + if (usingLessThanLibMaskGran && i >= effectiveLibWidth) { + // If the memMaskGran is smaller than the lib's gran, then + // zero out the upper bits. + zero + } else { + if ((low + i) >= memPort.src.width.get) { + // If our bit is larger than the whole width of the mem, just zero out the upper bits. + zero + } else { + // Pick the appropriate bit from the mem mask. + bits(WRef(mem), (low + i) / memPort.src.effectiveMaskGran) + } + } + }) + ) + .reverse + ) } case None => /* If there is a lib mask port but no mem mask port, just turn on @@ -482,7 +511,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Chip enable port mapping val memChipEnable = memPort.src.chipEnable match { case Some(PolarizedPort(mem, _)) => WRef(mem) - case None => one + case None => one } // Read enable port mapping @@ -501,7 +530,11 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], * implement the outer memory's collection of ports using what * the inner memory has availiable. */ ((libPort.src.maskPort, libPort.src.writeEnable, libPort.src.chipEnable): @unchecked) match { - case (Some(PolarizedPort(mask, mask_polarity)), Some(PolarizedPort(we, we_polarity)), Some(PolarizedPort(en, en_polarity))) => + case ( + Some(PolarizedPort(mask, mask_polarity)), + Some(PolarizedPort(we, we_polarity)), + Some(PolarizedPort(en, en_polarity)) + ) => /* Palmer: This is the simple option: every port exists. */ stmts += connectPorts(memMask, mask, mask_polarity) stmts += connectPorts(andAddrMatch(memWriteEnable), we, we_polarity) @@ -509,8 +542,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], case (Some(PolarizedPort(mask, mask_polarity)), Some(PolarizedPort(we, we_polarity)), None) => /* Palmer: If we don't have a chip enable but do have mask ports. */ stmts += connectPorts(memMask, mask, mask_polarity) - stmts += connectPorts(andAddrMatch(and(memWriteEnable, memChipEnable)), - we, we_polarity) + stmts += connectPorts(andAddrMatch(and(memWriteEnable, memChipEnable)), we, we_polarity) case (None, Some(PolarizedPort(we, we_polarity)), chipEnable) => if (bitWidth(memMask.tpe) == 1) { /* Palmer: If we're expected to provide mask ports without a @@ -518,13 +550,15 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], * write enable port instead of the mask port. */ chipEnable match { case Some(PolarizedPort(en, en_polarity)) => { - stmts += connectPorts(andAddrMatch(and(memWriteEnable, memMask)), - we, we_polarity) + stmts += connectPorts(andAddrMatch(and(memWriteEnable, memMask)), we, we_polarity) stmts += connectPorts(andAddrMatch(memChipEnable), en, en_polarity) } case _ => { - stmts += connectPorts(andAddrMatch(and(and(memWriteEnable, memChipEnable), memMask)), - we, we_polarity) + stmts += connectPorts( + andAddrMatch(and(and(memWriteEnable, memChipEnable), memMask)), + we, + we_polarity + ) } } } else { @@ -532,8 +566,8 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], return None } case (None, None, None) => - // No write ports to match up (this may be a read-only port). - // This isn't necessarily an error condition. + // No write ports to match up (this may be a read-only port). + // This isn't necessarily an error condition. 
} } // Cat macro outputs for selection @@ -541,7 +575,7 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], case Some(PolarizedPort(mem, _)) if cats.nonEmpty => val name = s"${mem}_${i}" stmts += DefNode(NoInfo, name, cat(cats.toSeq.reverse)) - (outputs getOrElseUpdate (mem, ArrayBuffer[(Expression, Expression)]())) += + (outputs.getOrElseUpdate(mem, ArrayBuffer[(Expression, Expression)]())) += (addrMatchReg -> WRef(name)) case _ => } @@ -549,15 +583,17 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], } // Connect mem outputs val zeroOutputValue: Expression = UIntLiteral(0, IntWidth(mem.src.width)) - mem.src.ports foreach { port => + mem.src.ports.foreach { port => port.output match { - case Some(PolarizedPort(mem, _)) => outputs get mem match { - case Some(select) => - val output = (select foldRight (zeroOutputValue)) { - case ((cond, tval), fval) => Mux(cond, tval, fval, fval.tpe) } - stmts += Connect(NoInfo, WRef(mem), output) - case None => - } + case Some(PolarizedPort(mem, _)) => + outputs.get(mem) match { + case Some(select) => + val output = (select.foldRight(zeroOutputValue)) { case ((cond, tval), fval) => + Mux(cond, tval, fval, fval.tpe) + } + stmts += Connect(NoInfo, WRef(mem), output) + case None => + } case None => } } @@ -572,79 +608,84 @@ class MacroCompilerPass(mems: Option[Seq[Macro]], // Try to compile each of the memories in mems. // The 'state' is c.modules, which is a list of all the firrtl modules // in the 'circuit'. - (mems foldLeft c.modules){ (modules, mem) => - - val sram = mem.src - def groupMatchesMask(group: SRAMGroup, mem:SRAMMacro): Boolean = { - val memMask = mem.ports map (_.maskGran) find (_.isDefined) map (_.get) - val libMask = group.ports map (_.maskGran) find (_.isDefined) map (_.get) - (memMask, libMask) match { - case (None, _) => true - case (Some(_), None) => false - case (Some(m), Some(l)) => l <= m //Ignore memories that don't have nice mask + (mems.foldLeft(c.modules)) { (modules, mem) => + val sram = mem.src + def groupMatchesMask(group: SRAMGroup, mem: SRAMMacro): Boolean = { + val memMask = mem.ports.map(_.maskGran).find(_.isDefined).map(_.get) + val libMask = group.ports.map(_.maskGran).find(_.isDefined).map(_.get) + (memMask, libMask) match { + case (None, _) => true + case (Some(_), None) => false + case (Some(m), Some(l)) => l <= m //Ignore memories that don't have nice mask + } } - } - // Add compiler memories that might map well to libs - val compLibs = compilers match { - case Some(SRAMCompiler(_, groups)) => { - groups.filter(g => g.family == sram.family && groupMatchesMask(g, sram)).map( g => { - for(w <- g.width; d <- g.depth if((sram.width % w == 0) && (sram.depth % d == 0))) - yield Seq(new Macro(buildSRAMMacro(g, d, w, g.vt.head))) - } ) + // Add compiler memories that might map well to libs + val compLibs = compilers match { + case Some(SRAMCompiler(_, groups)) => { + groups + .filter(g => g.family == sram.family && groupMatchesMask(g, sram)) + .map(g => { + for { + w <- g.width + d <- g.depth if ((sram.width % w == 0) && (sram.depth % d == 0)) + } yield Seq(new Macro(buildSRAMMacro(g, d, w, g.vt.head))) + }) + } + case None => Seq() } - case None => Seq() - } - val fullLibs = libs ++ compLibs.flatten.flatten - - // Try to compile mem against each lib in libs, keeping track of the - // best compiled version, external lib used, and cost. 
- val (best, cost) = (fullLibs foldLeft (None: Option[(Module, Macro)], Double.MaxValue)){ - case ((best, cost), lib) if mem.src.ports.size != lib.src.ports.size => - /* Palmer: FIXME: This just assumes the Chisel and vendor ports are in the same - * order, but I'm starting with what actually gets generated. */ - System.err println s"INFO: unable to compile ${mem.src.name} using ${lib.src.name} port count must match" - (best, cost) - case ((best, cost), lib) => - // Run the cost function to evaluate this potential compile. - costMetric.cost(mem, lib) match { - case Some(newCost) => { - //System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${newCost}") - // Try compiling - compile(mem, lib) match { - // If it was successful and the new cost is lower - case Some(p) if (newCost < cost) => (Some(p), newCost) - case _ => (best, cost) + val fullLibs = libs ++ compLibs.flatten.flatten + + // Try to compile mem against each lib in libs, keeping track of the + // best compiled version, external lib used, and cost. + val (best, cost) = (fullLibs.foldLeft(None: Option[(Module, Macro)], Double.MaxValue)) { + case ((best, cost), lib) if mem.src.ports.size != lib.src.ports.size => + /* Palmer: FIXME: This just assumes the Chisel and vendor ports are in the same + * order, but I'm starting with what actually gets generated. */ + System.err.println(s"INFO: unable to compile ${mem.src.name} using ${lib.src.name} port count must match") + (best, cost) + case ((best, cost), lib) => + // Run the cost function to evaluate this potential compile. + costMetric.cost(mem, lib) match { + case Some(newCost) => { + //System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${newCost}") + // Try compiling + compile(mem, lib) match { + // If it was successful and the new cost is lower + case Some(p) if (newCost < cost) => (Some(p), newCost) + case _ => (best, cost) + } } + case _ => (best, cost) // Cost function rejected this combination. } - case _ => (best, cost) // Cost function rejected this combination. - } - } - - // If we were able to compile anything, then replace the original module - // in the modules list with a compiled version, as well as the extmodule - // stub for the lib. - best match { - case None => { - if (mode == MacroCompilerAnnotation.Strict) - throw new MacroCompilerException(s"Target memory ${mem.src.name} could not be compiled and strict mode is activated - aborting.") - else - modules } - case Some((mod, bb)) => - hammerIR match { - case Some(f) => { - val hammerIRWriter = new FileWriter(new File(f), !firstLib) - if(firstLib) hammerIRWriter.write("[\n") - hammerIRWriter.write(bb.src.toJSON().toString()) - hammerIRWriter.write("\n,\n") - hammerIRWriter.close() - firstLib = false - } - case None => + + // If we were able to compile anything, then replace the original module + // in the modules list with a compiled version, as well as the extmodule + // stub for the lib. + best match { + case None => { + if (mode == MacroCompilerAnnotation.Strict) + throw new MacroCompilerException( + s"Target memory ${mem.src.name} could not be compiled and strict mode is activated - aborting." 
+ ) + else + modules } - (modules filterNot (m => m.name == mod.name || m.name == bb.blackbox.name)) ++ Seq(mod, bb.blackbox) + case Some((mod, bb)) => + hammerIR match { + case Some(f) => { + val hammerIRWriter = new FileWriter(new File(f), !firstLib) + if (firstLib) hammerIRWriter.write("[\n") + hammerIRWriter.write(bb.src.toJSON().toString()) + hammerIRWriter.write("\n,\n") + hammerIRWriter.close() + firstLib = false + } + case None => + } + (modules.filterNot(m => m.name == mod.name || m.name == bb.blackbox.name)) ++ Seq(mod, bb.blackbox) + } } - } case _ => c.modules } c.copy(modules = modules) @@ -657,38 +698,46 @@ class MacroCompilerTransform extends Transform { def execute(state: CircuitState) = state.annotations.collect { case a: MacroCompilerAnnotation => a } match { case Seq(anno: MacroCompilerAnnotation) => - val MacroCompilerAnnotation.Params(memFile, memFileFormat, libFile, hammerIR, costMetric, mode, useCompiler, forceCompile, forceSynflops) = anno.params + val MacroCompilerAnnotation.Params( + memFile, + memFileFormat, + libFile, + hammerIR, + costMetric, + mode, + useCompiler, + forceCompile, + forceSynflops + ) = anno.params if (mode == MacroCompilerAnnotation.FallbackSynflops) { throw new UnsupportedOperationException("Not implemented yet") } // Check that we don't have any modules both forced to compile and synflops. - assert((forceCompile intersect forceSynflops).isEmpty, "Cannot have modules both forced to compile and synflops") + assert((forceCompile.intersect(forceSynflops)).isEmpty, "Cannot have modules both forced to compile and synflops") // Read, eliminate None, get only SRAM, make firrtl macro val mems: Option[Seq[Macro]] = (memFileFormat match { case Some("conf") => Utils.readConfFromPath(Some(memFile)) - case _ => mdf.macrolib.Utils.readMDFFromPath(Some(memFile)) + case _ => mdf.macrolib.Utils.readMDFFromPath(Some(memFile)) }) match { - case Some(x:Seq[mdf.macrolib.Macro]) => - Some(Utils.filterForSRAM(Some(x)) getOrElse(List()) map {new Macro(_)}) + case Some(x: Seq[mdf.macrolib.Macro]) => + Some(Utils.filterForSRAM(Some(x)).getOrElse(List()).map { new Macro(_) }) case _ => None } val libs: Option[Seq[Macro]] = mdf.macrolib.Utils.readMDFFromPath(libFile) match { - case Some(x:Seq[mdf.macrolib.Macro]) => - Some(Utils.filterForSRAM(Some(x)) getOrElse(List()) map {new Macro(_)}) + case Some(x: Seq[mdf.macrolib.Macro]) => + Some(Utils.filterForSRAM(Some(x)).getOrElse(List()).map { new Macro(_) }) case _ => None } val compilers: Option[mdf.macrolib.SRAMCompiler] = mdf.macrolib.Utils.readMDFFromPath(libFile) match { - case Some(x:Seq[mdf.macrolib.Macro]) => - if(useCompiler){ + case Some(x: Seq[mdf.macrolib.Macro]) => + if (useCompiler) { findSRAMCompiler(Some(x)) - } - else None + } else None case _ => None } - // Helper function to turn a set of mem names into a Seq[Macro]. 
def setToSeqMacro(names: Set[String]): Seq[Macro] = { names.toSeq.map(memName => mems.get.collectFirst { case m if m.src.name == memName => m }.get) @@ -706,12 +755,16 @@ class MacroCompilerTransform extends Transform { val transforms = Seq( new MacroCompilerPass(memCompile, libs, compilers, hammerIR, costMetric, mode), - new SynFlopsPass(true, memSynflops ++ (if (mode == MacroCompilerAnnotation.CompileAndSynflops) { - libs.get - } else { - Seq.empty - }))) - (transforms foldLeft state) ((s, xform) => xform runTransform s).copy(form = outputForm) + new SynFlopsPass( + true, + memSynflops ++ (if (mode == MacroCompilerAnnotation.CompileAndSynflops) { + libs.get + } else { + Seq.empty + }) + ) + ) + (transforms.foldLeft(state))((s, xform) => xform.runTransform(s)).copy(form = outputForm) case _ => state } } @@ -729,7 +782,8 @@ class MacroCompilerOptimizations extends SeqTransform { new firrtl.transforms.ConstantPropagation, passes.Legalize, passes.SplitExpressions, - passes.CommonSubexpressionElimination) + passes.CommonSubexpressionElimination + ) } class MacroCompiler extends Compiler { @@ -756,8 +810,9 @@ object MacroCompiler extends App { type MacroParamMap = Map[MacroParam, String] type CostParamMap = Map[String, String] type ForcedMemories = (Set[String], Set[String]) - val modeOptions: Seq[String] = MacroCompilerAnnotation.options - .map { case (_, cmd, description) => s" $cmd: $description" } + val modeOptions: Seq[String] = MacroCompilerAnnotation.options.map { case (_, cmd, description) => + s" $cmd: $description" + } val usage: String = (Seq( "Options:", " -n, --macro-conf: The set of macros to compile in firrtl-generated conf format (exclusive with -m)", @@ -772,16 +827,20 @@ object MacroCompiler extends App { " --force-compile [mem]: Force the given memory to be compiled to target libs regardless of the mode", " --force-synflops [mem]: Force the given memory to be compiled via synflops regardless of the mode", " --mode:" - ) ++ modeOptions) mkString "\n" - - def parseArgs(map: MacroParamMap, costMap: CostParamMap, forcedMemories: ForcedMemories, - args: List[String]): (MacroParamMap, CostParamMap, ForcedMemories) = + ) ++ modeOptions).mkString("\n") + + def parseArgs( + map: MacroParamMap, + costMap: CostParamMap, + forcedMemories: ForcedMemories, + args: List[String] + ): (MacroParamMap, CostParamMap, ForcedMemories) = args match { case Nil => (map, costMap, forcedMemories) case ("-n" | "--macro-conf") :: value :: tail => - parseArgs(map + (Macros -> value) + (MacrosFormat -> "conf"), costMap, forcedMemories, tail) + parseArgs(map + (Macros -> value) + (MacrosFormat -> "conf"), costMap, forcedMemories, tail) case ("-m" | "--macro-mdf") :: value :: tail => - parseArgs(map + (Macros -> value) + (MacrosFormat -> "mdf"), costMap, forcedMemories, tail) + parseArgs(map + (Macros -> value) + (MacrosFormat -> "mdf"), costMap, forcedMemories, tail) case ("-l" | "--library") :: value :: tail => parseArgs(map + (Library -> value), costMap, forcedMemories, tail) case ("-u" | "--use-compiler") :: tail => @@ -809,11 +868,17 @@ object MacroCompiler extends App { } def run(args: List[String]) { - val (params, costParams, forcedMemories) = parseArgs(Map[MacroParam, String](), Map[String, String](), (Set.empty, Set.empty), args) + val (params, costParams, forcedMemories) = + parseArgs(Map[MacroParam, String](), Map[String, String](), (Set.empty, Set.empty), args) try { val macros = params.get(MacrosFormat) match { - case Some("conf") => 
Utils.filterForSRAM(Utils.readConfFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) - case _ => Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get map (x => (new Macro(x)).blackbox) + case Some("conf") => + Utils.filterForSRAM(Utils.readConfFromPath(params.get(Macros))).get.map(x => (new Macro(x)).blackbox) + case _ => + Utils + .filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))) + .get + .map(x => (new Macro(x)).blackbox) } if (macros.nonEmpty) { @@ -821,24 +886,28 @@ object MacroCompiler extends App { // determined as the firrtl "top-level module". val circuit = Circuit(NoInfo, macros, macros.last.name) val annotations = AnnotationSeq( - Seq(MacroCompilerAnnotation( - circuit.main, - MacroCompilerAnnotation.Params( - params.get(Macros).get, params.get(MacrosFormat), params.get(Library), - params.get(HammerIR), - CostMetric.getCostMetric(params.getOrElse(CostFunc, "default"), costParams), - MacroCompilerAnnotation.stringToCompilerMode(params.getOrElse(Mode, "default")), - params.contains(UseCompiler), - forceCompile = forcedMemories._1, forceSynflops = forcedMemories._2 + Seq( + MacroCompilerAnnotation( + circuit.main, + MacroCompilerAnnotation.Params( + params.get(Macros).get, + params.get(MacrosFormat), + params.get(Library), + params.get(HammerIR), + CostMetric.getCostMetric(params.getOrElse(CostFunc, "default"), costParams), + MacroCompilerAnnotation.stringToCompilerMode(params.getOrElse(Mode, "default")), + params.contains(UseCompiler), + forceCompile = forcedMemories._1, + forceSynflops = forcedMemories._2 + ) ) - )) + ) ) // The actual MacroCompilerTransform basically just generates an input circuit val macroCompilerInput = CircuitState(circuit, MidForm, annotations) val macroCompiled = (new MacroCompilerTransform).execute(macroCompilerInput) - // Since the MacroCompiler defines its own CLI, reconcile this with FIRRTL options val firOptions = new ExecutionOptionsManager("macrocompiler") with HasFirrtlOptions { firrtlOptions = FirrtlExecutionOptions( @@ -864,7 +933,7 @@ object MacroCompiler extends App { } } else { // Warn user - System.err println "WARNING: Empty *.mems.conf file. No memories generated." + System.err.println("WARNING: Empty *.mems.conf file. 
No memories generated.") // Emit empty verilog file if no macros found params.get(Verilog) match { diff --git a/macros/src/main/scala/barstools/macros/SynFlops.scala b/macros/src/main/scala/barstools/macros/SynFlops.scala index df7390d0a..77ea4c962 100644 --- a/macros/src/main/scala/barstools/macros/SynFlops.scala +++ b/macros/src/main/scala/barstools/macros/SynFlops.scala @@ -10,132 +10,142 @@ import firrtl.passes.MemPortUtils.memPortField class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pass { val extraMods = scala.collection.mutable.ArrayBuffer.empty[Module] - lazy val libMods = (libs map { lib => lib.src.name -> { - val (dataType, dataWidth) = (lib.src.ports foldLeft (None: Option[BigInt]))((res, port) => - (res, port.maskPort) match { - case (_, None) => - res - case (None, Some(_)) => - Some(port.effectiveMaskGran) - case (Some(x), Some(_)) => - assert(x == port.effectiveMaskGran) - res + lazy val libMods = (libs.map { lib => + lib.src.name -> { + val (dataType, dataWidth) = (lib.src.ports.foldLeft(None: Option[BigInt]))((res, port) => + (res, port.maskPort) match { + case (_, None) => + res + case (None, Some(_)) => + Some(port.effectiveMaskGran) + case (Some(x), Some(_)) => + assert(x == port.effectiveMaskGran) + res + } + ) match { + case None => (UIntType(IntWidth(lib.src.width)), lib.src.width) + case Some(gran) => (UIntType(IntWidth(gran)), gran.intValue) } - ) match { - case None => (UIntType(IntWidth(lib.src.width)), lib.src.width) - case Some(gran) => (UIntType(IntWidth(gran)), gran.intValue) - } - - val maxDepth = min(lib.src.depth, 1<<26) - val numMems = lib.src.depth / maxDepth - // Change macro to be mapped onto to look like the below mem - // by changing its depth, and width - val lib_macro = new Macro(lib.src.copy(name="split_"+lib.src.name, - depth = maxDepth, width = dataWidth, ports = lib.src.ports.map(p => - p.copy(width = p.width.map(_ => dataWidth), depth = p.depth.map(_ => maxDepth), - maskGran = p.maskGran.map(_ => dataWidth))))) - val mod_macro = (new MacroCompilerPass(None,None,None,None)).compile(lib, lib_macro) - val (real_mod, real_macro) = mod_macro.get + val maxDepth = min(lib.src.depth, 1 << 26) + val numMems = lib.src.depth / maxDepth - val mem = DefMemory( - NoInfo, - "ram", - dataType, - maxDepth, - 1, // writeLatency - 1, // readLatency. 
This is possible because of VerilogMemDelays - real_macro.readers.indices map (i => s"R_$i"), - real_macro.writers.indices map (i => s"W_$i"), - real_macro.readwriters.indices map (i => s"RW_$i") - ) - - val readConnects = real_macro.readers.zipWithIndex flatMap { case (r, i) => - val clock = portToExpression(r.src.clock.get) - val address = portToExpression(r.src.address) - val enable = (r.src chipEnable, r.src readEnable) match { - case (Some(en_port), Some(re_port)) => - and(portToExpression(en_port), - portToExpression(re_port)) - case (Some(en_port), None) => portToExpression(en_port) - case (None, Some(re_port)) => portToExpression(re_port) - case (None, None) => one - } - val data = memPortField(mem, s"R_$i", "data") - val read = data - Seq( - Connect(NoInfo, memPortField(mem, s"R_$i", "clk"), clock), - Connect(NoInfo, memPortField(mem, s"R_$i", "addr"), address), - Connect(NoInfo, memPortField(mem, s"R_$i", "en"), enable), - Connect(NoInfo, WRef(r.src.output.get.name), read) + // Change macro to be mapped onto to look like the below mem + // by changing its depth, and width + val lib_macro = new Macro( + lib.src.copy( + name = "split_" + lib.src.name, + depth = maxDepth, + width = dataWidth, + ports = lib.src.ports.map(p => + p.copy( + width = p.width.map(_ => dataWidth), + depth = p.depth.map(_ => maxDepth), + maskGran = p.maskGran.map(_ => dataWidth) + ) + ) + ) ) - } + val mod_macro = (new MacroCompilerPass(None, None, None, None)).compile(lib, lib_macro) + val (real_mod, real_macro) = mod_macro.get - val writeConnects = real_macro.writers.zipWithIndex flatMap { case (w, i) => - val clock = portToExpression(w.src.clock.get) - val address = portToExpression(w.src.address) - val enable = (w.src.chipEnable, w.src.writeEnable) match { - case (Some(en), Some(we)) => - and(portToExpression(en), - portToExpression(we)) - case (Some(en), None) => portToExpression(en) - case (None, Some(we)) => portToExpression(we) - case (None, None) => zero // is it possible? - } - val mask = w.src.maskPort match { - case Some(m) => portToExpression(m) - case None => one - } - val data = memPortField(mem, s"W_$i", "data") - val write = portToExpression(w.src.input.get) - Seq( - Connect(NoInfo, memPortField(mem, s"W_$i", "clk"), clock), - Connect(NoInfo, memPortField(mem, s"W_$i", "addr"), address), - Connect(NoInfo, memPortField(mem, s"W_$i", "en"), enable), - Connect(NoInfo, memPortField(mem, s"W_$i", "mask"), mask), - Connect(NoInfo, data, write) + val mem = DefMemory( + NoInfo, + "ram", + dataType, + maxDepth, + 1, // writeLatency + 1, // readLatency. This is possible because of VerilogMemDelays + real_macro.readers.indices.map(i => s"R_$i"), + real_macro.writers.indices.map(i => s"W_$i"), + real_macro.readwriters.indices.map(i => s"RW_$i") ) - } - val readwriteConnects = real_macro.readwriters.zipWithIndex flatMap { case (rw, i) => - val clock = portToExpression(rw.src.clock.get) - val address = portToExpression(rw.src.address) - val wmode = rw.src.writeEnable match { - case Some(we) => portToExpression(we) - case None => zero // is it possible? 
+ val readConnects = real_macro.readers.zipWithIndex.flatMap { case (r, i) => + val clock = portToExpression(r.src.clock.get) + val address = portToExpression(r.src.address) + val enable = (r.src chipEnable, r.src readEnable) match { + case (Some(en_port), Some(re_port)) => + and(portToExpression(en_port), portToExpression(re_port)) + case (Some(en_port), None) => portToExpression(en_port) + case (None, Some(re_port)) => portToExpression(re_port) + case (None, None) => one + } + val data = memPortField(mem, s"R_$i", "data") + val read = data + Seq( + Connect(NoInfo, memPortField(mem, s"R_$i", "clk"), clock), + Connect(NoInfo, memPortField(mem, s"R_$i", "addr"), address), + Connect(NoInfo, memPortField(mem, s"R_$i", "en"), enable), + Connect(NoInfo, WRef(r.src.output.get.name), read) + ) } - val wmask = rw.src.maskPort match { - case Some(wm) => portToExpression(wm) - case None => one + + val writeConnects = real_macro.writers.zipWithIndex.flatMap { case (w, i) => + val clock = portToExpression(w.src.clock.get) + val address = portToExpression(w.src.address) + val enable = (w.src.chipEnable, w.src.writeEnable) match { + case (Some(en), Some(we)) => + and(portToExpression(en), portToExpression(we)) + case (Some(en), None) => portToExpression(en) + case (None, Some(we)) => portToExpression(we) + case (None, None) => zero // is it possible? + } + val mask = w.src.maskPort match { + case Some(m) => portToExpression(m) + case None => one + } + val data = memPortField(mem, s"W_$i", "data") + val write = portToExpression(w.src.input.get) + Seq( + Connect(NoInfo, memPortField(mem, s"W_$i", "clk"), clock), + Connect(NoInfo, memPortField(mem, s"W_$i", "addr"), address), + Connect(NoInfo, memPortField(mem, s"W_$i", "en"), enable), + Connect(NoInfo, memPortField(mem, s"W_$i", "mask"), mask), + Connect(NoInfo, data, write) + ) } - val enable = (rw.src.chipEnable, rw.src.readEnable) match { - case (Some(en), Some(re)) => - and(portToExpression(en), or(portToExpression(re), wmode)) - case (Some(en), None) => portToExpression(en) - case (None, Some(re)) => or(portToExpression(re), wmode) - case (None, None) => one + + val readwriteConnects = real_macro.readwriters.zipWithIndex.flatMap { case (rw, i) => + val clock = portToExpression(rw.src.clock.get) + val address = portToExpression(rw.src.address) + val wmode = rw.src.writeEnable match { + case Some(we) => portToExpression(we) + case None => zero // is it possible? 
+ } + val wmask = rw.src.maskPort match { + case Some(wm) => portToExpression(wm) + case None => one + } + val enable = (rw.src.chipEnable, rw.src.readEnable) match { + case (Some(en), Some(re)) => + and(portToExpression(en), or(portToExpression(re), wmode)) + case (Some(en), None) => portToExpression(en) + case (None, Some(re)) => or(portToExpression(re), wmode) + case (None, None) => one + } + val wdata = memPortField(mem, s"RW_$i", "wdata") + val rdata = memPortField(mem, s"RW_$i", "rdata") + val write = portToExpression(rw.src.input.get) + val read = rdata + Seq( + Connect(NoInfo, memPortField(mem, s"RW_$i", "clk"), clock), + Connect(NoInfo, memPortField(mem, s"RW_$i", "addr"), address), + Connect(NoInfo, memPortField(mem, s"RW_$i", "en"), enable), + Connect(NoInfo, memPortField(mem, s"RW_$i", "wmode"), wmode), + Connect(NoInfo, memPortField(mem, s"RW_$i", "wmask"), wmask), + Connect(NoInfo, WRef(rw.src.output.get.name), read), + Connect(NoInfo, wdata, write) + ) } - val wdata = memPortField(mem, s"RW_$i", "wdata") - val rdata = memPortField(mem, s"RW_$i", "rdata") - val write = portToExpression(rw.src.input.get) - val read = rdata - Seq( - Connect(NoInfo, memPortField(mem, s"RW_$i", "clk"), clock), - Connect(NoInfo, memPortField(mem, s"RW_$i", "addr"), address), - Connect(NoInfo, memPortField(mem, s"RW_$i", "en"), enable), - Connect(NoInfo, memPortField(mem, s"RW_$i", "wmode"), wmode), - Connect(NoInfo, memPortField(mem, s"RW_$i", "wmask"), wmask), - Connect(NoInfo, WRef(rw.src.output.get.name), read), - Connect(NoInfo, wdata, write) - ) - } - extraMods.append(real_macro.module(Block(mem +: (readConnects ++ writeConnects ++ readwriteConnects)))) - real_mod - }}).toMap + extraMods.append(real_macro.module(Block(mem +: (readConnects ++ writeConnects ++ readwriteConnects)))) + real_mod + } + }).toMap def run(c: Circuit): Circuit = { if (!synflops) c - else c.copy(modules = (c.modules map (m => libMods.getOrElse(m.name, m))) ++ extraMods) + else c.copy(modules = (c.modules.map(m => libMods.getOrElse(m.name, m))) ++ extraMods) } } diff --git a/macros/src/main/scala/barstools/macros/Utils.scala b/macros/src/main/scala/barstools/macros/Utils.scala index a65e3a8a0..9afa51f37 100644 --- a/macros/src/main/scala/barstools/macros/Utils.scala +++ b/macros/src/main/scala/barstools/macros/Utils.scala @@ -11,7 +11,7 @@ import mdf.macrolib.{Input => _, Output => _, _} import scala.language.implicitConversions object MacroCompilerMath { - def ceilLog2(x: BigInt): Int = (x-1).bitLength + def ceilLog2(x: BigInt): Int = (x - 1).bitLength } class FirrtlMacroPort(port: MacroPort) { @@ -21,37 +21,46 @@ class FirrtlMacroPort(port: MacroPort) { val isWriter = port.input.nonEmpty && port.output.isEmpty val isReadWriter = port.input.nonEmpty && port.output.nonEmpty - val addrType = UIntType(IntWidth(MacroCompilerMath.ceilLog2(port.depth.get) max 1)) + val addrType = UIntType(IntWidth(MacroCompilerMath.ceilLog2(port.depth.get).max(1))) val dataType = UIntType(IntWidth(port.width.get)) val maskType = UIntType(IntWidth(port.width.get / port.effectiveMaskGran)) // Bundle representing this macro port. 
- val tpe = BundleType(Seq( - Field(port.address.name, Flip, addrType)) ++ - (port.clock map (p => Field(p.name, Flip, ClockType))) ++ - (port.input map (p => Field(p.name, Flip, dataType))) ++ - (port.output map (p => Field(p.name, Default, dataType))) ++ - (port.chipEnable map (p => Field(p.name, Flip, BoolType))) ++ - (port.readEnable map (p => Field(p.name, Flip, BoolType))) ++ - (port.writeEnable map (p => Field(p.name, Flip, BoolType))) ++ - (port.maskPort map (p => Field(p.name, Flip, maskType))) + val tpe = BundleType( + Seq(Field(port.address.name, Flip, addrType)) ++ + (port.clock.map(p => Field(p.name, Flip, ClockType))) ++ + (port.input.map(p => Field(p.name, Flip, dataType))) ++ + (port.output.map(p => Field(p.name, Default, dataType))) ++ + (port.chipEnable.map(p => Field(p.name, Flip, BoolType))) ++ + (port.readEnable.map(p => Field(p.name, Flip, BoolType))) ++ + (port.writeEnable.map(p => Field(p.name, Flip, BoolType))) ++ + (port.maskPort.map(p => Field(p.name, Flip, maskType))) + ) + val ports = tpe.fields.map(f => + Port( + NoInfo, + f.name, + f.flip match { + case Default => Output + case Flip => Input + }, + f.tpe + ) ) - val ports = tpe.fields map (f => Port( - NoInfo, f.name, f.flip match { case Default => Output case Flip => Input }, f.tpe)) } // Reads an SRAMMacro and generates firrtl blackboxes. class Macro(srcMacro: SRAMMacro) { val src = srcMacro - val firrtlPorts = srcMacro.ports map { new FirrtlMacroPort(_) } + val firrtlPorts = srcMacro.ports.map { new FirrtlMacroPort(_) } - val writers = firrtlPorts filter (p => p.isWriter) - val readers = firrtlPorts filter (p => p.isReader) - val readwriters = firrtlPorts filter (p => p.isReadWriter) + val writers = firrtlPorts.filter(p => p.isWriter) + val readers = firrtlPorts.filter(p => p.isReader) + val readwriters = firrtlPorts.filter(p => p.isReadWriter) val sortedPorts = writers ++ readers ++ readwriters - val extraPorts = srcMacro.extraPorts map { p => + val extraPorts = srcMacro.extraPorts.map { p => assert(p.portType == Constant) // TODO: release it? 
val name = p.name val width = BigInt(p.width.toLong) @@ -60,10 +69,10 @@ class Macro(srcMacro: SRAMMacro) { } // Bundle representing this memory blackbox - val tpe = BundleType(firrtlPorts flatMap (_.tpe.fields)) + val tpe = BundleType(firrtlPorts.flatMap(_.tpe.fields)) - private val modPorts = (firrtlPorts flatMap (_.ports)) ++ - (extraPorts map { case (name, value) => Port(NoInfo, name, Input, value.tpe) }) + private val modPorts = (firrtlPorts.flatMap(_.ports)) ++ + (extraPorts.map { case (name, value) => Port(NoInfo, name, Input, value.tpe) }) val blackbox = ExtModule(NoInfo, srcMacro.name, modPorts, srcMacro.name, Nil) def module(body: Statement) = Module(NoInfo, srcMacro.name, modPorts, body) } @@ -71,7 +80,8 @@ class Macro(srcMacro: SRAMMacro) { object Utils { def filterForSRAM(s: Option[Seq[mdf.macrolib.Macro]]): Option[Seq[mdf.macrolib.SRAMMacro]] = { s match { - case Some(l:Seq[mdf.macrolib.Macro]) => Some(l filter { _.isInstanceOf[mdf.macrolib.SRAMMacro] } map { m => m.asInstanceOf[mdf.macrolib.SRAMMacro] }) + case Some(l: Seq[mdf.macrolib.Macro]) => + Some(l.filter { _.isInstanceOf[mdf.macrolib.SRAMMacro] }.map { m => m.asInstanceOf[mdf.macrolib.SRAMMacro] }) case _ => None } } @@ -80,18 +90,24 @@ object Utils { path.map((p) => Utils.readConfFromString(scala.io.Source.fromFile(p).mkString)) } def readConfFromString(str: String): Seq[mdf.macrolib.Macro] = { - MemConf.fromString(str).map { m:MemConf => - val ports = m.ports.map { case (port, num) => Seq.fill(num)(port) } reduce (_ ++ _) - SRAMMacro(m.name, m.width, m.depth, Utils.portSpecToFamily(ports), Utils.portSpecToMacroPort(m.width, m.depth, m.maskGranularity, ports)) + MemConf.fromString(str).map { m: MemConf => + val ports = m.ports.map { case (port, num) => Seq.fill(num)(port) }.reduce(_ ++ _) + SRAMMacro( + m.name, + m.width, + m.depth, + Utils.portSpecToFamily(ports), + Utils.portSpecToMacroPort(m.width, m.depth, m.maskGranularity, ports) + ) } } def portSpecToFamily(ports: Seq[MemPort]): String = { - val numR = ports.count(_ match { case ReadPort => true; case _ => false}) - val numW = ports.count(_ match { case WritePort|MaskedWritePort => true; case _ => false}) - val numRW = ports.count(_ match { case ReadWritePort|MaskedReadWritePort => true; case _ => false}) - val numRStr = if(numR > 0) s"${numR}r" else "" - val numWStr = if(numW > 0) s"${numW}w" else "" - val numRWStr = if(numRW > 0) s"${numRW}rw" else "" + val numR = ports.count(_ match { case ReadPort => true; case _ => false }) + val numW = ports.count(_ match { case WritePort | MaskedWritePort => true; case _ => false }) + val numRW = ports.count(_ match { case ReadWritePort | MaskedReadWritePort => true; case _ => false }) + val numRStr = if (numR > 0) s"${numR}r" else "" + val numWStr = if (numW > 0) s"${numW}w" else "" + val numRWStr = if (numRW > 0) s"${numRW}rw" else "" return numRStr + numWStr + numRWStr } // This translates between two represenations of ports @@ -99,94 +115,128 @@ object Utils { var numR = 0 var numW = 0 var numRW = 0 - ports.map { _ match { - case ReadPort => { - val portName = s"R${numR}" - numR += 1 - MacroPort( - width=Some(width), depth=Some(depth), - address=PolarizedPort(s"${portName}_addr", ActiveHigh), - clock=Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), - readEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), - output=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) - ) } - case WritePort => { - val portName = s"W${numW}" - numW += 1 - MacroPort( - width=Some(width), depth=Some(depth), - 
address=PolarizedPort(s"${portName}_addr", ActiveHigh), - clock=Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), - writeEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), - input=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) - ) } - case MaskedWritePort => { - val portName = s"W${numW}" - numW += 1 - MacroPort( - width=Some(width), depth=Some(depth), - address=PolarizedPort(s"${portName}_addr", ActiveHigh), - clock=Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), - writeEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), - maskPort=Some(PolarizedPort(s"${portName}_mask", ActiveHigh)), - maskGran=maskGran, - input=Some(PolarizedPort(s"${portName}_data", ActiveHigh)) - ) } - case ReadWritePort => { - val portName = s"RW${numRW}" - numRW += 1 - MacroPort( - width=Some(width), depth=Some(depth), - address=PolarizedPort(s"${portName}_addr", ActiveHigh), - clock=Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), - chipEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), - writeEnable=Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), - input=Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), - output=Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) - ) } - case MaskedReadWritePort => { - val portName = s"RW${numRW}" - numRW += 1 - MacroPort( - width=Some(width), depth=Some(depth), - address=PolarizedPort(s"${portName}_addr", ActiveHigh), - clock=Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), - chipEnable=Some(PolarizedPort(s"${portName}_en", ActiveHigh)), - writeEnable=Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), - maskPort=Some(PolarizedPort(s"${portName}_wmask", ActiveHigh)), - maskGran=maskGran, - input=Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), - output=Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) - ) } - }} + ports.map { + _ match { + case ReadPort => { + val portName = s"R${numR}" + numR += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + readEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + output = Some(PolarizedPort(s"${portName}_data", ActiveHigh)) + ) + } + case WritePort => { + val portName = s"W${numW}" + numW += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + writeEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + input = Some(PolarizedPort(s"${portName}_data", ActiveHigh)) + ) + } + case MaskedWritePort => { + val portName = s"W${numW}" + numW += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + writeEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + maskPort = Some(PolarizedPort(s"${portName}_mask", ActiveHigh)), + maskGran = maskGran, + input = Some(PolarizedPort(s"${portName}_data", ActiveHigh)) + ) + } + case ReadWritePort => { + val portName = s"RW${numRW}" + numRW += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + chipEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + writeEnable = Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), + input = Some(PolarizedPort(s"${portName}_wdata", 
ActiveHigh)), + output = Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) + ) + } + case MaskedReadWritePort => { + val portName = s"RW${numRW}" + numRW += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + chipEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + writeEnable = Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), + maskPort = Some(PolarizedPort(s"${portName}_wmask", ActiveHigh)), + maskGran = maskGran, + input = Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), + output = Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) + ) + } + } + } } def findSRAMCompiler(s: Option[Seq[mdf.macrolib.Macro]]): Option[mdf.macrolib.SRAMCompiler] = { s match { - case Some(l:Seq[mdf.macrolib.Macro]) => - l collectFirst { - case x: mdf.macrolib.SRAMCompiler => x + case Some(l: Seq[mdf.macrolib.Macro]) => + l.collectFirst { case x: mdf.macrolib.SRAMCompiler => + x } case _ => None } } def buildSRAMMacros(s: mdf.macrolib.SRAMCompiler): Seq[mdf.macrolib.SRAMMacro] = { - for (g <- s.groups; d <- g.depth; w <- g.width; vt <- g.vt) - yield mdf.macrolib.SRAMMacro(makeName(g, d, w, vt), w, d, g.family, g.ports.map(_.copy(width=Some(w), depth=Some(d))), vt, g.mux, g.extraPorts) + for { + g <- s.groups + d <- g.depth + w <- g.width + vt <- g.vt + } yield mdf.macrolib.SRAMMacro( + makeName(g, d, w, vt), + w, + d, + g.family, + g.ports.map(_.copy(width = Some(w), depth = Some(d))), + vt, + g.mux, + g.extraPorts + ) } def buildSRAMMacro(g: mdf.macrolib.SRAMGroup, d: Int, w: Int, vt: String): mdf.macrolib.SRAMMacro = { - return mdf.macrolib.SRAMMacro(makeName(g, d, w, vt), w, d, g.family, g.ports.map(_.copy(width=Some(w), depth=Some(d))), vt, g.mux, g.extraPorts) + return mdf.macrolib.SRAMMacro( + makeName(g, d, w, vt), + w, + d, + g.family, + g.ports.map(_.copy(width = Some(w), depth = Some(d))), + vt, + g.mux, + g.extraPorts + ) } def makeName(g: mdf.macrolib.SRAMGroup, depth: Int, width: Int, vt: String): String = { - g.name.foldLeft(""){ (builder, next) => + g.name.foldLeft("") { (builder, next) => next match { - case "depth"|"DEPTH" => builder + depth - case "width"|"WIDTH" => builder + width - case "vt" => builder + vt.toLowerCase - case "VT" => builder + vt.toUpperCase - case "family" => builder + g.family.toLowerCase - case "FAMILY" => builder + g.family.toUpperCase - case "mux"|"MUX" => builder + g.mux - case other => builder + other + case "depth" | "DEPTH" => builder + depth + case "width" | "WIDTH" => builder + width + case "vt" => builder + vt.toLowerCase + case "VT" => builder + vt.toUpperCase + case "family" => builder + g.family.toLowerCase + case "FAMILY" => builder + g.family.toUpperCase + case "mux" | "MUX" => builder + g.mux + case other => builder + other } } } @@ -196,7 +246,7 @@ object Utils { def or(e1: Expression, e2: Expression) = DoPrim(PrimOps.Or, Seq(e1, e2), Nil, e1.tpe) def bits(e: Expression, high: BigInt, low: BigInt): Expression = - DoPrim(PrimOps.Bits, Seq(e), Seq(high, low), UIntType(IntWidth(high-low+1))) + DoPrim(PrimOps.Bits, Seq(e), Seq(high, low), UIntType(IntWidth(high - low + 1))) def bits(e: Expression, idx: BigInt): Expression = bits(e, idx, idx) def cat(es: Seq[Expression]): Expression = if (es.size == 1) es.head @@ -211,7 +261,7 @@ object Utils { def portToExpression(exp: Expression, polarity: Option[PortPolarity]): Expression = polarity match { case Some(ActiveLow) | Some(NegativeEdge) => not(exp) - 
case _ => exp + case _ => exp } // Check if a number is a power of two diff --git a/macros/src/test/resources/lib-MaskPortTest.json b/macros/src/test/resources/lib-MaskPortTest.json index 72df79474..784aeafbb 100644 --- a/macros/src/test/resources/lib-MaskPortTest.json +++ b/macros/src/test/resources/lib-MaskPortTest.json @@ -1,27 +1,29 @@ [ { - "type" : "sram", - "name" : "fake_mem", - "width" : 64, - "depth" : "512", - "mux" : 4, - "family" : "1rw", - "ports" : [ { - "address port name" : "addr", - "address port polarity" : "active high", - "clock port name" : "clk", - "clock port polarity" : "positive edge", - "write enable port name" : "wen", - "write enable port polarity" : "active high", - "read enable port name" : "ren", - "read enable port polarity" : "active high", - "output port name" : "dataout", - "output port polarity" : "active high", - "input port name" : "datain", - "input port polarity" : "active high", - "mask port name" : "mport", - "mask port polarity" : "active low", - "mask granularity" : 1 - } ] + "type": "sram", + "name": "fake_mem", + "width": 64, + "depth": "512", + "mux": 4, + "family": "1rw", + "ports": [ + { + "address port name": "addr", + "address port polarity": "active high", + "clock port name": "clk", + "clock port polarity": "positive edge", + "write enable port name": "wen", + "write enable port polarity": "active high", + "read enable port name": "ren", + "read enable port polarity": "active high", + "output port name": "dataout", + "output port polarity": "active high", + "input port name": "datain", + "input port polarity": "active high", + "mask port name": "mport", + "mask port polarity": "active low", + "mask granularity": 1 + } + ] } ] diff --git a/macros/src/test/resources/lib-WriteEnableTest.json b/macros/src/test/resources/lib-WriteEnableTest.json index be7852a6c..50acef413 100644 --- a/macros/src/test/resources/lib-WriteEnableTest.json +++ b/macros/src/test/resources/lib-WriteEnableTest.json @@ -1,24 +1,26 @@ [ { - "type" : "sram", - "name" : "fake_mem", - "width" : 64, - "depth" : "4096", - "mux" : 4, - "family" : "1rw", - "ports" : [ { - "address port name" : "addr", - "address port polarity" : "active high", - "clock port name" : "clk", - "clock port polarity" : "positive edge", - "write enable port name" : "wen", - "write enable port polarity" : "active high", - "read enable port name" : "ren", - "read enable port polarity" : "active high", - "output port name" : "dataout", - "output port polarity" : "active high", - "input port name" : "datain", - "input port polarity" : "active high" - } ] + "type": "sram", + "name": "fake_mem", + "width": 64, + "depth": "4096", + "mux": 4, + "family": "1rw", + "ports": [ + { + "address port name": "addr", + "address port polarity": "active high", + "clock port name": "clk", + "clock port polarity": "positive edge", + "write enable port name": "wen", + "write enable port polarity": "active high", + "read enable port name": "ren", + "read enable port polarity": "active high", + "output port name": "dataout", + "output port polarity": "active high", + "input port name": "datain", + "input port polarity": "active high" + } + ] } ] diff --git a/macros/src/test/scala/barstools/macros/CostFunction.scala b/macros/src/test/scala/barstools/macros/CostFunction.scala index b8a27f7fe..ceb7a61ae 100644 --- a/macros/src/test/scala/barstools/macros/CostFunction.scala +++ b/macros/src/test/scala/barstools/macros/CostFunction.scala @@ -4,10 +4,9 @@ import mdf.macrolib._ /** Tests to check that the cost function 
mechanism is working properly. */ -/** - * A test metric that simply favours memories with smaller widths, to test that - * the metric is chosen properly. - */ +/** A test metric that simply favours memories with smaller widths, to test that + * the metric is chosen properly. + */ object TestMinWidthMetric extends CostMetric with CostMetricCompanion { // Smaller width = lower cost = favoured override def cost(mem: Macro, lib: Macro): Option[Double] = Some(lib.src.width) @@ -30,29 +29,29 @@ class SelectCostMetric extends MacroCompilerSpec with HasSRAMGenerator { val libSRAMs = Seq( SRAMMacro( - name="SRAM_WIDTH_128", - depth=BigInt(1024), - width=128, - family="1rw", - ports=Seq( + name = "SRAM_WIDTH_128", + depth = BigInt(1024), + width = 128, + family = "1rw", + ports = Seq( generateReadWritePort("", 128, BigInt(1024)) ) ), SRAMMacro( - name="SRAM_WIDTH_64", - depth=BigInt(1024), - width=64, - family="1rw", - ports=Seq( + name = "SRAM_WIDTH_64", + depth = BigInt(1024), + width = 64, + family = "1rw", + ports = Seq( generateReadWritePort("", 64, BigInt(1024)) ) ), SRAMMacro( - name="SRAM_WIDTH_32", - depth=BigInt(1024), - width=32, - family="1rw", - ports=Seq( + name = "SRAM_WIDTH_32", + depth = BigInt(1024), + width = 32, + family = "1rw", + ports = Seq( generateReadWritePort("", 32, BigInt(1024)) ) ) @@ -65,7 +64,7 @@ class SelectCostMetric extends MacroCompilerSpec with HasSRAMGenerator { // Check that the min width SRAM was chosen, even though it is less efficient. val output = -""" + """ circuit target_memory : module target_memory : input addr : UInt<10> diff --git a/macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala b/macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala index cf84e5004..9140ce24d 100644 --- a/macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala +++ b/macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala @@ -18,12 +18,12 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate // Override these to change the prefixing of macroDir and testDir val memPrefix: String = testDir val libPrefix: String = testDir - val vPrefix: String = testDir + val vPrefix: String = testDir // Override this to use a different cost metric. // If this is None, the compile() call will not have any -c/-cp arguments, and // execute() will use CostMetric.default. - val costMetric: Option[CostMetric] = None + val costMetric: Option[CostMetric] = None private def getCostMetric: CostMetric = costMetric.getOrElse(CostMetric.default) private def costMetricCmdLine = { @@ -32,17 +32,20 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate case Some(m) => { val name = m.name val params = m.commandLineParams - List("-c", name) ++ params.flatMap{ case (key, value) => List("-cp", key, value) } + List("-c", name) ++ params.flatMap { case (key, value) => List("-cp", key, value) } } } } private def args(mem: String, lib: Option[String], v: String, synflops: Boolean, useCompiler: Boolean) = List("-m", mem.toString, "-v", v) ++ - (lib match { case None => Nil case Some(l) => List("-l", l.toString) }) ++ - costMetricCmdLine ++ - (if (synflops) List("--mode", "synflops") else Nil) ++ - (if (useCompiler) List("--use-compiler") else Nil) + (lib match { + case None => Nil + case Some(l) => List("-l", l.toString) + }) ++ + costMetricCmdLine ++ + (if (synflops) List("--mode", "synflops") else Nil) ++ + (if (useCompiler) List("--use-compiler") else Nil) // Run the full compiler as if from the command line interface. 
// Generates the Verilog; useful in testing since an error will throw an @@ -68,7 +71,14 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate } // Convenience function for running both compile, execute, and test at once. - def compileExecuteAndTest(mem: String, lib: Option[String], v: String, output: String, synflops: Boolean = false, useCompiler: Boolean = false): Unit = { + def compileExecuteAndTest( + mem: String, + lib: Option[String], + v: String, + output: String, + synflops: Boolean = false, + useCompiler: Boolean = false + ): Unit = { compile(mem, lib, v, synflops, useCompiler) val result = execute(mem, lib, synflops, useCompiler) test(result, output) @@ -76,43 +86,53 @@ abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalate // Compare FIRRTL outputs after reparsing output with ScalaTest ("should be"). def test(result: Circuit, output: String): Unit = { - val gold = RemoveEmpty run parse(output) - (result.serialize) should be (gold.serialize) + val gold = RemoveEmpty.run(parse(output)) + (result.serialize) should be(gold.serialize) } // Execute the macro compiler and returns a Circuit containing the output of // the memory compiler. - def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean): Circuit = execute(memFile, libFile, synflops, false) + def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean): Circuit = + execute(memFile, libFile, synflops, false) def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean, useCompiler: Boolean): Circuit = { var mem_full = concat(memPrefix, memFile) var lib_full = concat(libPrefix, libFile) require(memFile.isDefined) - val mems: Seq[Macro] = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(mem_full)).get map (new Macro(_)) - val libs: Option[Seq[Macro]] = if(useCompiler) { - Utils.findSRAMCompiler(mdf.macrolib.Utils.readMDFFromPath(lib_full)).map{x => Utils.buildSRAMMacros(x).map(new Macro(_)) } + val mems: Seq[Macro] = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(mem_full)).get.map(new Macro(_)) + val libs: Option[Seq[Macro]] = if (useCompiler) { + Utils.findSRAMCompiler(mdf.macrolib.Utils.readMDFFromPath(lib_full)).map { x => + Utils.buildSRAMMacros(x).map(new Macro(_)) + } } else { Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(lib_full)) match { - case Some(x) => Some(x map (new Macro(_))) - case None => None + case Some(x) => Some(x.map(new Macro(_))) + case None => None } } - val macros = mems map (_.blackbox) + val macros = mems.map(_.blackbox) val circuit = Circuit(NoInfo, macros, macros.last.name) val passes = Seq( - new MacroCompilerPass(Some(mems), libs, None, None, getCostMetric, if (synflops) MacroCompilerAnnotation.Synflops else MacroCompilerAnnotation.Default), - new SynFlopsPass(synflops, libs getOrElse mems), - RemoveEmpty) - val result: Circuit = (passes foldLeft circuit)((c, pass) => pass run c) + new MacroCompilerPass( + Some(mems), + libs, + None, + None, + getCostMetric, + if (synflops) MacroCompilerAnnotation.Synflops else MacroCompilerAnnotation.Default + ), + new SynFlopsPass(synflops, libs.getOrElse(mems)), + RemoveEmpty + ) + val result: Circuit = (passes.foldLeft(circuit))((c, pass) => pass.run(c)) result } - // Helper method to deal with String + Option[String] - private def concat(a: String, b: String): String = {a + "/" + b} + private def concat(a: String, b: String): String = { a + "/" + b } private def concat(a: String, b: Option[String]): Option[String] = { b 
match { - case Some(b2:String) => Some(a + "/" + b2) + case Some(b2: String) => Some(a + "/" + b2) case _ => None } } @@ -123,20 +143,19 @@ trait HasSRAMGenerator { import mdf.macrolib._ import scala.language.implicitConversions - implicit def Int2SomeInt(i: Int): Option[Int] = Some(i) + implicit def Int2SomeInt(i: Int): Option[Int] = Some(i) implicit def BigInt2SomeBigInt(i: BigInt): Option[BigInt] = Some(i) - // Generate a standard (read/write/combo) port for testing. // Helper methods for optional width argument def generateTestPort( - prefix: String, - width: Option[Int], - depth: Option[BigInt], - maskGran: Option[Int] = None, - read: Boolean, - readEnable: Boolean = false, - write: Boolean, + prefix: String, + width: Option[Int], + depth: Option[BigInt], + maskGran: Option[Int] = None, + read: Boolean, + readEnable: Boolean = false, + write: Boolean, writeEnable: Boolean = false ): MacroPort = { val realPrefix = if (prefix == "") "" else prefix + "_" @@ -144,44 +163,70 @@ trait HasSRAMGenerator { MacroPort( address = PolarizedPort(name = realPrefix + "addr", polarity = ActiveHigh), clock = Some(PolarizedPort(name = realPrefix + "clk", polarity = PositiveEdge)), - readEnable = if (readEnable) Some(PolarizedPort(name = realPrefix + "read_en", polarity = ActiveHigh)) else None, - writeEnable = if (writeEnable) Some(PolarizedPort(name = realPrefix + "write_en", polarity = ActiveHigh)) else None, - + writeEnable = + if (writeEnable) Some(PolarizedPort(name = realPrefix + "write_en", polarity = ActiveHigh)) else None, output = if (read) Some(PolarizedPort(name = realPrefix + "dout", polarity = ActiveHigh)) else None, input = if (write) Some(PolarizedPort(name = realPrefix + "din", polarity = ActiveHigh)) else None, - maskPort = maskGran match { case Some(x: Int) => Some(PolarizedPort(name = realPrefix + "mask", polarity = ActiveHigh)) case _ => None }, maskGran = maskGran, - - width = width, depth = depth // These numbers don't matter here. + width = width, + depth = depth // These numbers don't matter here. ) } // Generate a read port for testing. - def generateReadPort(prefix: String, width: Option[Int], depth: Option[BigInt], readEnable: Boolean = false): MacroPort = { + def generateReadPort( + prefix: String, + width: Option[Int], + depth: Option[BigInt], + readEnable: Boolean = false + ): MacroPort = { generateTestPort(prefix, width, depth, write = false, read = true, readEnable = readEnable) } // Generate a write port for testing. - def generateWritePort(prefix: String, width: Option[Int], depth: Option[BigInt], maskGran: Option[Int] = None, writeEnable: Boolean = true): MacroPort = { + def generateWritePort( + prefix: String, + width: Option[Int], + depth: Option[BigInt], + maskGran: Option[Int] = None, + writeEnable: Boolean = true + ): MacroPort = { generateTestPort(prefix, width, depth, maskGran = maskGran, write = true, read = false, writeEnable = writeEnable) } // Generate a simple read-write port for testing. 
- def generateReadWritePort(prefix: String, width: Option[Int], depth: Option[BigInt], maskGran: Option[Int] = None): MacroPort = { + def generateReadWritePort( + prefix: String, + width: Option[Int], + depth: Option[BigInt], + maskGran: Option[Int] = None + ): MacroPort = { generateTestPort( - prefix, width, depth, maskGran = maskGran, - write = true, writeEnable = true, - read = true, readEnable = false + prefix, + width, + depth, + maskGran = maskGran, + write = true, + writeEnable = true, + read = true, + readEnable = false ) } // Generate a "simple" SRAM (active high/positive edge, 1 read-write port). - def generateSRAM(name: String, prefix: String, width: Int, depth: BigInt, maskGran: Option[Int] = None, extraPorts: Seq[MacroExtraPort] = List()): SRAMMacro = { + def generateSRAM( + name: String, + prefix: String, + width: Int, + depth: BigInt, + maskGran: Option[Int] = None, + extraPorts: Seq[MacroExtraPort] = List() + ): SRAMMacro = { SRAMMacro( name = name, width = width, @@ -193,17 +238,35 @@ trait HasSRAMGenerator { } // Generate a "simple" SRAM group (active high/positive edge, 1 read-write port). - def generateSimpleSRAMGroup(prefix: String, mux: Int, depth: Range, width: Range, maskGran: Option[Int] = None, extraPorts: Seq[MacroExtraPort] = List()): SRAMGroup = { - SRAMGroup(Seq("mygroup_", "width", "x", "depth", "_", "VT"), "1rw", Seq("svt", "lvt", "ulvt"), mux, depth, width, Seq(generateReadWritePort(prefix, None, None, maskGran))) + def generateSimpleSRAMGroup( + prefix: String, + mux: Int, + depth: Range, + width: Range, + maskGran: Option[Int] = None, + extraPorts: Seq[MacroExtraPort] = List() + ): SRAMGroup = { + SRAMGroup( + Seq("mygroup_", "width", "x", "depth", "_", "VT"), + "1rw", + Seq("svt", "lvt", "ulvt"), + mux, + depth, + width, + Seq(generateReadWritePort(prefix, None, None, maskGran)) + ) } // 'vt': ('svt','lvt','ulvt'), 'mux': 2, 'depth': range(16,513,8), 'width': range(8,289,2), 'ports': 1 // 'vt': ('svt','lvt','ulvt'), 'mux': 4, 'depth': range(32,1025,16), 'width': range(4,145), 'ports': 1} def generateSRAMCompiler(name: String, prefix: String): mdf.macrolib.SRAMCompiler = { - SRAMCompiler(name, Seq( - generateSimpleSRAMGroup(prefix, 2, Range(16, 512, 8), Range(8, 288, 2)), - generateSimpleSRAMGroup(prefix, 4, Range(32, 1024, 16), Range(4, 144, 1)) - )) + SRAMCompiler( + name, + Seq( + generateSimpleSRAMGroup(prefix, 2, Range(16, 512, 8), Range(8, 288, 2)), + generateSimpleSRAMGroup(prefix, 4, Range(32, 1024, 16), Range(4, 144, 1)) + ) + ) } } @@ -212,147 +275,156 @@ trait HasSRAMGenerator { // Override this generator to specify the expected FIRRTL output. trait HasSimpleTestGenerator { this: MacroCompilerSpec with HasSRAMGenerator => - // Override these with "override lazy val". - // Why lazy? These are used in the constructor here so overriding non-lazily - // would be too late. - def useCompiler: Boolean = false - def memWidth: Int - def libWidth: Int - def memDepth: BigInt - def libDepth: BigInt - def memMaskGran: Option[Int] = None - def libMaskGran: Option[Int] = None - def extraPorts: Seq[mdf.macrolib.MacroExtraPort] = List() - def extraTag: String = "" - - // "Effective" libMaskGran by considering write_enable. - val effectiveLibMaskGran = libMaskGran.getOrElse(libWidth) - - // Override this in the sub-generator if you need a more specific name. - // Defaults to using reflection to pull the name of the test using this - // generator. 
- def generatorType: String = this.getClass.getSimpleName - - //require (memDepth >= libDepth) - - // Convenience variables to check if a mask exists. - val memHasMask = memMaskGran != None - val libHasMask = libMaskGran != None - // We need to figure out how many mask bits there are in the mem. - val memMaskBits = if (memHasMask) memWidth / memMaskGran.get else 0 - val libMaskBits = if (libHasMask) libWidth / libMaskGran.get else 0 - - val extraTagPrefixed = if (extraTag == "") "" else ("-" + extraTag) - - val mem = s"mem-${generatorType}${extraTagPrefixed}.json" - val lib = s"lib-${generatorType}${extraTagPrefixed}.json" - val v = s"${generatorType}${extraTagPrefixed}.v" - - lazy val mem_name = "target_memory" - val mem_addr_width = MacroCompilerMath.ceilLog2(memDepth) - - lazy val lib_name = "awesome_lib_mem" - val lib_addr_width = MacroCompilerMath.ceilLog2(libDepth) - - // Override these to change the port prefixes if needed. - def libPortPrefix: String = "lib" - def memPortPrefix: String = "outer" - - // These generate "simple" SRAMs (1 masked read-write port) by default, - // but can be overridden if need be. - def generateLibSRAM() = generateSRAM(lib_name, libPortPrefix, libWidth, libDepth, libMaskGran, extraPorts) - def generateMemSRAM() = generateSRAM(mem_name, memPortPrefix, memWidth, memDepth, memMaskGran) - - def libSRAM = generateLibSRAM - def memSRAM = generateMemSRAM - - def libSRAMs: Seq[SRAMMacro] = Seq(libSRAM) - def memSRAMs: Seq[SRAMMacro] = Seq(memSRAM) - - writeToLib(lib, libSRAMs) - writeToMem(mem, memSRAMs) - - // For masks, width it's a bit tricky since we have to consider cases like - // memMaskGran = 4 and libMaskGran = 8. - // Consider the actually usable libWidth in cases like the above. - val usableLibWidth = if (memMaskGran.getOrElse(Int.MaxValue) < effectiveLibMaskGran) memMaskGran.get else libWidth - - // Number of lib instances needed to hold the mem, in both directions. - // Round up (e.g. 1.5 instances = effectively 2 instances) - val depthInstances = math.ceil(memDepth.toFloat / libDepth.toFloat).toInt - val widthInstances = math.ceil(memWidth.toFloat / usableLibWidth).toInt - - // Number of width bits in the last width-direction memory. - // e.g. if memWidth = 16 and libWidth = 8, this would be 8 since the last memory 0_1 has 8 bits of input width. - // e.g. if memWidth = 9 and libWidth = 8, this would be 1 since the last memory 0_1 has 1 bit of input width. - lazy val lastWidthBits = if (memWidth % usableLibWidth == 0) usableLibWidth else (memWidth % usableLibWidth) - lazy val selectBits = mem_addr_width - lib_addr_width - - /** - * Convenience function to generate a mask statement. - * @param widthInst Width instance (mem_0_x) - * @param depthInst Depth instance (mem_x_0) - */ - def generateMaskStatement(widthInst: Int, depthInst: Int): String = { - // Width of this submemory. - val myMemWidth = if (widthInst == widthInstances - 1) lastWidthBits else usableLibWidth - // Base bit of this submemory. - // e.g. if libWidth is 8 and this is submemory 2 (0-indexed), then this - // would be 16. - val myBaseBit = usableLibWidth*widthInst - - if (libMaskGran.isDefined) { - if (memMaskGran.isEmpty) { - // If there is no memory mask, we should just turn all the lib mask - // bits high. - s"""mem_${depthInst}_${widthInst}.lib_mask <= UInt<${libMaskBits}>("h${((1 << libMaskBits) - 1).toHexString}")""" - } else { - // Calculate which bit of outer_mask contains the given bit. - // e.g. 
if memMaskGran = 2, libMaskGran = 1 and libWidth = 4, then - // calculateMaskBit({0, 1}) = 0 and calculateMaskBit({1, 2}) = 1 - def calculateMaskBit(bit:Int): Int = bit / memMaskGran.getOrElse(memWidth) - - val bitsArr = ((libMaskBits - 1 to 0 by -1) map (x => { - if (x*libMaskGran.get > myMemWidth) { - // If we have extra mask bits leftover after the effective width, - // disable those bits. - """UInt<1>("h0")""" - } else { - val outerMaskBit = calculateMaskBit(x*libMaskGran.get + myBaseBit) - s"bits(outer_mask, ${outerMaskBit}, ${outerMaskBit})" - } - })) - val maskVal = bitsArr.reduceRight((bit, rest) => s"cat($bit, $rest)") - s"mem_${depthInst}_${widthInst}.lib_mask <= ${maskVal}" - } - } else "" - } - - /** Helper function to generate a port. - * - * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") - * @param addrWidth Address port width - * @param width data width - * @param write Has a write port? - * @param writeEnable Has a write enable port? - * @param read Has a read port? - * @param readEnable Has a read enable port? - * @param mask Mask granularity (# bits) of the port or None. - * @param extraPorts Extra ports (name, # bits) - */ - def generatePort(prefix: String, addrWidth: Int, width: Int, write: Boolean, writeEnable: Boolean, read: Boolean, readEnable: Boolean, mask: Option[Int], extraPorts: Seq[(String, Int)] = Seq()): String = { - val realPrefix = if (prefix == "") "" else prefix + "_" - - val readStr = if (read) s"output ${realPrefix}dout : UInt<$width>" else "" - val writeStr = if (write) s"input ${realPrefix}din : UInt<$width>" else "" - val readEnableStr = if (readEnable) s"input ${realPrefix}read_en : UInt<1>" else "" - val writeEnableStr = if (writeEnable) s"input ${realPrefix}write_en : UInt<1>" else "" - val maskStr = mask match { - case Some(maskBits: Int) => s"input ${realPrefix}mask : UInt<$maskBits>" - case _ => "" + // Override these with "override lazy val". + // Why lazy? These are used in the constructor here so overriding non-lazily + // would be too late. + def useCompiler: Boolean = false + def memWidth: Int + def libWidth: Int + def memDepth: BigInt + def libDepth: BigInt + def memMaskGran: Option[Int] = None + def libMaskGran: Option[Int] = None + def extraPorts: Seq[mdf.macrolib.MacroExtraPort] = List() + def extraTag: String = "" + + // "Effective" libMaskGran by considering write_enable. + val effectiveLibMaskGran = libMaskGran.getOrElse(libWidth) + + // Override this in the sub-generator if you need a more specific name. + // Defaults to using reflection to pull the name of the test using this + // generator. + def generatorType: String = this.getClass.getSimpleName + + //require (memDepth >= libDepth) + + // Convenience variables to check if a mask exists. + val memHasMask = memMaskGran != None + val libHasMask = libMaskGran != None + // We need to figure out how many mask bits there are in the mem. 
+ val memMaskBits = if (memHasMask) memWidth / memMaskGran.get else 0 + val libMaskBits = if (libHasMask) libWidth / libMaskGran.get else 0 + + val extraTagPrefixed = if (extraTag == "") "" else ("-" + extraTag) + + val mem = s"mem-${generatorType}${extraTagPrefixed}.json" + val lib = s"lib-${generatorType}${extraTagPrefixed}.json" + val v = s"${generatorType}${extraTagPrefixed}.v" + + lazy val mem_name = "target_memory" + val mem_addr_width = MacroCompilerMath.ceilLog2(memDepth) + + lazy val lib_name = "awesome_lib_mem" + val lib_addr_width = MacroCompilerMath.ceilLog2(libDepth) + + // Override these to change the port prefixes if needed. + def libPortPrefix: String = "lib" + def memPortPrefix: String = "outer" + + // These generate "simple" SRAMs (1 masked read-write port) by default, + // but can be overridden if need be. + def generateLibSRAM() = generateSRAM(lib_name, libPortPrefix, libWidth, libDepth, libMaskGran, extraPorts) + def generateMemSRAM() = generateSRAM(mem_name, memPortPrefix, memWidth, memDepth, memMaskGran) + + def libSRAM = generateLibSRAM + def memSRAM = generateMemSRAM + + def libSRAMs: Seq[SRAMMacro] = Seq(libSRAM) + def memSRAMs: Seq[SRAMMacro] = Seq(memSRAM) + + writeToLib(lib, libSRAMs) + writeToMem(mem, memSRAMs) + + // For masks, width it's a bit tricky since we have to consider cases like + // memMaskGran = 4 and libMaskGran = 8. + // Consider the actually usable libWidth in cases like the above. + val usableLibWidth = if (memMaskGran.getOrElse(Int.MaxValue) < effectiveLibMaskGran) memMaskGran.get else libWidth + + // Number of lib instances needed to hold the mem, in both directions. + // Round up (e.g. 1.5 instances = effectively 2 instances) + val depthInstances = math.ceil(memDepth.toFloat / libDepth.toFloat).toInt + val widthInstances = math.ceil(memWidth.toFloat / usableLibWidth).toInt + + // Number of width bits in the last width-direction memory. + // e.g. if memWidth = 16 and libWidth = 8, this would be 8 since the last memory 0_1 has 8 bits of input width. + // e.g. if memWidth = 9 and libWidth = 8, this would be 1 since the last memory 0_1 has 1 bit of input width. + lazy val lastWidthBits = if (memWidth % usableLibWidth == 0) usableLibWidth else (memWidth % usableLibWidth) + lazy val selectBits = mem_addr_width - lib_addr_width + + /** Convenience function to generate a mask statement. + * @param widthInst Width instance (mem_0_x) + * @param depthInst Depth instance (mem_x_0) + */ + def generateMaskStatement(widthInst: Int, depthInst: Int): String = { + // Width of this submemory. + val myMemWidth = if (widthInst == widthInstances - 1) lastWidthBits else usableLibWidth + // Base bit of this submemory. + // e.g. if libWidth is 8 and this is submemory 2 (0-indexed), then this + // would be 16. + val myBaseBit = usableLibWidth * widthInst + + if (libMaskGran.isDefined) { + if (memMaskGran.isEmpty) { + // If there is no memory mask, we should just turn all the lib mask + // bits high. + s"""mem_${depthInst}_${widthInst}.lib_mask <= UInt<${libMaskBits}>("h${((1 << libMaskBits) - 1).toHexString}")""" + } else { + // Calculate which bit of outer_mask contains the given bit. + // e.g. 
if memMaskGran = 2, libMaskGran = 1 and libWidth = 4, then + // calculateMaskBit({0, 1}) = 0 and calculateMaskBit({1, 2}) = 1 + def calculateMaskBit(bit: Int): Int = bit / memMaskGran.getOrElse(memWidth) + + val bitsArr = ((libMaskBits - 1 to 0 by -1).map(x => { + if (x * libMaskGran.get > myMemWidth) { + // If we have extra mask bits leftover after the effective width, + // disable those bits. + """UInt<1>("h0")""" + } else { + val outerMaskBit = calculateMaskBit(x * libMaskGran.get + myBaseBit) + s"bits(outer_mask, ${outerMaskBit}, ${outerMaskBit})" + } + })) + val maskVal = bitsArr.reduceRight((bit, rest) => s"cat($bit, $rest)") + s"mem_${depthInst}_${widthInst}.lib_mask <= ${maskVal}" } - val extraPortsStr = extraPorts.map { case (name, bits) => s" input $name : UInt<$bits>" }.mkString("\n") - s""" + } else "" + } + + /** Helper function to generate a port. + * + * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") + * @param addrWidth Address port width + * @param width data width + * @param write Has a write port? + * @param writeEnable Has a write enable port? + * @param read Has a read port? + * @param readEnable Has a read enable port? + * @param mask Mask granularity (# bits) of the port or None. + * @param extraPorts Extra ports (name, # bits) + */ + def generatePort( + prefix: String, + addrWidth: Int, + width: Int, + write: Boolean, + writeEnable: Boolean, + read: Boolean, + readEnable: Boolean, + mask: Option[Int], + extraPorts: Seq[(String, Int)] = Seq() + ): String = { + val realPrefix = if (prefix == "") "" else prefix + "_" + + val readStr = if (read) s"output ${realPrefix}dout : UInt<$width>" else "" + val writeStr = if (write) s"input ${realPrefix}din : UInt<$width>" else "" + val readEnableStr = if (readEnable) s"input ${realPrefix}read_en : UInt<1>" else "" + val writeEnableStr = if (writeEnable) s"input ${realPrefix}write_en : UInt<1>" else "" + val maskStr = mask match { + case Some(maskBits: Int) => s"input ${realPrefix}mask : UInt<$maskBits>" + case _ => "" + } + val extraPortsStr = extraPorts.map { case (name, bits) => s" input $name : UInt<$bits>" }.mkString("\n") + s""" input ${realPrefix}addr : UInt<$addrWidth> input ${realPrefix}clk : Clock $writeStr @@ -362,92 +434,121 @@ trait HasSimpleTestGenerator { $maskStr $extraPortsStr """ - } + } - /** - * Helper function to generate a RW footer port. - * - * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") - * @param readEnable Has a read enable port? - * @param mask Mask granularity (# bits) of the port or None. - * @param extraPorts Extra ports (name, # bits) - */ - def generateReadWriteFooterPort(prefix: String, readEnable: Boolean, mask: Option[Int], extraPorts: Seq[(String, Int)] = Seq()): String = { - generatePort(prefix, lib_addr_width, libWidth, - write = true, writeEnable = true, read = true, readEnable = readEnable, mask = mask, extraPorts = extraPorts) - } + /** Helper function to generate a RW footer port. + * + * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") + * @param readEnable Has a read enable port? + * @param mask Mask granularity (# bits) of the port or None. 
+ * @param extraPorts Extra ports (name, # bits) + */ + def generateReadWriteFooterPort( + prefix: String, + readEnable: Boolean, + mask: Option[Int], + extraPorts: Seq[(String, Int)] = Seq() + ): String = { + generatePort( + prefix, + lib_addr_width, + libWidth, + write = true, + writeEnable = true, + read = true, + readEnable = readEnable, + mask = mask, + extraPorts = extraPorts + ) + } - /** Helper function to generate a RW header port. - * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") - * @param readEnable Has a read enable port? - * @param mask Mask granularity (# bits) of the port or None. */ - def generateReadWriteHeaderPort(prefix: String, readEnable: Boolean, mask: Option[Int]): String = { - generatePort(prefix, mem_addr_width, memWidth, - write=true, writeEnable=true, read=true, readEnable=readEnable, mask) - } + /** Helper function to generate a RW header port. + * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") + * @param readEnable Has a read enable port? + * @param mask Mask granularity (# bits) of the port or None. + */ + def generateReadWriteHeaderPort(prefix: String, readEnable: Boolean, mask: Option[Int]): String = { + generatePort( + prefix, + mem_addr_width, + memWidth, + write = true, + writeEnable = true, + read = true, + readEnable = readEnable, + mask + ) + } - // Generate the header memory ports. - def generateHeaderPorts(): String = { - require (memSRAM.ports.size == 1, "Header generator only supports single RW port mem") - generateReadWriteHeaderPort(memPortPrefix, memSRAM.ports(0).readEnable.isDefined, if (memHasMask) Some(memMaskBits) else None) - } + // Generate the header memory ports. + def generateHeaderPorts(): String = { + require(memSRAM.ports.size == 1, "Header generator only supports single RW port mem") + generateReadWriteHeaderPort( + memPortPrefix, + memSRAM.ports(0).readEnable.isDefined, + if (memHasMask) Some(memMaskBits) else None + ) + } - // Generate the header (contains the circuit statement and the target memory - // module. - def generateHeader(): String = { - s""" + // Generate the header (contains the circuit statement and the target memory + // module. + def generateHeader(): String = { + s""" circuit $mem_name : module $mem_name : ${generateHeaderPorts} """ - } + } - // Generate the target memory ports. - def generateFooterPorts(): String = { - require(libSRAM.ports.size == 1, "Footer generator only supports single RW port mem") - generateReadWriteFooterPort(libPortPrefix, libSRAM.ports(0).readEnable.isDefined, - if (libHasMask) Some(libMaskBits) else None, extraPorts.map(p => (p.name, p.width))) - } + // Generate the target memory ports. + def generateFooterPorts(): String = { + require(libSRAM.ports.size == 1, "Footer generator only supports single RW port mem") + generateReadWriteFooterPort( + libPortPrefix, + libSRAM.ports(0).readEnable.isDefined, + if (libHasMask) Some(libMaskBits) else None, + extraPorts.map(p => (p.name, p.width)) + ) + } - // Generate the footer (contains the target memory extmodule declaration by default). - def generateFooter(): String = { - s""" + // Generate the footer (contains the target memory extmodule declaration by default). + def generateFooter(): String = { + s""" extmodule $lib_name : ${generateFooterPorts} defname = $lib_name """ - } + } - // Abstract method to generate body; to be overridden by specific generator type. - def generateBody(): String + // Abstract method to generate body; to be overridden by specific generator type. 
+ def generateBody(): String - // Generate the entire output from header, body, and footer. - def generateOutput(): String = { - s""" + // Generate the entire output from header, body, and footer. + def generateOutput(): String = { + s""" ${generateHeader} ${generateBody} ${generateFooter} """ - } + } - val output = generateOutput() + val output = generateOutput() } // Use this trait for tests that invoke the memory compiler without lib. trait HasNoLibTestGenerator extends HasSimpleTestGenerator { this: MacroCompilerSpec with HasSRAMGenerator => - // If there isn't a lib, then the "lib" will become a FIRRTL "mem", which - // in turn becomes synthesized flops. - // Therefore, make "lib" width/depth equal to the mem. - override lazy val libDepth = memDepth - override lazy val libWidth = memWidth - override lazy val lib_name = mem_name - // Do the same for port names. - override lazy val libPortPrefix = memPortPrefix - - // If there is no lib, don't generate a body. - override def generateBody = "" + // If there isn't a lib, then the "lib" will become a FIRRTL "mem", which + // in turn becomes synthesized flops. + // Therefore, make "lib" width/depth equal to the mem. + override lazy val libDepth = memDepth + override lazy val libWidth = memWidth + override lazy val lib_name = mem_name + // Do the same for port names. + override lazy val libPortPrefix = memPortPrefix + + // If there is no lib, don't generate a body. + override def generateBody = "" } - diff --git a/macros/src/test/scala/barstools/macros/Masks.scala b/macros/src/test/scala/barstools/macros/Masks.scala index f104c8f2f..c472669ac 100644 --- a/macros/src/test/scala/barstools/macros/Masks.scala +++ b/macros/src/test/scala/barstools/macros/Masks.scala @@ -4,32 +4,36 @@ package barstools.macros trait MasksTestSettings { this: MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator => - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) + override lazy val memDepth = BigInt(2048) + override lazy val libDepth = BigInt(1024) } // Try all four different kinds of mask config: -/** - * - * Non-masked mem Masked mem - * --------------------------------- - * Non-masked lib | | | - * --------------------------------- - * Masked lib | | | - * --------------------------------- - */ - -class Masks_FourTypes_NonMaskedMem_NonMaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +/** Non-masked mem Masked mem + * --------------------------------- + * Non-masked lib | | | + * --------------------------------- + * Masked lib | | | + * --------------------------------- + */ + +class Masks_FourTypes_NonMaskedMem_NonMaskedLib + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val memMaskGran = None - override lazy val libWidth = 8 + override lazy val libWidth = 8 override lazy val libMaskGran = None compileExecuteAndTest(mem, lib, v, output) } -class Masks_FourTypes_NonMaskedMem_MaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class Masks_FourTypes_NonMaskedMem_MaskedLib + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val memMaskGran = None @@ -39,7 +43,10 @@ class Masks_FourTypes_NonMaskedMem_MaskedLib extends MacroCompilerSpec with HasS compileExecuteAndTest(mem, lib, v, 
output) } -class Masks_FourTypes_MaskedMem_NonMaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class Masks_FourTypes_MaskedMem_NonMaskedLib + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val memMaskGran = Some(8) @@ -49,7 +56,10 @@ class Masks_FourTypes_MaskedMem_NonMaskedLib extends MacroCompilerSpec with HasS compileExecuteAndTest(mem, lib, v, output) } -class Masks_FourTypes_MaskedMem_NonMaskedLib_SmallerMaskGran extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class Masks_FourTypes_MaskedMem_NonMaskedLib_SmallerMaskGran + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val memMaskGran = Some(4) @@ -59,7 +69,10 @@ class Masks_FourTypes_MaskedMem_NonMaskedLib_SmallerMaskGran extends MacroCompil compileExecuteAndTest(mem, lib, v, output) } -class Masks_FourTypes_MaskedMem_MaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class Masks_FourTypes_MaskedMem_MaskedLib + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val memMaskGran = Some(8) @@ -69,7 +82,10 @@ class Masks_FourTypes_MaskedMem_MaskedLib extends MacroCompilerSpec with HasSRAM compileExecuteAndTest(mem, lib, v, output) } -class Masks_FourTypes_MaskedMem_MaskedLib_SameMaskGran extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class Masks_FourTypes_MaskedMem_MaskedLib_SameMaskGran + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 32 override lazy val memMaskGran = Some(8) @@ -79,7 +95,10 @@ class Masks_FourTypes_MaskedMem_MaskedLib_SameMaskGran extends MacroCompilerSpec compileExecuteAndTest(mem, lib, v, output) } -class Masks_FourTypes_MaskedMem_MaskedLib_SmallerMaskGran extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class Masks_FourTypes_MaskedMem_MaskedLib_SmallerMaskGran + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 64 override lazy val memMaskGran = Some(4) @@ -103,7 +122,11 @@ class Masks_BitMaskedMem_NonMaskedLib extends MacroCompilerSpec with HasSRAMGene // FPGA-style byte-masked memories. -class Masks_FPGAStyle_32_8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_FPGAStyle_32_8 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 32 override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(8) @@ -113,7 +136,11 @@ class Masks_FPGAStyle_32_8 extends MacroCompilerSpec with HasSRAMGenerator with // Simple powers of two with bit-masked lib. 
-class Masks_PowersOfTwo_8_1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_PowersOfTwo_8_1 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 64 override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(1) @@ -121,7 +148,11 @@ class Masks_PowersOfTwo_8_1 extends MacroCompilerSpec with HasSRAMGenerator with compileExecuteAndTest(mem, lib, v, output) } -class Masks_PowersOfTwo_16_1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_PowersOfTwo_16_1 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 64 override lazy val memMaskGran = Some(16) override lazy val libMaskGran = Some(1) @@ -129,7 +160,11 @@ class Masks_PowersOfTwo_16_1 extends MacroCompilerSpec with HasSRAMGenerator wit compileExecuteAndTest(mem, lib, v, output) } -class Masks_PowersOfTwo_32_1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_PowersOfTwo_32_1 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 64 override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(1) @@ -137,7 +172,11 @@ class Masks_PowersOfTwo_32_1 extends MacroCompilerSpec with HasSRAMGenerator wit compileExecuteAndTest(mem, lib, v, output) } -class Masks_PowersOfTwo_64_1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_PowersOfTwo_64_1 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 64 override lazy val memMaskGran = Some(64) override lazy val libMaskGran = Some(1) @@ -147,7 +186,11 @@ class Masks_PowersOfTwo_64_1 extends MacroCompilerSpec with HasSRAMGenerator wit // Simple powers of two with non bit-masked lib. 
-class Masks_PowersOfTwo_32_4 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_PowersOfTwo_32_4 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 128 override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(4) @@ -155,7 +198,11 @@ class Masks_PowersOfTwo_32_4 extends MacroCompilerSpec with HasSRAMGenerator wit compileExecuteAndTest(mem, lib, v, output) } -class Masks_PowersOfTwo_32_8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_PowersOfTwo_32_8 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 128 override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(8) @@ -163,7 +210,11 @@ class Masks_PowersOfTwo_32_8 extends MacroCompilerSpec with HasSRAMGenerator wit compileExecuteAndTest(mem, lib, v, output) } -class Masks_PowersOfTwo_8_8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_PowersOfTwo_8_8 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 128 override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(8) @@ -173,7 +224,11 @@ class Masks_PowersOfTwo_8_8 extends MacroCompilerSpec with HasSRAMGenerator with // Width as a multiple of the mask, bit-masked lib -class Masks_IntegerMaskMultiple_20_10 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_IntegerMaskMultiple_20_10 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 20 override lazy val memMaskGran = Some(10) override lazy val libMaskGran = Some(1) @@ -181,16 +236,24 @@ class Masks_IntegerMaskMultiple_20_10 extends MacroCompilerSpec with HasSRAMGene compileExecuteAndTest(mem, lib, v, output) } -class Masks_IntegerMaskMultiple_21_7 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_IntegerMaskMultiple_21_7 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 21 override lazy val memMaskGran = Some(21) override lazy val libMaskGran = Some(7) - it should "be enabled when non-power of two masks are supported" is (pending) + (it should "be enabled when non-power of two masks are supported").is(pending) //~ compileExecuteAndTest(mem, lib, v, output) } -class Masks_IntegerMaskMultiple_21_21 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_IntegerMaskMultiple_21_21 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 21 override lazy val memMaskGran = Some(21) override lazy val libMaskGran = Some(1) @@ -198,7 +261,11 @@ class Masks_IntegerMaskMultiple_21_21 extends MacroCompilerSpec with HasSRAMGene compileExecuteAndTest(mem, lib, v, output) } -class Masks_IntegerMaskMultiple_84_21 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_IntegerMaskMultiple_84_21 + extends 
MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 84 override lazy val memMaskGran = Some(21) override lazy val libMaskGran = Some(1) @@ -206,7 +273,11 @@ class Masks_IntegerMaskMultiple_84_21 extends MacroCompilerSpec with HasSRAMGene compileExecuteAndTest(mem, lib, v, output) } -class Masks_IntegerMaskMultiple_92_23 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_IntegerMaskMultiple_92_23 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 92 override lazy val memMaskGran = Some(23) override lazy val libMaskGran = Some(1) @@ -214,7 +285,11 @@ class Masks_IntegerMaskMultiple_92_23 extends MacroCompilerSpec with HasSRAMGene compileExecuteAndTest(mem, lib, v, output) } -class Masks_IntegerMaskMultiple_117_13 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_IntegerMaskMultiple_117_13 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 117 override lazy val memMaskGran = Some(13) override lazy val libMaskGran = Some(1) @@ -222,7 +297,11 @@ class Masks_IntegerMaskMultiple_117_13 extends MacroCompilerSpec with HasSRAMGen compileExecuteAndTest(mem, lib, v, output) } -class Masks_IntegerMaskMultiple_160_20 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_IntegerMaskMultiple_160_20 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 160 override lazy val memMaskGran = Some(20) override lazy val libMaskGran = Some(1) @@ -230,7 +309,11 @@ class Masks_IntegerMaskMultiple_160_20 extends MacroCompilerSpec with HasSRAMGen compileExecuteAndTest(mem, lib, v, output) } -class Masks_IntegerMaskMultiple_184_23 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_IntegerMaskMultiple_184_23 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 184 override lazy val memMaskGran = Some(23) override lazy val libMaskGran = Some(1) @@ -240,11 +323,15 @@ class Masks_IntegerMaskMultiple_184_23 extends MacroCompilerSpec with HasSRAMGen // Width as an non-integer multiple of the mask, bit-masked lib -class Masks_NonIntegerMaskMultiple_32_3 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with MasksTestSettings { +class Masks_NonIntegerMaskMultiple_32_3 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with MasksTestSettings { override lazy val width = 32 override lazy val memMaskGran = Some(3) override lazy val libMaskGran = Some(1) - it should "be enabled when non-power of two masks are supported" is (pending) + (it should "be enabled when non-power of two masks are supported").is(pending) //~ compileExecuteAndTest(mem, lib, v, output) } diff --git a/macros/src/test/scala/barstools/macros/MultiPort.scala b/macros/src/test/scala/barstools/macros/MultiPort.scala index 3899f8359..1968f6aa4 100644 --- a/macros/src/test/scala/barstools/macros/MultiPort.scala +++ b/macros/src/test/scala/barstools/macros/MultiPort.scala @@ -13,42 +13,70 @@ class SplitWidth_2rw 
extends MacroCompilerSpec with HasSRAMGenerator with HasSim override def generateMemSRAM() = { SRAMMacro( - name=mem_name, - width=memWidth, - depth=memDepth, - family="2rw", - ports=Seq(generateTestPort( - "portA", memWidth, Some(memDepth), maskGran=memMaskGran, - write=true, writeEnable=true, - read=true, readEnable=true - ), generateTestPort( - "portB", memWidth, Some(memDepth), maskGran=memMaskGran, - write=true, writeEnable=true, - read=true, readEnable=true - )) + name = mem_name, + width = memWidth, + depth = memDepth, + family = "2rw", + ports = Seq( + generateTestPort( + "portA", + memWidth, + Some(memDepth), + maskGran = memMaskGran, + write = true, + writeEnable = true, + read = true, + readEnable = true + ), + generateTestPort( + "portB", + memWidth, + Some(memDepth), + maskGran = memMaskGran, + write = true, + writeEnable = true, + read = true, + readEnable = true + ) + ) ) } override def generateLibSRAM() = { SRAMMacro( - name=lib_name, - width=libWidth, - depth=libDepth, - family="2rw", - ports=Seq(generateTestPort( - "portA", libWidth, libDepth, - write=true, writeEnable=true, - read=true, readEnable=true - ), generateTestPort( - "portB", libWidth, libDepth, - write=true, writeEnable=true, - read=true, readEnable=true - )) + name = lib_name, + width = libWidth, + depth = libDepth, + family = "2rw", + ports = Seq( + generateTestPort( + "portA", + libWidth, + libDepth, + write = true, + writeEnable = true, + read = true, + readEnable = true + ), + generateTestPort( + "portB", + libWidth, + libDepth, + write = true, + writeEnable = true, + read = true, + readEnable = true + ) + ) ) } override def generateHeaderPorts() = { - generateReadWriteHeaderPort("portA", true, Some(memMaskBits)) + "\n" + generateReadWriteHeaderPort("portB", true, Some(memMaskBits)) + generateReadWriteHeaderPort("portA", true, Some(memMaskBits)) + "\n" + generateReadWriteHeaderPort( + "portB", + true, + Some(memMaskBits) + ) } override def generateFooterPorts() = { @@ -56,7 +84,7 @@ class SplitWidth_2rw extends MacroCompilerSpec with HasSRAMGenerator with HasSim } override def generateBody() = -""" + """ inst mem_0_0 of awesome_lib_mem inst mem_0_1 of awesome_lib_mem inst mem_0_2 of awesome_lib_mem @@ -128,56 +156,112 @@ class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasS override def generateMemSRAM() = { SRAMMacro( - name=mem_name, - width=memWidth, - depth=memDepth, - family="1r1w", - ports=Seq(generateTestPort( - "portA", memWidth, Some(memDepth), maskGran=memMaskGran, - write=false, writeEnable=false, - read=true, readEnable=true - ), generateTestPort( - "portB", memWidth, Some(memDepth), maskGran=memMaskGran, - write=true, writeEnable=true, - read=false, readEnable=false - )) + name = mem_name, + width = memWidth, + depth = memDepth, + family = "1r1w", + ports = Seq( + generateTestPort( + "portA", + memWidth, + Some(memDepth), + maskGran = memMaskGran, + write = false, + writeEnable = false, + read = true, + readEnable = true + ), + generateTestPort( + "portB", + memWidth, + Some(memDepth), + maskGran = memMaskGran, + write = true, + writeEnable = true, + read = false, + readEnable = false + ) + ) ) } override def generateLibSRAM() = { SRAMMacro( - name=lib_name, - width=libWidth, - depth=libDepth, - family="1r1w", - ports=Seq(generateTestPort( - "portA", libWidth, libDepth, - write=false, writeEnable=false, - read=true, readEnable=true - ), generateTestPort( - "portB", libWidth, libDepth, - write=true, writeEnable=true, - read=false, readEnable=false - )) + name = 
lib_name, + width = libWidth, + depth = libDepth, + family = "1r1w", + ports = Seq( + generateTestPort( + "portA", + libWidth, + libDepth, + write = false, + writeEnable = false, + read = true, + readEnable = true + ), + generateTestPort( + "portB", + libWidth, + libDepth, + write = true, + writeEnable = true, + read = false, + readEnable = false + ) + ) ) } override def generateHeaderPorts() = { - generatePort("portA", mem_addr_width, memWidth, - write=false, writeEnable=false, read=true, readEnable=true, Some(memMaskBits)) + "\n" + - generatePort("portB", mem_addr_width, memWidth, - write=true, writeEnable=true, read=false, readEnable=false, Some(memMaskBits)) + generatePort( + "portA", + mem_addr_width, + memWidth, + write = false, + writeEnable = false, + read = true, + readEnable = true, + Some(memMaskBits) + ) + "\n" + + generatePort( + "portB", + mem_addr_width, + memWidth, + write = true, + writeEnable = true, + read = false, + readEnable = false, + Some(memMaskBits) + ) } override def generateFooterPorts() = { - generatePort("portA", lib_addr_width, libWidth, - write=false, writeEnable=false, read=true, readEnable=true, None) + "\n" + - generatePort("portB", lib_addr_width, libWidth, - write=true, writeEnable=true, read=false, readEnable=false, None) + generatePort( + "portA", + lib_addr_width, + libWidth, + write = false, + writeEnable = false, + read = true, + readEnable = true, + None + ) + "\n" + + generatePort( + "portB", + lib_addr_width, + libWidth, + write = true, + writeEnable = true, + read = false, + readEnable = false, + None + ) } override def generateBody() = -""" + """ inst mem_0_0 of awesome_lib_mem inst mem_0_1 of awesome_lib_mem inst mem_0_2 of awesome_lib_mem @@ -234,42 +318,70 @@ class SplitWidth_2rw_differentMasks extends MacroCompilerSpec with HasSRAMGenera override def generateMemSRAM() = { println(memMaskGranB) SRAMMacro( - name=mem_name, - width=memWidth, - depth=memDepth, - family="2rw", - ports=Seq(generateTestPort( - "portA", memWidth, Some(memDepth), maskGran=memMaskGran, - write=true, writeEnable=true, - read=true, readEnable=true - ), generateTestPort( - "portB", memWidth, Some(memDepth), maskGran=Some(memMaskGranB), - write=true, writeEnable=true, - read=true, readEnable=true - )) + name = mem_name, + width = memWidth, + depth = memDepth, + family = "2rw", + ports = Seq( + generateTestPort( + "portA", + memWidth, + Some(memDepth), + maskGran = memMaskGran, + write = true, + writeEnable = true, + read = true, + readEnable = true + ), + generateTestPort( + "portB", + memWidth, + Some(memDepth), + maskGran = Some(memMaskGranB), + write = true, + writeEnable = true, + read = true, + readEnable = true + ) + ) ) } override def generateLibSRAM() = { SRAMMacro( - name=lib_name, - width=libWidth, - depth=libDepth, - family="2rw", - ports=Seq(generateTestPort( - "portA", libWidth, libDepth, - write=true, writeEnable=true, - read=true, readEnable=true - ), generateTestPort( - "portB", libWidth, libDepth, - write=true, writeEnable=true, - read=true, readEnable=true - )) + name = lib_name, + width = libWidth, + depth = libDepth, + family = "2rw", + ports = Seq( + generateTestPort( + "portA", + libWidth, + libDepth, + write = true, + writeEnable = true, + read = true, + readEnable = true + ), + generateTestPort( + "portB", + libWidth, + libDepth, + write = true, + writeEnable = true, + read = true, + readEnable = true + ) + ) ) } override def generateHeaderPorts() = { - generateReadWriteHeaderPort("portA", true, Some(memMaskBits)) + "\n" + 
generateReadWriteHeaderPort("portB", true, Some(memWidth / memMaskGranB)) + generateReadWriteHeaderPort("portA", true, Some(memMaskBits)) + "\n" + generateReadWriteHeaderPort( + "portB", + true, + Some(memWidth / memMaskGranB) + ) } override def generateFooterPorts() = { @@ -277,7 +389,7 @@ class SplitWidth_2rw_differentMasks extends MacroCompilerSpec with HasSRAMGenera } override def generateBody() = -""" + """ inst mem_0_0 of awesome_lib_mem inst mem_0_1 of awesome_lib_mem inst mem_0_2 of awesome_lib_mem diff --git a/macros/src/test/scala/barstools/macros/SRAMCompiler.scala b/macros/src/test/scala/barstools/macros/SRAMCompiler.scala index e4e62de7c..17f496011 100644 --- a/macros/src/test/scala/barstools/macros/SRAMCompiler.scala +++ b/macros/src/test/scala/barstools/macros/SRAMCompiler.scala @@ -13,8 +13,7 @@ class SRAMCompiler extends MacroCompilerSpec with HasSRAMGenerator with HasSimpl writeToLib(lib, Seq(compiler)) - writeToMem(mem, Seq(generateSRAM("mymem", "X", 8, 16))) - compileExecuteAndTest(mem, Some(lib), verilog, output=output, false, true) + compileExecuteAndTest(mem, Some(lib), verilog, output = output, false, true) } diff --git a/macros/src/test/scala/barstools/macros/SimpleSplitDepth.scala b/macros/src/test/scala/barstools/macros/SimpleSplitDepth.scala index f016dbc70..5a7fc77d1 100644 --- a/macros/src/test/scala/barstools/macros/SimpleSplitDepth.scala +++ b/macros/src/test/scala/barstools/macros/SimpleSplitDepth.scala @@ -6,33 +6,36 @@ package barstools.macros trait HasSimpleDepthTestGenerator extends HasSimpleTestGenerator { this: MacroCompilerSpec with HasSRAMGenerator => - def width: Int + def width: Int - override lazy val memWidth = width - override lazy val libWidth = width + override lazy val memWidth = width + override lazy val libWidth = width - // Generate a depth-splitting body. - override def generateBody(): String = { - val output = new StringBuilder + // Generate a depth-splitting body. 
+ override def generateBody(): String = { + val output = new StringBuilder - if (selectBits > 0) { - output.append ( -s""" + if (selectBits > 0) { + output.append( + s""" node ${memPortPrefix}_addr_sel = bits(${memPortPrefix}_addr, ${mem_addr_width - 1}, $lib_addr_width) reg ${memPortPrefix}_addr_sel_reg : UInt<${selectBits}>, ${memPortPrefix}_clk with : reset => (UInt<1>("h0"), ${memPortPrefix}_addr_sel_reg) ${memPortPrefix}_addr_sel_reg <= mux(UInt<1>("h1"), ${memPortPrefix}_addr_sel, ${memPortPrefix}_addr_sel_reg) """ - ) - } + ) + } - for (i <- 0 to depthInstances - 1) { - val maskStatement = generateMaskStatement(0, i) - val enableIdentifier = if (selectBits > 0) s"""eq(${memPortPrefix}_addr_sel, UInt<${selectBits}>("h${i.toHexString}"))""" else "UInt<1>(\"h1\")" - val chipEnable = s"""UInt<1>("h1")""" - val writeEnable = if (memMaskGran.isEmpty) s"and(${memPortPrefix}_write_en, ${chipEnable})" else s"${memPortPrefix}_write_en" - output.append( - s""" + for (i <- 0 to depthInstances - 1) { + val maskStatement = generateMaskStatement(0, i) + val enableIdentifier = + if (selectBits > 0) s"""eq(${memPortPrefix}_addr_sel, UInt<${selectBits}>("h${i.toHexString}"))""" + else "UInt<1>(\"h1\")" + val chipEnable = s"""UInt<1>("h1")""" + val writeEnable = + if (memMaskGran.isEmpty) s"and(${memPortPrefix}_write_en, ${chipEnable})" else s"${memPortPrefix}_write_en" + output.append( + s""" inst mem_${i}_0 of ${lib_name} mem_${i}_0.${libPortPrefix}_clk <= ${memPortPrefix}_clk mem_${i}_0.${libPortPrefix}_addr <= ${memPortPrefix}_addr @@ -42,26 +45,29 @@ s""" mem_${i}_0.${libPortPrefix}_write_en <= and(and(${writeEnable}, UInt<1>("h1")), ${enableIdentifier}) node ${memPortPrefix}_dout_${i} = ${memPortPrefix}_dout_${i}_0 """ - ) - } - def generate_outer_dout_tree(i:Int, depthInstances: Int): String = { - if (i > depthInstances - 1) { - s"""UInt<${libWidth}>("h0")""" - } else { - s"""mux(eq(${memPortPrefix}_addr_sel_reg, UInt<%d>("h%s")), ${memPortPrefix}_dout_%d, %s)""".format( - selectBits, i.toHexString, i, generate_outer_dout_tree(i + 1, depthInstances) - ) - } - } - output append s" ${memPortPrefix}_dout <= " - if (selectBits > 0) { - output append generate_outer_dout_tree(0, depthInstances) + ) + } + def generate_outer_dout_tree(i: Int, depthInstances: Int): String = { + if (i > depthInstances - 1) { + s"""UInt<${libWidth}>("h0")""" } else { - output append s"""mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<${libWidth}>("h0"))""" + s"""mux(eq(${memPortPrefix}_addr_sel_reg, UInt<%d>("h%s")), ${memPortPrefix}_dout_%d, %s)""".format( + selectBits, + i.toHexString, + i, + generate_outer_dout_tree(i + 1, depthInstances) + ) } - - output.toString } + output.append(s" ${memPortPrefix}_dout <= ") + if (selectBits > 0) { + output.append(generate_outer_dout_tree(0, depthInstances)) + } else { + output.append(s"""mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<${libWidth}>("h0"))""") + } + + output.toString + } } // Try different widths @@ -154,7 +160,10 @@ class SplitDepth2048x8_mrw_lib8 extends MacroCompilerSpec with HasSRAMGenerator } // Non-bit level mask -class SplitDepth2048x64_mrw_mem32_lib8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { +class SplitDepth2048x64_mrw_mem32_lib8 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator { override lazy val width = 64 override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) @@ -165,7 +174,10 @@ class SplitDepth2048x64_mrw_mem32_lib8 extends MacroCompilerSpec 
with HasSRAMGen } // Bit level mask -class SplitDepth2048x32_mrw_mem16_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { +class SplitDepth2048x32_mrw_mem16_lib1 + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator { override lazy val width = 32 override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) @@ -213,7 +225,7 @@ class SplitDepth2048x32_mrw_mem3_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val memMaskGran = Some(3) override lazy val libMaskGran = Some(1) - it should "be enabled when non-power of two masks are supported" is (pending) + (it should "be enabled when non-power of two masks are supported").is(pending) //compileExecuteAndTest(mem, lib, v, output) } @@ -224,7 +236,7 @@ class SplitDepth2048x32_mrw_mem7_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val memMaskGran = Some(7) override lazy val libMaskGran = Some(1) - it should "be enabled when non-power of two masks are supported" is (pending) + (it should "be enabled when non-power of two masks are supported").is(pending) //compileExecuteAndTest(mem, lib, v, output) } @@ -235,7 +247,7 @@ class SplitDepth2048x32_mrw_mem9_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val memMaskGran = Some(9) override lazy val libMaskGran = Some(1) - it should "be enabled when non-power of two masks are supported" is (pending) + (it should "be enabled when non-power of two masks are supported").is(pending) //compileExecuteAndTest(mem, lib, v, output) } @@ -247,12 +259,12 @@ class SplitDepth2048x8_extraPort extends MacroCompilerSpec with HasSRAMGenerator override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) override lazy val extraPorts = List( - MacroExtraPort(name="extra_port", width=8, portType=Constant, value=0xff) + MacroExtraPort(name = "extra_port", width = 8, portType = Constant, value = 0xff) ) override lazy val extraTag = "extraPort" override def generateOutput(): String = -""" + """ circuit target_memory : module target_memory : input outer_addr : UInt<11> @@ -317,22 +329,22 @@ class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGener val v = "split_depth-r-w-split-lib-split-mem.v" val libMacro = SRAMMacro( - name="awesome_lib_mem", - width=width, - depth=libDepth, - family="1r1w", - ports=Seq( + name = "awesome_lib_mem", + width = width, + depth = libDepth, + family = "1r1w", + ports = Seq( generateReadPort("innerA", width, libDepth), generateWritePort("innerB", width, libDepth) ) ) val memMacro = SRAMMacro( - name="target_memory", - width=width, - depth=memDepth, - family="1r1w", - ports=Seq( + name = "target_memory", + width = width, + depth = memDepth, + family = "1r1w", + ports = Seq( generateReadPort("outerB", width, memDepth), generateWritePort("outerA", width, memDepth) ) @@ -342,7 +354,7 @@ class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGener writeToLib(lib, Seq(libMacro)) val output = -""" + """ circuit target_memory : module target_memory : input outerB_addr : UInt<11> @@ -404,11 +416,11 @@ circuit target_memory : val v = "split_depth-r-w-regular-lib-split-mem.v" val memMacro = SRAMMacro( - name="target_memory", - width=width, - depth=memDepth, - family="1r1w", - ports=Seq( + name = "target_memory", + width = width, + depth = memDepth, + family = "1r1w", + ports = Seq( generateReadPort("outerB", width, memDepth), generateWritePort("outerA", width, memDepth) ) @@ -418,7 +430,7 @@ circuit target_memory 
: writeToLib(lib, Seq(generateSRAM("awesome_lib_mem", "lib", width, libDepth))) val output = -""" + """ TODO """ @@ -437,11 +449,11 @@ TODO val v = "split_depth-r-w-split-lib-regular-mem.v" val libMacro = SRAMMacro( - name="awesome_lib_mem", - width=width, - depth=libDepth, - family="1rw", - ports=Seq( + name = "awesome_lib_mem", + width = width, + depth = libDepth, + family = "1rw", + ports = Seq( generateReadPort("innerA", width, libDepth), generateWritePort("innerB", width, libDepth) ) @@ -451,7 +463,7 @@ TODO writeToLib(lib, Seq(libMacro)) val output = -""" + """ TODO """ @@ -478,22 +490,22 @@ class SplitDepth_SplitPortsMasked extends MacroCompilerSpec with HasSRAMGenerato val v = "split_depth-r-mw-split-lib-split-mem.v" val libMacro = SRAMMacro( - name="awesome_lib_mem", - width=width, - depth=libDepth, - family="1r1w", - ports=Seq( + name = "awesome_lib_mem", + width = width, + depth = libDepth, + family = "1r1w", + ports = Seq( generateReadPort("innerA", width, libDepth), generateWritePort("innerB", width, libDepth, libMaskGran) ) ) val memMacro = SRAMMacro( - name="target_memory", - width=width, - depth=memDepth, - family="1r1w", - ports=Seq( + name = "target_memory", + width = width, + depth = memDepth, + family = "1r1w", + ports = Seq( generateReadPort("outerB", width, memDepth), generateWritePort("outerA", width, memDepth, memMaskGran) ) @@ -503,7 +515,7 @@ class SplitDepth_SplitPortsMasked extends MacroCompilerSpec with HasSRAMGenerato writeToLib(lib, Seq(libMacro)) val output = -""" + """ circuit target_memory : module target_memory : input outerB_addr : UInt<11> @@ -569,11 +581,11 @@ circuit target_memory : val v = "split_depth-r-mw-regular-lib-split-mem.v" val memMacro = SRAMMacro( - name="target_memory", - width=width, - depth=memDepth, - family="1r1w", - ports=Seq( + name = "target_memory", + width = width, + depth = memDepth, + family = "1r1w", + ports = Seq( generateReadPort("outerB", width, memDepth), generateWritePort("outerA", width, memDepth, memMaskGran) ) @@ -583,7 +595,7 @@ circuit target_memory : writeToLib(lib, Seq(generateSRAM("awesome_lib_mem", "lib", width, libDepth, libMaskGran))) val output = -""" + """ TODO """ @@ -602,11 +614,11 @@ TODO val v = "split_depth-r-mw-split-lib-regular-mem.v" val libMacro = SRAMMacro( - name="awesome_lib_mem", - width=width, - depth=libDepth, - family="1rw", - ports=Seq( + name = "awesome_lib_mem", + width = width, + depth = libDepth, + family = "1rw", + ports = Seq( generateReadPort("innerA", width, libDepth), generateWritePort("innerB", width, libDepth, libMaskGran) ) @@ -616,7 +628,7 @@ TODO writeToLib(lib, Seq(libMacro)) val output = -""" + """ TODO """ diff --git a/macros/src/test/scala/barstools/macros/SimpleSplitWidth.scala b/macros/src/test/scala/barstools/macros/SimpleSplitWidth.scala index 843eed494..3cd0a6df0 100644 --- a/macros/src/test/scala/barstools/macros/SimpleSplitWidth.scala +++ b/macros/src/test/scala/barstools/macros/SimpleSplitWidth.scala @@ -5,43 +5,45 @@ package barstools.macros trait HasSimpleWidthTestGenerator extends HasSimpleTestGenerator { this: MacroCompilerSpec with HasSRAMGenerator => - def depth: BigInt + def depth: BigInt - override lazy val memDepth = depth - override lazy val libDepth = depth + override lazy val memDepth = depth + override lazy val libDepth = depth - override def generateBody(): String = { - val output = new StringBuilder + override def generateBody(): String = { + val output = new StringBuilder - // Generate mem_0_ lines for number of width instances. 
- output.append( - ((0 to widthInstances - 1) map {i:Int => s""" + // Generate mem_0_ lines for number of width instances. + output.append( + ((0 to widthInstances - 1).map { i: Int => + s""" inst mem_0_${i} of ${lib_name} """ - }).reduceLeft(_ + _) - ) + }).reduceLeft(_ + _) + ) - // Generate submemory connection blocks. - output append (for (i <- 0 to widthInstances - 1) yield { - // Width of this submemory. - val myMemWidth = if (i == widthInstances - 1) lastWidthBits else usableLibWidth - // Base bit of this submemory. - // e.g. if libWidth is 8 and this is submemory 2 (0-indexed), then this - // would be 16. - val myBaseBit = usableLibWidth*i - - val maskStatement = generateMaskStatement(i, 0) - - // We need to use writeEnable as a crude "mask" if mem has a mask but - // lib does not. - val writeEnableBit = if (libMaskGran.isEmpty && memMaskGran.isDefined) { - val outerMaskBit = myBaseBit / memMaskGran.get - s"bits(outer_mask, ${outerMaskBit}, ${outerMaskBit})" - } else """UInt<1>("h1")""" - val chipEnable = s"""UInt<1>("h1")""" - val writeEnableExpr = if (libMaskGran.isEmpty) s"and(${memPortPrefix}_write_en, ${chipEnable})" else s"${memPortPrefix}_write_en" - -s""" + // Generate submemory connection blocks. + output.append((for (i <- 0 to widthInstances - 1) yield { + // Width of this submemory. + val myMemWidth = if (i == widthInstances - 1) lastWidthBits else usableLibWidth + // Base bit of this submemory. + // e.g. if libWidth is 8 and this is submemory 2 (0-indexed), then this + // would be 16. + val myBaseBit = usableLibWidth * i + + val maskStatement = generateMaskStatement(i, 0) + + // We need to use writeEnable as a crude "mask" if mem has a mask but + // lib does not. + val writeEnableBit = if (libMaskGran.isEmpty && memMaskGran.isDefined) { + val outerMaskBit = myBaseBit / memMaskGran.get + s"bits(outer_mask, ${outerMaskBit}, ${outerMaskBit})" + } else """UInt<1>("h1")""" + val chipEnable = s"""UInt<1>("h1")""" + val writeEnableExpr = + if (libMaskGran.isEmpty) s"and(${memPortPrefix}_write_en, ${chipEnable})" else s"${memPortPrefix}_write_en" + + s""" mem_0_${i}.${libPortPrefix}_clk <= ${memPortPrefix}_clk mem_0_${i}.${libPortPrefix}_addr <= ${memPortPrefix}_addr node ${memPortPrefix}_dout_0_${i} = bits(mem_0_${i}.${libPortPrefix}_dout, ${myMemWidth - 1}, 0) @@ -49,24 +51,23 @@ s""" ${maskStatement} mem_0_${i}.${libPortPrefix}_write_en <= and(and(${writeEnableExpr}, ${writeEnableBit}), UInt<1>("h1")) """ - }).reduceLeft(_ + _) - - // Generate final output that concats together the sub-memories. - // e.g. cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0)) - output append { - val doutStatements = ((widthInstances - 1 to 0 by -1) map (i => s"${memPortPrefix}_dout_0_${i}")) - val catStmt = doutStatements.init.foldRight(doutStatements.last)((l: String, r: String) => s"cat($l, $r)") -s""" + }).reduceLeft(_ + _)) + + // Generate final output that concats together the sub-memories. + // e.g. cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0)) + output.append { + val doutStatements = ((widthInstances - 1 to 0 by -1).map(i => s"${memPortPrefix}_dout_0_${i}")) + val catStmt = doutStatements.init.foldRight(doutStatements.last)((l: String, r: String) => s"cat($l, $r)") + s""" node ${memPortPrefix}_dout_0 = ${catStmt} """ - } + } - output append -s""" + output.append(s""" ${memPortPrefix}_dout <= mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<${memWidth}>("h0")) -""" - output.toString - } +""") + output.toString + } } // Try different widths against a base memory width of 8. 
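// Illustrative sketch, not part of the patch: the width-splitting arithmetic the
// generator above turns into FIRRTL. A wide mem word is covered by several narrower
// lib instances; instance i handles the data bits starting at usableLibWidth * i, and
// the read data is rebuilt with nested cat() calls. Helper names are hypothetical.
object WidthSplitSketch {
  // Number of lib instances needed to cover the mem width (ceiling division).
  def widthInstances(memWidth: Int, usableLibWidth: Int): Int =
    (memWidth + usableLibWidth - 1) / usableLibWidth

  // Lowest mem data bit handled by instance i, like `myBaseBit` in the generator.
  def baseBit(i: Int, usableLibWidth: Int): Int = usableLibWidth * i

  def main(args: Array[String]): Unit = {
    println(widthInstances(64, 8)) // 8 instances of an 8-bit lib for a 64-bit mem
    println(baseBit(2, 8))         // instance 2 starts at bit 16
  }
}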
@@ -268,7 +269,10 @@ class SplitWidth1024x16_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator // Masked RAM -class SplitWidth1024x8_memGran_8_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class SplitWidth1024x8_memGran_8_libGran_1_rw + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 8 override lazy val libWidth = 8 @@ -278,7 +282,10 @@ class SplitWidth1024x8_memGran_8_libGran_1_rw extends MacroCompilerSpec with Has compileExecuteAndTest(mem, lib, v, output) } -class SplitWidth1024x16_memGran_8_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class SplitWidth1024x16_memGran_8_libGran_1_rw + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 @@ -288,7 +295,10 @@ class SplitWidth1024x16_memGran_8_libGran_1_rw extends MacroCompilerSpec with Ha compileExecuteAndTest(mem, lib, v, output) } -class SplitWidth1024x16_memGran_8_libGran_8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class SplitWidth1024x16_memGran_8_libGran_8_rw + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 @@ -298,7 +308,10 @@ class SplitWidth1024x16_memGran_8_libGran_8_rw extends MacroCompilerSpec with Ha compileExecuteAndTest(mem, lib, v, output) } -class SplitWidth1024x128_memGran_8_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class SplitWidth1024x128_memGran_8_libGran_1_rw + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 128 override lazy val libWidth = 32 @@ -308,7 +321,10 @@ class SplitWidth1024x128_memGran_8_libGran_1_rw extends MacroCompilerSpec with H compileExecuteAndTest(mem, lib, v, output) } -class SplitWidth1024x16_memGran_4_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class SplitWidth1024x16_memGran_4_libGran_1_rw + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 @@ -318,7 +334,10 @@ class SplitWidth1024x16_memGran_4_libGran_1_rw extends MacroCompilerSpec with Ha compileExecuteAndTest(mem, lib, v, output) } -class SplitWidth1024x16_memGran_2_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class SplitWidth1024x16_memGran_2_libGran_1_rw + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 @@ -328,7 +347,10 @@ class SplitWidth1024x16_memGran_2_libGran_1_rw extends MacroCompilerSpec with Ha compileExecuteAndTest(mem, lib, v, output) } -class SplitWidth1024x16_memGran_16_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class SplitWidth1024x16_memGran_16_libGran_1_rw + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth 
= 16 override lazy val libWidth = 8 @@ -360,7 +382,10 @@ class SplitWidth1024x16_libGran_1_rw extends MacroCompilerSpec with HasSRAMGener // Non-memMask and non-1 libMask -class SplitWidth1024x16_memGran_8_libGran_2_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class SplitWidth1024x16_memGran_8_libGran_2_rw + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 @@ -372,21 +397,27 @@ class SplitWidth1024x16_memGran_8_libGran_2_rw extends MacroCompilerSpec with Ha // Non-power of two memGran -class SplitWidth1024x16_memGran_9_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class SplitWidth1024x16_memGran_9_libGran_1_rw + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 override lazy val memMaskGran = Some(9) override lazy val libMaskGran = Some(1) - it should "be enabled when non-power of two masks are supported" is (pending) + (it should "be enabled when non-power of two masks are supported").is(pending) //~ compile(mem, lib, v, false) //~ execute(mem, lib, false, output) } // Read enable -class SplitWidth1024x32_readEnable_Lib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class SplitWidth1024x32_readEnable_Lib + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { import mdf.macrolib._ override lazy val depth = BigInt(1024) @@ -395,20 +426,27 @@ class SplitWidth1024x32_readEnable_Lib extends MacroCompilerSpec with HasSRAMGen override def generateLibSRAM() = { SRAMMacro( - name=lib_name, - width=libWidth, - depth=libDepth, - family="1rw", - ports=Seq(generateTestPort( - "lib", Some(libWidth), Some(libDepth), maskGran=libMaskGran, - write=true, writeEnable=true, - read=true, readEnable=true - )) + name = lib_name, + width = libWidth, + depth = libDepth, + family = "1rw", + ports = Seq( + generateTestPort( + "lib", + Some(libWidth), + Some(libDepth), + maskGran = libMaskGran, + write = true, + writeEnable = true, + read = true, + readEnable = true + ) + ) ) } override def generateBody() = -""" + """ inst mem_0_0 of awesome_lib_mem inst mem_0_1 of awesome_lib_mem inst mem_0_2 of awesome_lib_mem @@ -444,7 +482,10 @@ class SplitWidth1024x32_readEnable_Lib extends MacroCompilerSpec with HasSRAMGen compileExecuteAndTest(mem, lib, v, output) } -class SplitWidth1024x32_readEnable_Mem extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class SplitWidth1024x32_readEnable_Mem + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { import mdf.macrolib._ override lazy val depth = BigInt(1024) @@ -453,15 +494,22 @@ class SplitWidth1024x32_readEnable_Mem extends MacroCompilerSpec with HasSRAMGen override def generateMemSRAM() = { SRAMMacro( - name=mem_name, - width=memWidth, - depth=memDepth, - family="1rw", - ports=Seq(generateTestPort( - "outer", Some(memWidth), Some(memDepth), maskGran=memMaskGran, - write=true, writeEnable=true, - read=true, readEnable=true - )) + name = mem_name, + width = memWidth, + depth = memDepth, + family = "1rw", + ports = Seq( + generateTestPort( + "outer", + Some(memWidth), + Some(memDepth), + maskGran = memMaskGran, + write = true, + writeEnable = true, + read = true, + readEnable 
= true + ) + ) ) } @@ -470,7 +518,10 @@ class SplitWidth1024x32_readEnable_Mem extends MacroCompilerSpec with HasSRAMGen compileExecuteAndTest(mem, lib, v, output) } -class SplitWidth1024x32_readEnable_LibMem extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { +class SplitWidth1024x32_readEnable_LibMem + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator { import mdf.macrolib._ override lazy val depth = BigInt(1024) @@ -479,34 +530,48 @@ class SplitWidth1024x32_readEnable_LibMem extends MacroCompilerSpec with HasSRAM override def generateLibSRAM() = { SRAMMacro( - name=lib_name, - width=libWidth, - depth=libDepth, - family="1rw", - ports=Seq(generateTestPort( - "lib", Some(libWidth), Some(libDepth), maskGran=libMaskGran, - write=true, writeEnable=true, - read=true, readEnable=true - )) + name = lib_name, + width = libWidth, + depth = libDepth, + family = "1rw", + ports = Seq( + generateTestPort( + "lib", + Some(libWidth), + Some(libDepth), + maskGran = libMaskGran, + write = true, + writeEnable = true, + read = true, + readEnable = true + ) + ) ) } override def generateMemSRAM() = { SRAMMacro( - name=mem_name, - width=memWidth, - depth=memDepth, - family="1rw", - ports=Seq(generateTestPort( - "outer", Some(memWidth), Some(memDepth), maskGran=memMaskGran, - write=true, writeEnable=true, - read=true, readEnable=true - )) + name = mem_name, + width = memWidth, + depth = memDepth, + family = "1rw", + ports = Seq( + generateTestPort( + "outer", + Some(memWidth), + Some(memDepth), + maskGran = memMaskGran, + write = true, + writeEnable = true, + read = true, + readEnable = true + ) + ) ) } override def generateBody() = -""" + """ inst mem_0_0 of awesome_lib_mem inst mem_0_1 of awesome_lib_mem inst mem_0_2 of awesome_lib_mem diff --git a/macros/src/test/scala/barstools/macros/SpecificExamples.scala b/macros/src/test/scala/barstools/macros/SpecificExamples.scala index e41932bb6..334e3a73e 100644 --- a/macros/src/test/scala/barstools/macros/SpecificExamples.scala +++ b/macros/src/test/scala/barstools/macros/SpecificExamples.scala @@ -29,8 +29,8 @@ class WriteEnableTest extends MacroCompilerSpec with HasSRAMGenerator { override val libPrefix = "macros/src/test/resources" - val memSRAMs = mdf.macrolib.Utils.readMDFFromString( -""" + val memSRAMs = mdf.macrolib.Utils + .readMDFFromString(""" [ { "type" : "sram", "name" : "cc_banks_0_ext", @@ -58,7 +58,7 @@ class WriteEnableTest extends MacroCompilerSpec with HasSRAMGenerator { writeToMem(mem, memSRAMs) val output = -""" + """ circuit cc_banks_0_ext : module cc_banks_0_ext : input RW0_addr : UInt<12> @@ -99,8 +99,8 @@ class MaskPortTest extends MacroCompilerSpec with HasSRAMGenerator { override val libPrefix = "macros/src/test/resources" - val memSRAMs = mdf.macrolib.Utils.readMDFFromString( -""" + val memSRAMs = mdf.macrolib.Utils + .readMDFFromString(""" [ { "type" : "sram", "name" : "cc_dir_ext", @@ -131,7 +131,7 @@ class MaskPortTest extends MacroCompilerSpec with HasSRAMGenerator { writeToMem(mem, memSRAMs) val output = -""" + """ circuit cc_dir_ext : module cc_dir_ext : input RW0_addr : UInt<9> @@ -183,8 +183,8 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { override val libPrefix = "macros/src/test/resources" - val memSRAMs = mdf.macrolib.Utils.readMDFFromString( -""" + val memSRAMs = mdf.macrolib.Utils + .readMDFFromString(""" [ { "type" : "sram", "name" : "_T_182_ext", @@ -354,7 +354,7 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { 
writeToMem(mem, memSRAMs) val output = // TODO: check correctness... -""" + """ circuit smem_0_ext : module _T_182_ext : input R0_addr : UInt<6> @@ -1350,14 +1350,14 @@ circuit smem_0_ext : class SmallTagArrayTest extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleTestGenerator { // Test that mapping a smaller memory using a larger lib can still work. - override def memWidth: Int = 26 - override def memDepth: BigInt = BigInt(2) - override def memMaskGran: Option[Int] = Some(26) + override def memWidth: Int = 26 + override def memDepth: BigInt = BigInt(2) + override def memMaskGran: Option[Int] = Some(26) override def memPortPrefix: String = "" - override def libWidth: Int = 32 - override def libDepth: BigInt = BigInt(64) - override def libMaskGran: Option[Int] = Some(1) + override def libWidth: Int = 32 + override def libDepth: BigInt = BigInt(64) + override def libMaskGran: Option[Int] = Some(1) override def libPortPrefix: String = "" override def extraPorts: Seq[MacroExtraPort] = Seq( @@ -1388,73 +1388,73 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { val libSRAMs = Seq( SRAMMacro( - name="SRAM1RW1024x8", - depth=1024, - width=8, - family="1rw", - ports=Seq( + name = "SRAM1RW1024x8", + depth = 1024, + width = 8, + family = "1rw", + ports = Seq( generateReadWritePort("", 8, BigInt(1024)) ) ), SRAMMacro( - name="SRAM1RW512x32", - depth=512, - width=32, - family="1rw", - ports=Seq( + name = "SRAM1RW512x32", + depth = 512, + width = 32, + family = "1rw", + ports = Seq( generateReadWritePort("", 32, BigInt(512)) ) ), SRAMMacro( - name="SRAM1RW64x128", - depth=64, - width=128, - family="1rw", - ports=Seq( + name = "SRAM1RW64x128", + depth = 64, + width = 128, + family = "1rw", + ports = Seq( generateReadWritePort("", 128, BigInt(64)) ) ), SRAMMacro( - name="SRAM1RW64x32", - depth=64, - width=32, - family="1rw", - ports=Seq( + name = "SRAM1RW64x32", + depth = 64, + width = 32, + family = "1rw", + ports = Seq( generateReadWritePort("", 32, BigInt(64)) ) ), SRAMMacro( - name="SRAM1RW64x8", - depth=64, - width=8, - family="1rw", - ports=Seq( + name = "SRAM1RW64x8", + depth = 64, + width = 8, + family = "1rw", + ports = Seq( generateReadWritePort("", 8, BigInt(64)) ) ), SRAMMacro( - name="SRAM1RW512x8", - depth=512, - width=8, - family="1rw", - ports=Seq( + name = "SRAM1RW512x8", + depth = 512, + width = 8, + family = "1rw", + ports = Seq( generateReadWritePort("", 8, BigInt(512)) ) ), SRAMMacro( - name="SRAM2RW64x32", - depth=64, - width=32, - family="1r1w", - ports=Seq( + name = "SRAM2RW64x32", + depth = 64, + width = 32, + family = "1r1w", + ports = Seq( generateReadPort("portA", 32, BigInt(64)), generateWritePort("portB", 32, BigInt(64)) ) ) ) - val memSRAMs = mdf.macrolib.Utils.readMDFFromString( -""" + val memSRAMs = mdf.macrolib.Utils + .readMDFFromString(""" [ { "type": "sram", @@ -1537,7 +1537,7 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { writeToMem(mem, memSRAMs) val output = // TODO: check correctness... 
-""" + """ circuit T_2172_ext : module tag_array_ext : input RW0_addr : UInt<6> diff --git a/macros/src/test/scala/barstools/macros/SynFlops.scala b/macros/src/test/scala/barstools/macros/SynFlops.scala index f12161a18..0d39220ec 100644 --- a/macros/src/test/scala/barstools/macros/SynFlops.scala +++ b/macros/src/test/scala/barstools/macros/SynFlops.scala @@ -4,13 +4,13 @@ package barstools.macros trait HasSynFlopsTestGenerator extends HasSimpleTestGenerator { this: MacroCompilerSpec with HasSRAMGenerator => - def generateFlops: String = { -s""" + def generateFlops: String = { + s""" inst mem_0_0 of split_${lib_name} mem_0_0.${libPortPrefix}_clk <= ${libPortPrefix}_clk mem_0_0.${libPortPrefix}_addr <= ${libPortPrefix}_addr - node ${libPortPrefix}_dout_0_0 = bits(mem_0_0.${libPortPrefix}_dout, ${libWidth-1}, 0) - mem_0_0.${libPortPrefix}_din <= bits(${libPortPrefix}_din, ${libWidth-1}, 0) + node ${libPortPrefix}_dout_0_0 = bits(mem_0_0.${libPortPrefix}_dout, ${libWidth - 1}, 0) + mem_0_0.${libPortPrefix}_din <= bits(${libPortPrefix}_din, ${libWidth - 1}, 0) mem_0_0.${libPortPrefix}_write_en <= and(and(and(${libPortPrefix}_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) node ${libPortPrefix}_dout_0 = ${libPortPrefix}_dout_0_0 ${libPortPrefix}_dout <= mux(UInt<1>("h1"), ${libPortPrefix}_dout_0, UInt<${libWidth}>("h0")) @@ -37,49 +37,66 @@ s""" ${libPortPrefix}_dout <= ram.RW_0.rdata ram.RW_0.wdata <= ${libPortPrefix}_din """ - } - - // If there is no lib, put the flops definition into the body. - abstract override def generateBody = { - if (this.isInstanceOf[HasNoLibTestGenerator]) generateFlops else super.generateBody - } - - // If there is no lib, don't generate a footer, since the flops definition - // will be in the body. - override def generateFooter = { - if (this.isInstanceOf[HasNoLibTestGenerator]) "" else -s""" + } + + // If there is no lib, put the flops definition into the body. + abstract override def generateBody = { + if (this.isInstanceOf[HasNoLibTestGenerator]) generateFlops else super.generateBody + } + + // If there is no lib, don't generate a footer, since the flops definition + // will be in the body. 
+ override def generateFooter = { + if (this.isInstanceOf[HasNoLibTestGenerator]) "" + else + s""" module ${lib_name} : ${generateFooterPorts} ${generateFlops} """ - } + } } -class Synflops2048x8_noLib extends MacroCompilerSpec with HasSRAMGenerator with HasNoLibTestGenerator with HasSynFlopsTestGenerator { +class Synflops2048x8_noLib + extends MacroCompilerSpec + with HasSRAMGenerator + with HasNoLibTestGenerator + with HasSynFlopsTestGenerator { override lazy val memDepth = BigInt(2048) override lazy val memWidth = 8 compileExecuteAndTest(mem, None, v, output, true) } -class Synflops2048x16_noLib extends MacroCompilerSpec with HasSRAMGenerator with HasNoLibTestGenerator with HasSynFlopsTestGenerator { +class Synflops2048x16_noLib + extends MacroCompilerSpec + with HasSRAMGenerator + with HasNoLibTestGenerator + with HasSynFlopsTestGenerator { override lazy val memDepth = BigInt(2048) override lazy val memWidth = 16 compileExecuteAndTest(mem, None, v, output, true) } -class Synflops8192x16_noLib extends MacroCompilerSpec with HasSRAMGenerator with HasNoLibTestGenerator with HasSynFlopsTestGenerator { +class Synflops8192x16_noLib + extends MacroCompilerSpec + with HasSRAMGenerator + with HasNoLibTestGenerator + with HasSynFlopsTestGenerator { override lazy val memDepth = BigInt(8192) override lazy val memWidth = 16 compileExecuteAndTest(mem, None, v, output, true) } -class Synflops2048x16_depth_Lib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with HasSynFlopsTestGenerator { +class Synflops2048x16_depth_Lib + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with HasSynFlopsTestGenerator { override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) override lazy val width = 16 @@ -87,7 +104,11 @@ class Synflops2048x16_depth_Lib extends MacroCompilerSpec with HasSRAMGenerator compileExecuteAndTest(mem, lib, v, output, true) } -class Synflops2048x64_width_Lib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator with HasSynFlopsTestGenerator { +class Synflops2048x64_width_Lib + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleWidthTestGenerator + with HasSynFlopsTestGenerator { override lazy val memWidth = 64 override lazy val libWidth = 8 override lazy val depth = BigInt(1024) @@ -95,7 +116,11 @@ class Synflops2048x64_width_Lib extends MacroCompilerSpec with HasSRAMGenerator compileExecuteAndTest(mem, lib, v, output, true) } -class Synflops_SplitPorts_Read_Write extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with HasSynFlopsTestGenerator { +class Synflops_SplitPorts_Read_Write + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with HasSynFlopsTestGenerator { import mdf.macrolib._ override lazy val memDepth = BigInt(2048) @@ -103,29 +128,29 @@ class Synflops_SplitPorts_Read_Write extends MacroCompilerSpec with HasSRAMGener override lazy val width = 8 override def generateLibSRAM = SRAMMacro( - name=lib_name, - width=width, - depth=libDepth, - family="1r1w", - ports=Seq( + name = lib_name, + width = width, + depth = libDepth, + family = "1r1w", + ports = Seq( generateReadPort("innerA", width, libDepth), generateWritePort("innerB", width, libDepth) ) ) override def generateMemSRAM = SRAMMacro( - name=mem_name, - width=width, - depth=memDepth, - family="1r1w", - ports=Seq( + name = mem_name, + width = width, + depth = memDepth, + family = "1r1w", + ports = Seq( 
generateReadPort("outerB", width, memDepth), generateWritePort("outerA", width, memDepth) ) ) override def generateHeader = -""" + """ circuit target_memory : module target_memory : input outerB_addr : UInt<11> @@ -138,7 +163,7 @@ circuit target_memory : """ override def generateBody = -""" + """ node outerB_addr_sel = bits(outerB_addr, 10, 10) reg outerB_addr_sel_reg : UInt<1>, outerB_clk with : reset => (UInt<1>("h0"), outerB_addr_sel_reg) @@ -166,7 +191,7 @@ circuit target_memory : """ override def generateFooterPorts = -""" + """ input innerA_addr : UInt<10> input innerA_clk : Clock output innerA_dout : UInt<8> @@ -177,7 +202,7 @@ circuit target_memory : """ override def generateFlops = -""" + """ inst mem_0_0 of split_awesome_lib_mem mem_0_0.innerB_clk <= innerB_clk mem_0_0.innerB_addr <= innerB_addr @@ -222,7 +247,11 @@ circuit target_memory : } } -class Synflops_SplitPorts_MaskedMem_Read_MaskedWrite extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator with HasSynFlopsTestGenerator { +class Synflops_SplitPorts_MaskedMem_Read_MaskedWrite + extends MacroCompilerSpec + with HasSRAMGenerator + with HasSimpleDepthTestGenerator + with HasSynFlopsTestGenerator { import mdf.macrolib._ override lazy val memDepth = BigInt(2048) @@ -232,29 +261,29 @@ class Synflops_SplitPorts_MaskedMem_Read_MaskedWrite extends MacroCompilerSpec w override lazy val libMaskGran = Some(1) override def generateLibSRAM = SRAMMacro( - name=lib_name, - width=width, - depth=libDepth, - family="1r1w", - ports=Seq( + name = lib_name, + width = width, + depth = libDepth, + family = "1r1w", + ports = Seq( generateReadPort("innerA", width, libDepth), generateWritePort("innerB", width, libDepth, libMaskGran) ) ) override def generateMemSRAM = SRAMMacro( - name=mem_name, - width=width, - depth=memDepth, - family="1r1w", - ports=Seq( + name = mem_name, + width = width, + depth = memDepth, + family = "1r1w", + ports = Seq( generateReadPort("outerB", width, memDepth), generateWritePort("outerA", width, memDepth, memMaskGran) ) ) override def generateHeader = -""" + """ circuit target_memory : module target_memory : input outerB_addr : UInt<11> @@ -268,7 +297,7 @@ circuit target_memory : """ override def generateBody = -""" + """ node outerB_addr_sel = bits(outerB_addr, 10, 10) reg outerB_addr_sel_reg : UInt<1>, outerB_clk with : reset => (UInt<1>("h0"), outerB_addr_sel_reg) @@ -298,7 +327,7 @@ circuit target_memory : """ override def generateFooterPorts = -""" + """ input innerA_addr : UInt<10> input innerA_clk : Clock output innerA_dout : UInt<8> @@ -310,7 +339,7 @@ circuit target_memory : """ override def generateFlops = -""" + """ inst mem_0_0 of split_awesome_lib_mem inst mem_0_1 of split_awesome_lib_mem inst mem_0_2 of split_awesome_lib_mem From caa1467d8718e530d81b0747539d4280437205ec Mon Sep 17 00:00:00 2001 From: chick Date: Wed, 3 Feb 2021 17:51:30 -0800 Subject: [PATCH 199/273] Reformat all scala files in tapeout - Mostly this reformat comments and large argument lists to classes and methods --- .../transforms/AddSuffixToModuleNames.scala | 9 +- .../transforms/AvoidExtModuleCollisions.scala | 5 +- .../transforms/ConvertToExtModPass.scala | 5 +- .../tapeout/transforms/EnumerateModules.scala | 8 +- .../tapeout/transforms/Generate.scala | 96 +++++++++++-------- .../tapeout/transforms/ReParentCircuit.scala | 9 +- .../transforms/RemoveUnusedModules.scala | 20 ++-- .../tapeout/transforms/ResetInverter.scala | 13 ++- .../tapeout/transforms/retime/Retime.scala | 25 ++--- 
.../tapeout/transforms/utils/FileUtils.scala | 19 ++-- .../transforms/utils/LowerAnnotations.scala | 4 +- .../transforms/utils/ProgrammaticBundle.scala | 12 ++- .../transforms/utils/YamlHelpers.scala | 10 +- .../transforms/ResetInverterSpec.scala | 19 ++-- .../transforms/retime/RetimeSpec.scala | 44 +++++---- 15 files changed, 163 insertions(+), 135 deletions(-) diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala index 1e7eaa26a..e2c5620dc 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala @@ -9,8 +9,7 @@ import firrtl.ir._ import firrtl.stage.Forms import firrtl.stage.TransformManager.TransformDependency -case class KeepNameAnnotation(target: ModuleTarget) - extends SingleTargetAnnotation[ModuleTarget] { +case class KeepNameAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { def duplicate(n: ModuleTarget) = this.copy(n) } @@ -21,8 +20,8 @@ case class ModuleNameSuffixAnnotation(target: CircuitTarget, suffix: String) class AddSuffixToModuleNames extends Transform with DependencyAPIMigration { - override def prerequisites: Seq[TransformDependency] = Forms.LowForm - override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters override def invalidates(a: Transform): Boolean = false @@ -37,7 +36,7 @@ class AddSuffixToModuleNames extends Transform with DependencyAPIMigration { val excludeSet = state.circuit.modules.flatMap { case e: ExtModule => Some(e.name) case m if (m.name == state.circuit.main) => Some(m.name) - case _ => None + case _ => None }.toSet val renamer = { (name: String) => if (excludeSet(name)) name else name + suffix } diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala index 43aea6c13..127a37fa7 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala @@ -14,7 +14,7 @@ case class LinkExtModulesAnnotation(mustLink: Seq[ExtModule]) extends NoTargetAn class AvoidExtModuleCollisions extends Transform with DependencyAPIMigration { - override def prerequisites: Seq[TransformDependency] = Forms.HighForm + override def prerequisites: Seq[TransformDependency] = Forms.HighForm override def optionalPrerequisites: Seq[TransformDependency] = Seq(Dependency[RemoveUnusedModules]) override def optionalPrerequisiteOf: Seq[TransformDependency] = { Forms.HighEmitters :+ Dependency[ReplSeqMem] @@ -24,10 +24,9 @@ class AvoidExtModuleCollisions extends Transform with DependencyAPIMigration { def execute(state: CircuitState): CircuitState = { val mustLink = state.annotations.flatMap { case LinkExtModulesAnnotation(mustLink) => mustLink - case _ => Nil + case _ => Nil } val newAnnos = state.annotations.filterNot(_.isInstanceOf[LinkExtModulesAnnotation]) state.copy(circuit = state.circuit.copy(modules = state.circuit.modules ++ mustLink), annotations = newAnnos) } } - diff --git 
a/tapeout/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala index e6d2272a9..a81937a3a 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala @@ -10,8 +10,7 @@ import firrtl.passes.memlib.ReplSeqMem import firrtl.stage.Forms import firrtl.stage.TransformManager.TransformDependency -case class ConvertToExtModAnnotation(target: ModuleTarget) - extends SingleTargetAnnotation[ModuleTarget] { +case class ConvertToExtModAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { def duplicate(n: ModuleTarget) = this.copy(n) } @@ -20,7 +19,7 @@ case class ConvertToExtModAnnotation(target: ModuleTarget) // otherwise it's left alone. class ConvertToExtMod extends Transform with DependencyAPIMigration { - override def prerequisites: Seq[TransformDependency] = Forms.HighForm + override def prerequisites: Seq[TransformDependency] = Forms.HighForm override def optionalPrerequisites: Seq[TransformDependency] = Seq.empty override def optionalPrerequisiteOf: Seq[TransformDependency] = { Forms.HighEmitters ++ Seq(Dependency[RemoveUnusedModules], Dependency[ReplSeqMem]) diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala index 6a732d754..182b0071b 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala @@ -23,10 +23,12 @@ class EnumerateModulesPass(enumerate: (Module) => Unit) extends Pass { } class EnumerateModules(enumerate: (Module) => Unit) - extends Transform with SeqTransformBased with DependencyAPIMigration { + extends Transform + with SeqTransformBased + with DependencyAPIMigration { - override def prerequisites: Seq[TransformDependency] = Forms.LowForm - override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters override def invalidates(a: Transform): Boolean = false diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala index 5abb71c1d..01ea56e09 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala @@ -13,130 +13,150 @@ trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => parser.note("tapeout options") - parser.opt[String]("harness-o") + parser + .opt[String]("harness-o") .abbr("tho") .valueName("") .foreach { x => tapeoutOptions = tapeoutOptions.copy( harnessOutput = Some(x) ) - }.text { + } + .text { "use this to generate a harness at " } - parser.opt[String]("syn-top") + parser + .opt[String]("syn-top") .abbr("tst") .valueName("") .foreach { x => tapeoutOptions = tapeoutOptions.copy( synTop = Some(x) ) - }.text { + } + .text { "use this to set synTop" } - parser.opt[String]("top-fir") + parser + .opt[String]("top-fir") .abbr("tsf") .valueName("") .foreach { x => tapeoutOptions = tapeoutOptions.copy( topFir = Some(x) ) - }.text { + } + .text { "use this to 
set topFir" } - parser.opt[String]("top-anno-out") + parser + .opt[String]("top-anno-out") .abbr("tsaof") .valueName("") .foreach { x => tapeoutOptions = tapeoutOptions.copy( topAnnoOut = Some(x) ) - }.text { + } + .text { "use this to set topAnnoOut" } - parser.opt[String]("top-dotf-out") + parser + .opt[String]("top-dotf-out") .abbr("tdf") .valueName("") .foreach { x => tapeoutOptions = tapeoutOptions.copy( topDotfOut = Some(x) ) - }.text { + } + .text { "use this to set the filename for the top resource .f file" } - parser.opt[String]("harness-top") + parser + .opt[String]("harness-top") .abbr("tht") .valueName("") .foreach { x => tapeoutOptions = tapeoutOptions.copy( harnessTop = Some(x) ) - }.text { + } + .text { "use this to set harnessTop" } - parser.opt[String]("harness-fir") + parser + .opt[String]("harness-fir") .abbr("thf") .valueName("") .foreach { x => tapeoutOptions = tapeoutOptions.copy( harnessFir = Some(x) ) - }.text { + } + .text { "use this to set harnessFir" } - parser.opt[String]("harness-anno-out") + parser + .opt[String]("harness-anno-out") .abbr("thaof") .valueName("") .foreach { x => tapeoutOptions = tapeoutOptions.copy( harnessAnnoOut = Some(x) ) - }.text { + } + .text { "use this to set harnessAnnoOut" } - parser.opt[String]("harness-dotf-out") + parser + .opt[String]("harness-dotf-out") .abbr("hdf") .valueName("") .foreach { x => tapeoutOptions = tapeoutOptions.copy( harnessDotfOut = Some(x) ) - }.text { + } + .text { "use this to set the filename for the harness resource .f file" } - parser.opt[String]("harness-conf") + parser + .opt[String]("harness-conf") .abbr("thconf") - .valueName ("") + .valueName("") .foreach { x => tapeoutOptions = tapeoutOptions.copy( harnessConf = Some(x) ) - }.text { + } + .text { "use this to set the harness conf file location" } } case class TapeoutOptions( - harnessOutput: Option[String] = None, - synTop: Option[String] = None, - topFir: Option[String] = None, - topAnnoOut: Option[String] = None, - topDotfOut: Option[String] = None, - harnessTop: Option[String] = None, - harnessFir: Option[String] = None, + harnessOutput: Option[String] = None, + synTop: Option[String] = None, + topFir: Option[String] = None, + topAnnoOut: Option[String] = None, + topDotfOut: Option[String] = None, + harnessTop: Option[String] = None, + harnessFir: Option[String] = None, harnessAnnoOut: Option[String] = None, harnessDotfOut: Option[String] = None, - harnessConf: Option[String] = None -) extends LazyLogging + harnessConf: Option[String] = None) + extends LazyLogging // Requires two phases, one to collect modules below synTop in the hierarchy // and a second to remove those modules to generate the test harness @@ -190,9 +210,9 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => annoFile.foreach { annoPath => val outputFile = new java.io.PrintWriter(annoPath) outputFile.write(JsonProtocol.serialize(res.circuitState.annotations.filter(_ match { - case da: DeletedAnnotation => false - case ec: EmittedComponent => false - case ea: EmittedAnnotation[_] => false + case da: DeletedAnnotation => false + case ec: EmittedComponent => false + case ea: EmittedAnnotation[_] => false case fca: FirrtlCircuitAnnotation => false case _ => true }))) @@ -207,7 +227,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => result match { case x: FirrtlExecutionSuccess => dump(x, tapeoutOptions.topFir, tapeoutOptions.topAnnoOut) - x.circuitState.circuit.modules.collect{ case e: ExtModule => e } + 
x.circuitState.circuit.modules.collect { case e: ExtModule => e } case x => throw new Exception(s"executeTop failed while executing FIRRTL!\n${x}") } @@ -220,9 +240,9 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => val harnessAnnos = tapeoutOptions.harnessDotfOut.map(BlackBoxResourceFileNameAnno(_)).toSeq ++ - harnessTop.map(ht => ModuleNameSuffixAnnotation(rootCircuitTarget, s"_in${ht}")) ++ - synTop.map(st => ConvertToExtModAnnotation(rootCircuitTarget.module(st))) :+ - LinkExtModulesAnnotation(topExtModules) + harnessTop.map(ht => ModuleNameSuffixAnnotation(rootCircuitTarget, s"_in${ht}")) ++ + synTop.map(st => ConvertToExtModAnnotation(rootCircuitTarget.module(st))) :+ + LinkExtModulesAnnotation(topExtModules) // For harness run, change some firrtlOptions (below) for harness phase // customTransforms: setup harness transforms, add AvoidExtModuleCollisions @@ -233,7 +253,7 @@ sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => outputFileNameOverride = tapeoutOptions.harnessOutput.get, annotations = firrtlOptions.annotations.map({ case ReplSeqMemAnnotation(i, o) => ReplSeqMemAnnotation(i, tapeoutOptions.harnessConf.get) - case a => a + case a => a }) ++ harnessAnnos ) val harnessResult = firrtl.Driver.execute(optionsManager) diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala index f7929ee65..103561765 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala @@ -8,14 +8,13 @@ import firrtl.options.Dependency import firrtl.stage.Forms import firrtl.stage.TransformManager.TransformDependency -case class ReParentCircuitAnnotation(target: ModuleTarget) - extends SingleTargetAnnotation[ModuleTarget] { +case class ReParentCircuitAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { def duplicate(n: ModuleTarget) = this.copy(n) } class ReParentCircuit extends Transform with DependencyAPIMigration { - override def prerequisites: Seq[TransformDependency] = Forms.HighForm + override def prerequisites: Seq[TransformDependency] = Forms.HighForm override def optionalPrerequisites: Seq[TransformDependency] = Seq.empty override def optionalPrerequisiteOf: Seq[TransformDependency] = { Forms.HighEmitters :+ Dependency[RemoveUnusedModules] @@ -24,8 +23,8 @@ class ReParentCircuit extends Transform with DependencyAPIMigration { def execute(state: CircuitState): CircuitState = { val c = state.circuit - val newTopName = state.annotations.collectFirst { - case ReParentCircuitAnnotation(tgt) => tgt.module + val newTopName = state.annotations.collectFirst { case ReParentCircuitAnnotation(tgt) => + tgt.module } val newCircuit = c.copy(main = newTopName.getOrElse(c.main)) val mainRename = newTopName.map { s => diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala index 7aac89781..5d1cbc6cd 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala @@ -14,7 +14,7 @@ import firrtl.stage.TransformManager.TransformDependency // instance (starting at the main module) class RemoveUnusedModules extends Transform with DependencyAPIMigration { - override def prerequisites: Seq[TransformDependency] = 
Forms.HighForm + override def prerequisites: Seq[TransformDependency] = Forms.HighForm override def optionalPrerequisites: Seq[TransformDependency] = Seq.empty override def optionalPrerequisiteOf: Seq[TransformDependency] = { Forms.HighEmitters :+ Dependency[ReplSeqMem] @@ -22,8 +22,8 @@ class RemoveUnusedModules extends Transform with DependencyAPIMigration { override def invalidates(a: Transform): Boolean = false def execute(state: CircuitState): CircuitState = { - val modulesByName = state.circuit.modules.map{ - case m: Module => (m.name, Some(m)) + val modulesByName = state.circuit.modules.map { + case m: Module => (m.name, Some(m)) case m: ExtModule => (m.name, None) }.toMap @@ -33,7 +33,7 @@ class RemoveUnusedModules extends Transform with DependencyAPIMigration { def someStatements(statement: Statement): Seq[Statement] = statement match { case b: Block => - b.stmts.map{ someStatements(_) } + b.stmts.map { someStatements(_) } .foldLeft(Seq[Statement]())(_ ++ _) case when: Conditionally => someStatements(when.conseq) ++ someStatements(when.alt) @@ -41,11 +41,11 @@ class RemoveUnusedModules extends Transform with DependencyAPIMigration { case _ => Seq() } - someStatements(m.body).map{ - case s: DefInstance => Set(s.module) | getUsedModules(modulesByName(s.module)) - case _ => Set[String]() - }.foldLeft(Set(m.name))(_ | _) - } + someStatements(m.body).map { + case s: DefInstance => Set(s.module) | getUsedModules(modulesByName(s.module)) + case _ => Set[String]() + }.foldLeft(Set(m.name))(_ | _) + } case None => Set.empty[String] } @@ -57,7 +57,7 @@ class RemoveUnusedModules extends Transform with DependencyAPIMigration { val renames = state.renames.getOrElse(RenameMap()) - state.circuit.modules.filterNot { usedModuleSet contains _.name } foreach { x => + state.circuit.modules.filterNot { usedModuleSet contains _.name }.foreach { x => renames.record(ModuleTarget(state.circuit.main, x.name), Nil) } diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala index 1ccb18888..33d2f78aa 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala @@ -20,10 +20,9 @@ object ResetN extends Pass { // Only works on Modules with a Bool port named reset def invertReset(mod: Module): Module = { // Check that it actually has reset - require(mod.ports.exists(p => p.name == "reset" && p.tpe == Bool), - "Can only invert reset on a module with reset!") + require(mod.ports.exists(p => p.name == "reset" && p.tpe == Bool), "Can only invert reset on a module with reset!") // Rename "reset" to "reset_n" - val portsx = mod.ports map { + val portsx = mod.ports.map { case Port(info, "reset", Input, Bool) => Port(info, "reset_n", Input, Bool) case other => other @@ -34,7 +33,7 @@ object ResetN extends Pass { } def run(c: Circuit): Circuit = { - c.copy(modules = c.modules map { + c.copy(modules = c.modules.map { case mod: Module if mod.name == c.main => invertReset(mod) case other => other }) @@ -43,8 +42,8 @@ object ResetN extends Pass { class ResetInverterTransform extends Transform with DependencyAPIMigration { - override def prerequisites: Seq[TransformDependency] = Forms.LowForm - override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = 
Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters override def invalidates(a: Transform): Boolean = false @@ -64,7 +63,7 @@ trait ResetInverter { def invert[T <: chisel3.internal.LegacyModule](module: T): Unit = { chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation with RunFirrtlTransform { def transformClass: Class[_ <: Transform] = classOf[ResetInverterTransform] - def toFirrtl: Annotation = ResetInverterAnnotation(module.toNamed) + def toFirrtl: Annotation = ResetInverterAnnotation(module.toNamed) }) } } diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala index 010ef40b9..931af88d2 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala @@ -14,23 +14,24 @@ case class RetimeAnnotation(target: Named) extends SingleTargetAnnotation[Named] class RetimeTransform extends Transform with DependencyAPIMigration { - override def prerequisites: Seq[TransformDependency] = Forms.LowForm - override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters override def invalidates(a: Transform): Boolean = false override def execute(state: CircuitState): CircuitState = { state.annotations.filter(_.isInstanceOf[RetimeAnnotation]) match { case Nil => state - case seq => seq.foreach { - case RetimeAnnotation(ModuleName(module, CircuitName(_))) => - logger.info(s"Retiming module $module") - case RetimeAnnotation(ComponentName(name, ModuleName(module, CircuitName(_)))) => - logger.info(s"Retiming instance $module.$name") - case _ => - throw new Exception(s"There should be RetimeAnnotations, got ${seq.mkString(" -- ")}") - } - state + case seq => + seq.foreach { + case RetimeAnnotation(ModuleName(module, CircuitName(_))) => + logger.info(s"Retiming module $module") + case RetimeAnnotation(ComponentName(name, ModuleName(module, CircuitName(_)))) => + logger.info(s"Retiming instance $module.$name") + case _ => + throw new Exception(s"There should be RetimeAnnotations, got ${seq.mkString(" -- ")}") + } + state } } } @@ -41,7 +42,7 @@ trait RetimeLib { def retime[T <: chisel3.internal.LegacyModule](module: T): Unit = { chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation with RunFirrtlTransform { def transformClass: Class[_ <: Transform] = classOf[RetimeTransform] - def toFirrtl: Annotation = RetimeAnnotation(module.toNamed) + def toFirrtl: Annotation = RetimeAnnotation(module.toNamed) }) } } diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala index ded0474d7..6e79b7e5f 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala @@ -2,7 +2,7 @@ package barstools.tapeout.transforms -import chisel3.experimental.{ChiselAnnotation, annotate} +import chisel3.experimental.{annotate, ChiselAnnotation} import firrtl._ import firrtl.annotations._ import firrtl.stage.Forms @@ -12,7 +12,7 @@ import firrtl.transforms.BlackBoxTargetDirAnno object 
WriteConfig { def apply(dir: String, file: String, contents: String): Unit = { val writer = new java.io.PrintWriter(new java.io.File(s"$dir/$file")) - writer write contents + writer.write(contents) writer.close() } } @@ -22,14 +22,14 @@ object GetTargetDir { val annos = state.annotations val destDir = annos.map { case BlackBoxTargetDirAnno(s) => Some(s) - case _ => None + case _ => None }.flatten val loc = { if (destDir.isEmpty) "." else destDir.head } val targetDir = new java.io.File(loc) - if(!targetDir.exists()) FileUtils.makeDirectory(targetDir.getAbsolutePath) + if (!targetDir.exists()) FileUtils.makeDirectory(targetDir.getAbsolutePath) loc } } @@ -53,8 +53,8 @@ case class TechnologyLocationAnnotation(dir: String) extends SingleTargetAnnotat class TechnologyLocation extends Transform with DependencyAPIMigration { - override def prerequisites: Seq[TransformDependency] = Forms.LowForm - override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters def execute(state: CircuitState): CircuitState = { @@ -65,18 +65,15 @@ class TechnologyLocation extends Transform with DependencyAPIMigration { val annos = state.annotations val dir = annos.flatMap { case TechnologyLocationAnnotation(dir) => Some(dir) - case _ => None + case _ => None } dir.length match { case 0 => "" case 1 => val targetDir = new java.io.File(dir.head) - if(!targetDir.exists()) throw new Exception(s"Technology yaml directory $targetDir doesn't exist!") + if (!targetDir.exists()) throw new Exception(s"Technology yaml directory $targetDir doesn't exist!") dir.head case _ => throw new Exception("Only 1 tech directory annotation allowed!") } } } - - - diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala index a11bfa195..1f628900d 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala @@ -1,5 +1,5 @@ package barstools.tapeout.transforms object LowerName { - def apply(s: String): String = s.replace(".", "_").replace("[", "_")replace("]", "") -} \ No newline at end of file + def apply(s: String): String = s.replace(".", "_").replace("[", "_").replace("]", "") +} diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala index d73d05db0..8025439c0 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala @@ -4,17 +4,17 @@ import chisel3._ import scala.collection.immutable.ListMap class CustomBundle[T <: Data](elts: (String, T)*) extends Record { - val elements = ListMap(elts map { case (field, elt) => field -> chiselTypeOf(elt) }: _*) + val elements = ListMap(elts.map { case (field, elt) => field -> chiselTypeOf(elt) }: _*) def apply(elt: String): T = elements(elt) - def apply(elt: Int): T = elements(elt.toString) + def apply(elt: Int): T = elements(elt.toString) override def cloneType = (new CustomBundle(elements.toList: _*)).asInstanceOf[this.type] } class CustomIndexedBundle[T <: 
Data](elts: (Int, T)*) extends Record { // Must be String, Data - val elements = ListMap(elts map { case (field, elt) => field.toString -> chiselTypeOf(elt) }: _*) + val elements = ListMap(elts.map { case (field, elt) => field.toString -> chiselTypeOf(elt) }: _*) // TODO: Make an equivalent to the below work publicly (or only on subclasses?) - def indexedElements = ListMap(elts map { case (field, elt) => field -> chiselTypeOf(elt) }: _*) + def indexedElements = ListMap(elts.map { case (field, elt) => field -> chiselTypeOf(elt) }: _*) def apply(elt: Int): T = elements(elt.toString) override def cloneType = (new CustomIndexedBundle(indexedElements.toList: _*)).asInstanceOf[this.type] } @@ -22,5 +22,7 @@ class CustomIndexedBundle[T <: Data](elts: (Int, T)*) extends Record { object CustomIndexedBundle { def apply[T <: Data](gen: T, idxs: Seq[Int]) = new CustomIndexedBundle(idxs.map(_ -> gen): _*) // Allows Vecs of elements of different types/widths - def apply[T <: Data](gen: Seq[T]) = new CustomIndexedBundle(gen.zipWithIndex.map{ case (elt, field) => field -> elt }: _*) + def apply[T <: Data](gen: Seq[T]) = new CustomIndexedBundle(gen.zipWithIndex.map { case (elt, field) => + field -> elt + }: _*) } diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala index 6754136d6..75b0c330c 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala @@ -4,18 +4,18 @@ import net.jcazevedo.moultingyaml._ import java.io.File class YamlFileReader(resource: String) { - def parse[A](file: String = "")(implicit reader: YamlReader[A]) : Seq[A] = { + def parse[A](file: String = "")(implicit reader: YamlReader[A]): Seq[A] = { // If the user doesn't provide a Yaml file name, use defaults val yamlString = file match { - case f if f.isEmpty => + case f if f.isEmpty => // Use example config if no file is provided val stream = getClass.getResourceAsStream(resource) io.Source.fromInputStream(stream).mkString - case f if new File(f).exists => + case f if new File(f).exists => scala.io.Source.fromFile(f).getLines.mkString("\n") - case _ => + case _ => throw new Exception("No valid Yaml file found!") } yamlString.parseYamls.map(x => reader.read(x)) } -} \ No newline at end of file +} diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala index 9a0a1f6d8..d74360b7f 100644 --- a/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala @@ -29,14 +29,17 @@ class ResetNSpec extends FreeSpec with Matchers { "Inverting reset needs to be done throughout module when generating firrtl" in { // generate low-firrtl - val firrtl = (new ChiselStage).execute( - Array("-X", "low"), - Seq(ChiselGeneratorAnnotation(() => new ExampleModuleNeedsResetInverted)) - ).collect { - case EmittedFirrtlCircuitAnnotation(a) => a - case EmittedFirrtlModuleAnnotation(a) => a - }.map(_.value) - .mkString("") + val firrtl = (new ChiselStage) + .execute( + Array("-X", "low"), + Seq(ChiselGeneratorAnnotation(() => new ExampleModuleNeedsResetInverted)) + ) + .collect { + case EmittedFirrtlCircuitAnnotation(a) => a + case EmittedFirrtlModuleAnnotation(a) => a + } + .map(_.value) + .mkString("") firrtl should include("input reset_n 
:") firrtl should include("node reset = not(reset_n)") diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala index d5168292a..0e164521a 100644 --- a/tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala @@ -19,18 +19,20 @@ class RetimeSpec extends FlatSpec with Matchers { } def getLowFirrtl[T <: RawModule](gen: () => T, extraArgs: Array[String] = Array.empty): String = { // generate low firrtl - (new ChiselStage).execute( - Array("-X", "low") ++ extraArgs, - Seq(ChiselGeneratorAnnotation(gen)) - ).collect { - case EmittedFirrtlCircuitAnnotation(a) => a - case EmittedFirrtlModuleAnnotation(a) => a - }.map(_.value) - .mkString("") + (new ChiselStage) + .execute( + Array("-X", "low") ++ extraArgs, + Seq(ChiselGeneratorAnnotation(gen)) + ) + .collect { + case EmittedFirrtlCircuitAnnotation(a) => a + case EmittedFirrtlModuleAnnotation(a) => a + } + .map(_.value) + .mkString("") } - - behavior of "retime library" + behavior.of("retime library") it should "pass simple retime module annotation" in { val gen = () => new RetimeModule @@ -41,15 +43,18 @@ class RetimeSpec extends FlatSpec with Matchers { Logger.setOutput(captor.printStream) // generate low firrtl - val firrtl = getLowFirrtl(gen, - Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final", "--log-level", "info")) + val firrtl = getLowFirrtl( + gen, + Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final", "--log-level", "info") + ) firrtl.nonEmpty should be(true) //Make sure we got the RetimeTransform scheduled - captor.getOutputAsString should include ("barstools.tapeout.transforms.retime.RetimeTransform") + captor.getOutputAsString should include("barstools.tapeout.transforms.retime.RetimeTransform") } - val lines = FileUtils.getLines(s"test_run_dir/$dir/test_run_dir/$dir/final.anno.json") + val lines = FileUtils + .getLines(s"test_run_dir/$dir/test_run_dir/$dir/final.anno.json") .map(normalized) .mkString("\n") lines should include("barstools.tapeout.transforms.retime.RetimeAnnotation") @@ -65,15 +70,18 @@ class RetimeSpec extends FlatSpec with Matchers { Logger.setOutput(captor.printStream) // generate low firrtl - val firrtl = getLowFirrtl(gen, - Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final", "--log-level", "info")) + val firrtl = getLowFirrtl( + gen, + Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final", "--log-level", "info") + ) firrtl.nonEmpty should be(true) //Make sure we got the RetimeTransform scheduled - captor.getOutputAsString should include ("barstools.tapeout.transforms.retime.RetimeTransform") + captor.getOutputAsString should include("barstools.tapeout.transforms.retime.RetimeTransform") } - val lines = FileUtils.getLines(s"test_run_dir/$dir/test_run_dir/$dir/final.anno.json") + val lines = FileUtils + .getLines(s"test_run_dir/$dir/test_run_dir/$dir/final.anno.json") .map(normalized) .mkString("\n") lines should include("barstools.tapeout.transforms.retime.RetimeAnnotation") From d9d9d0fbb520c53f8be06dba33fd942de7ce0e57 Mon Sep 17 00:00:00 2001 From: chick Date: Wed, 3 Feb 2021 21:03:22 -0800 Subject: [PATCH 200/273] Move to scalatest 3.2 Requires updating to AnyFlatSpec where used And different import for Matchers --- build.sbt | 8 ++------ .../test/scala/barstools/macros/MacroCompilerSpec.scala | 4 +++- 
.../barstools/tapeout/transforms/ResetInverterSpec.scala | 5 +++-- .../barstools/tapeout/transforms/retime/RetimeSpec.scala | 5 +++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/build.sbt b/build.sbt index fd9638b11..83a5f93f8 100644 --- a/build.sbt +++ b/build.sbt @@ -13,9 +13,8 @@ lazy val commonSettings = Seq( libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) }, - libraryDependencies in Test ++= Seq( - "org.scalatest" %% "scalatest" % "2.2.5" % "test", - "org.scalacheck" %% "scalacheck" % "1.12.4" % "test" + libraryDependencies ++= Seq( + "org.scalatest" %% "scalatest" % "3.2.2" % "test", ), resolvers ++= Seq( Resolver.sonatypeRepo("snapshots"), @@ -31,9 +30,6 @@ lazy val macros = (project in file("macros")) .dependsOn(mdf) .settings(commonSettings) .settings( - libraryDependencies ++= Seq( - "edu.berkeley.cs" %% "firrtl-interpreter" % "1.4.+" % Test - ), mainClass := Some("barstools.macros.MacroCompiler") ) .enablePlugins(sbtassembly.AssemblyPlugin) diff --git a/macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala b/macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala index 9140ce24d..2b2392272 100644 --- a/macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala +++ b/macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala @@ -6,10 +6,12 @@ import firrtl.Parser.parse import firrtl.ir.{Circuit, NoInfo} import firrtl.passes.RemoveEmpty import mdf.macrolib.SRAMMacro +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers import java.io.File -abstract class MacroCompilerSpec extends org.scalatest.FlatSpec with org.scalatest.Matchers { +abstract class MacroCompilerSpec extends AnyFlatSpec with Matchers { import scala.language.implicitConversions implicit def String2SomeString(i: String): Option[String] = Some(i) val testDir: String = "test_run_dir/macros" diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala index d74360b7f..701c78453 100644 --- a/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala @@ -5,7 +5,8 @@ package barstools.tapeout.transforms import chisel3._ import chisel3.stage.{ChiselGeneratorAnnotation, ChiselStage} import firrtl.{EmittedFirrtlCircuitAnnotation, EmittedFirrtlModuleAnnotation} -import org.scalatest.{FreeSpec, Matchers} +import org.scalatest.freespec.AnyFreeSpec +import org.scalatest.matchers.should.Matchers class ExampleModuleNeedsResetInverted extends Module with ResetInverter { val io = IO(new Bundle { @@ -19,7 +20,7 @@ class ExampleModuleNeedsResetInverted extends Module with ResetInverter { invert(this) } -class ResetNSpec extends FreeSpec with Matchers { +class ResetNSpec extends AnyFreeSpec with Matchers { "Inverting reset needs to be done throughout module in Chirrtl" in { val chirrtl = (new ChiselStage).emitChirrtl(new ExampleModuleNeedsResetInverted) chirrtl should include("input reset :") diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala index 0e164521a..a086b0b95 100644 --- a/tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala +++ 
b/tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala @@ -6,9 +6,10 @@ import chisel3._ import chisel3.stage.{ChiselGeneratorAnnotation, ChiselStage} import firrtl.{EmittedFirrtlCircuitAnnotation, EmittedFirrtlModuleAnnotation, FileUtils} import logger.Logger -import org.scalatest.{FlatSpec, Matchers} +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers -class RetimeSpec extends FlatSpec with Matchers { +class RetimeSpec extends AnyFlatSpec with Matchers { def normalized(s: String): String = { require(!s.contains("\n")) s.replaceAll("\\s+", " ").trim From ca4013b830bee30041a81b3916d7785180fb0b82 Mon Sep 17 00:00:00 2001 From: chick Date: Mon, 8 Feb 2021 09:09:19 -0800 Subject: [PATCH 201/273] Remove deprecated Driver stuff macros package - Fix name reference and weird .get.get in CostMetric - Update to DependencyAPIMigration - MacroCompilerTransform - MacroCompilerOptimizations - Delete unused class MacroCompiler - Remove use of ExecutionOptionsManager in object MacroCompiler - Removed stack trace when no arguments from CLI, just give message requiring args - Update version to 0.4-SNAPSHOT --- build.sbt | 2 +- .../scala/barstools/macros/CostMetric.scala | 4 +- .../barstools/macros/MacroCompiler.scala | 63 +++++++++---------- 3 files changed, 33 insertions(+), 36 deletions(-) diff --git a/build.sbt b/build.sbt index 83a5f93f8..d88408656 100644 --- a/build.sbt +++ b/build.sbt @@ -7,7 +7,7 @@ val defaultVersions = Map( lazy val commonSettings = Seq( organization := "edu.berkeley.cs", - version := "0.1-SNAPSHOT", + version := "0.4-SNAPSHOT", scalaVersion := "2.12.10", scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls", "-Xsource:2.11"), libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { diff --git a/macros/src/main/scala/barstools/macros/CostMetric.scala b/macros/src/main/scala/barstools/macros/CostMetric.scala index f39303d30..3b9de124c 100644 --- a/macros/src/main/scala/barstools/macros/CostMetric.scala +++ b/macros/src/main/scala/barstools/macros/CostMetric.scala @@ -186,7 +186,7 @@ object CostMetric { * the metric. */ def registerCostMetric(createFuncHelper: CostMetricCompanion): Unit = { - costMetricCreators.update(createFuncHelper.name, createFuncHelper) + costMetricCreators.update(createFuncHelper.name(), createFuncHelper) } /** Select a cost metric from string. 
*/ @@ -196,7 +196,7 @@ object CostMetric { } else if (!costMetricCreators.contains(m)) { throw new IllegalArgumentException("Invalid cost metric " + m) } else { - costMetricCreators.get(m).get.construct(params) + costMetricCreators(m).construct(params) } } } diff --git a/macros/src/main/scala/barstools/macros/MacroCompiler.scala b/macros/src/main/scala/barstools/macros/MacroCompiler.scala index 5ecfea8f7..bfcf78dad 100644 --- a/macros/src/main/scala/barstools/macros/MacroCompiler.scala +++ b/macros/src/main/scala/barstools/macros/MacroCompiler.scala @@ -8,10 +8,11 @@ package barstools.macros import barstools.macros.Utils._ -import firrtl.CompilerUtils.getLoweringTransforms import firrtl.Utils._ import firrtl.annotations._ import firrtl.ir._ +import firrtl.stage.{FirrtlSourceAnnotation, FirrtlStage, Forms, OutputFileAnnotation, RunFirrtlTransformAnnotation} +import firrtl.transforms.NoDCEAnnotation import firrtl.{PrimOps, _} import mdf.macrolib._ @@ -692,9 +693,11 @@ class MacroCompilerPass( } } -class MacroCompilerTransform extends Transform { - def inputForm = MidForm - def outputForm = MidForm +class MacroCompilerTransform extends Transform with DependencyAPIMigration { + override def prerequisites = Forms.LowForm + override def optionalPrerequisites = Forms.LowFormOptimized + override def optionalPrerequisiteOf = Forms.LowEmitters + override def invalidates(a: Transform) = false def execute(state: CircuitState) = state.annotations.collect { case a: MacroCompilerAnnotation => a } match { case Seq(anno: MacroCompilerAnnotation) => @@ -764,16 +767,16 @@ class MacroCompilerTransform extends Transform { }) ) ) - (transforms.foldLeft(state))((s, xform) => xform.runTransform(s)).copy(form = outputForm) + (transforms.foldLeft(state))((s, xform) => xform.runTransform(s)) case _ => state } } -// FIXME: Use firrtl.LowerFirrtlOptimizations -class MacroCompilerOptimizations extends SeqTransform { - def inputForm: CircuitForm = LowForm - - def outputForm: CircuitForm = LowForm +class MacroCompilerOptimizations extends SeqTransform with DependencyAPIMigration { + override def prerequisites = Forms.LowForm + override def optionalPrerequisites = Forms.LowFormOptimized + override def optionalPrerequisiteOf = Forms.LowEmitters + override def invalidates(a: Transform) = false def transforms: Seq[Transform] = Seq( passes.RemoveValidIf, @@ -786,15 +789,6 @@ class MacroCompilerOptimizations extends SeqTransform { ) } -class MacroCompiler extends Compiler { - def emitter: Emitter = new VerilogEmitter - - def transforms: Seq[Transform] = - Seq(new MacroCompilerTransform) ++ - getLoweringTransforms(firrtl.ChirrtlForm, firrtl.LowForm) ++ - Seq(new MacroCompilerOptimizations) -} - object MacroCompiler extends App { sealed trait MacroParam case object Macros extends MacroParam @@ -890,7 +884,7 @@ object MacroCompiler extends App { MacroCompilerAnnotation( circuit.main, MacroCompilerAnnotation.Params( - params.get(Macros).get, + params(Macros), params.get(MacrosFormat), params.get(Library), params.get(HammerIR), @@ -905,20 +899,20 @@ object MacroCompiler extends App { ) // The actual MacroCompilerTransform basically just generates an input circuit - val macroCompilerInput = CircuitState(circuit, MidForm, annotations) + val macroCompilerInput = CircuitState(circuit, annotations) val macroCompiled = (new MacroCompilerTransform).execute(macroCompilerInput) - // Since the MacroCompiler defines its own CLI, reconcile this with FIRRTL options - val firOptions = new ExecutionOptionsManager("macrocompiler") with 
HasFirrtlOptions { - firrtlOptions = FirrtlExecutionOptions( - outputFileNameOverride = params.get(Verilog).getOrElse(""), - noDCE = true, - firrtlSource = Some(macroCompiled.circuit.serialize) - ) - } - // Run FIRRTL compiler - Driver.execute(firOptions) + (new FirrtlStage).execute( + Array.empty, + Seq( + OutputFileAnnotation(params.getOrElse(Verilog, "")), + RunFirrtlTransformAnnotation(new VerilogEmitter), + EmitCircuitAnnotation(classOf[VerilogEmitter]), + NoDCEAnnotation, + FirrtlSourceAnnotation(macroCompiled.circuit.serialize) + ) + ) params.get(HammerIR) match { case Some(hammerIRFile: String) => { @@ -947,8 +941,11 @@ object MacroCompiler extends App { } } catch { case e: java.util.NoSuchElementException => - e.printStackTrace() - println(usage) + if (args.isEmpty) { + println("Command line arguments must be specified") + } else { + e.printStackTrace() + } e.printStackTrace() sys.exit(1) case e: MacroCompilerException => From afcdcc6c2d3a1fdf23a339c7ff6406457270175b Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 9 Feb 2021 14:04:24 -0800 Subject: [PATCH 202/273] Modernize deprecated Chisel/Firrtl constructs - Build out a stage for tapeout - Refactor annotation construction - Create a CLI handler - create a TapeOutStage - Remove outputForm reference from EnumerateModules - New GenerateTopAndHarness is fresh implmentation of Generate.scala - GenerateTopSpec is a work in progress --- .../tapeout/transforms/EnumerateModules.scala | 3 +- .../tapeout/transforms/Generate.scala | 548 +++++++++--------- .../transforms/GenerateTopAndHarness.scala | 127 ++++ .../transforms/RemoveUnusedModules.scala | 2 +- .../transforms/stage/TapeoutStage.scala | 181 ++++++ .../tapeout/transforms/GenerateTopSpec.scala | 42 ++ 6 files changed, 626 insertions(+), 277 deletions(-) create mode 100644 tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala create mode 100644 tapeout/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala create mode 100644 tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala index 182b0071b..47dae82c5 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala @@ -35,7 +35,6 @@ class EnumerateModules(enumerate: (Module) => Unit) def transforms: Seq[Transform] = Seq(new EnumerateModulesPass(enumerate)) def execute(state: CircuitState): CircuitState = { - val ret = runTransforms(state) - CircuitState(ret.circuit, outputForm, ret.annotations, ret.renames) + runTransforms(state) } } diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala index 01ea56e09..e20f72e55 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala @@ -1,274 +1,274 @@ -package barstools.tapeout.transforms - -import firrtl._ -import firrtl.annotations._ -import firrtl.ir._ -import firrtl.passes.memlib.ReplSeqMemAnnotation -import firrtl.stage.FirrtlCircuitAnnotation -import firrtl.transforms.BlackBoxResourceFileNameAnno -import logger.LazyLogging - -trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => - var tapeoutOptions = TapeoutOptions() - - parser.note("tapeout 
options") - - parser - .opt[String]("harness-o") - .abbr("tho") - .valueName("") - .foreach { x => - tapeoutOptions = tapeoutOptions.copy( - harnessOutput = Some(x) - ) - } - .text { - "use this to generate a harness at " - } - - parser - .opt[String]("syn-top") - .abbr("tst") - .valueName("") - .foreach { x => - tapeoutOptions = tapeoutOptions.copy( - synTop = Some(x) - ) - } - .text { - "use this to set synTop" - } - - parser - .opt[String]("top-fir") - .abbr("tsf") - .valueName("") - .foreach { x => - tapeoutOptions = tapeoutOptions.copy( - topFir = Some(x) - ) - } - .text { - "use this to set topFir" - } - - parser - .opt[String]("top-anno-out") - .abbr("tsaof") - .valueName("") - .foreach { x => - tapeoutOptions = tapeoutOptions.copy( - topAnnoOut = Some(x) - ) - } - .text { - "use this to set topAnnoOut" - } - - parser - .opt[String]("top-dotf-out") - .abbr("tdf") - .valueName("") - .foreach { x => - tapeoutOptions = tapeoutOptions.copy( - topDotfOut = Some(x) - ) - } - .text { - "use this to set the filename for the top resource .f file" - } - - parser - .opt[String]("harness-top") - .abbr("tht") - .valueName("") - .foreach { x => - tapeoutOptions = tapeoutOptions.copy( - harnessTop = Some(x) - ) - } - .text { - "use this to set harnessTop" - } - - parser - .opt[String]("harness-fir") - .abbr("thf") - .valueName("") - .foreach { x => - tapeoutOptions = tapeoutOptions.copy( - harnessFir = Some(x) - ) - } - .text { - "use this to set harnessFir" - } - - parser - .opt[String]("harness-anno-out") - .abbr("thaof") - .valueName("") - .foreach { x => - tapeoutOptions = tapeoutOptions.copy( - harnessAnnoOut = Some(x) - ) - } - .text { - "use this to set harnessAnnoOut" - } - - parser - .opt[String]("harness-dotf-out") - .abbr("hdf") - .valueName("") - .foreach { x => - tapeoutOptions = tapeoutOptions.copy( - harnessDotfOut = Some(x) - ) - } - .text { - "use this to set the filename for the harness resource .f file" - } - - parser - .opt[String]("harness-conf") - .abbr("thconf") - .valueName("") - .foreach { x => - tapeoutOptions = tapeoutOptions.copy( - harnessConf = Some(x) - ) - } - .text { - "use this to set the harness conf file location" - } - -} - -case class TapeoutOptions( - harnessOutput: Option[String] = None, - synTop: Option[String] = None, - topFir: Option[String] = None, - topAnnoOut: Option[String] = None, - topDotfOut: Option[String] = None, - harnessTop: Option[String] = None, - harnessFir: Option[String] = None, - harnessAnnoOut: Option[String] = None, - harnessDotfOut: Option[String] = None, - harnessConf: Option[String] = None) - extends LazyLogging - -// Requires two phases, one to collect modules below synTop in the hierarchy -// and a second to remove those modules to generate the test harness -sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => - lazy val optionsManager = { - val optionsManager = new ExecutionOptionsManager("tapeout") with HasFirrtlOptions with HasTapeoutOptions - if (!optionsManager.parse(args)) { - throw new Exception("Error parsing options!") - } - optionsManager - } - lazy val tapeoutOptions = optionsManager.tapeoutOptions - // Tapeout options - lazy val synTop = tapeoutOptions.synTop - lazy val harnessTop = tapeoutOptions.harnessTop - lazy val firrtlOptions = optionsManager.firrtlOptions - // FIRRTL options - lazy val annoFiles = firrtlOptions.annotationFileNames - - // order is determined by DependencyAPIMigration - val topTransforms = Seq( - new ReParentCircuit, - new RemoveUnusedModules - ) - - lazy val rootCircuitTarget 
= CircuitTarget(harnessTop.get) - - lazy val topAnnos = synTop.map(st => ReParentCircuitAnnotation(rootCircuitTarget.module(st))) ++ - tapeoutOptions.topDotfOut.map(BlackBoxResourceFileNameAnno(_)) - - lazy val topOptions = firrtlOptions.copy( - customTransforms = firrtlOptions.customTransforms ++ topTransforms, - annotations = firrtlOptions.annotations ++ topAnnos - ) - - // order is determined by DependencyAPIMigration - val harnessTransforms = Seq( - new ConvertToExtMod, - new RemoveUnusedModules, - new AvoidExtModuleCollisions, - new AddSuffixToModuleNames - ) - - // Dump firrtl and annotation files - protected def dump(res: FirrtlExecutionSuccess, firFile: Option[String], annoFile: Option[String]): Unit = { - firFile.foreach { firPath => - val outputFile = new java.io.PrintWriter(firPath) - outputFile.write(res.circuitState.circuit.serialize) - outputFile.close() - } - annoFile.foreach { annoPath => - val outputFile = new java.io.PrintWriter(annoPath) - outputFile.write(JsonProtocol.serialize(res.circuitState.annotations.filter(_ match { - case da: DeletedAnnotation => false - case ec: EmittedComponent => false - case ea: EmittedAnnotation[_] => false - case fca: FirrtlCircuitAnnotation => false - case _ => true - }))) - outputFile.close() - } - } - - // Top Generation - protected def executeTop(): Seq[ExtModule] = { - optionsManager.firrtlOptions = topOptions - val result = firrtl.Driver.execute(optionsManager) - result match { - case x: FirrtlExecutionSuccess => - dump(x, tapeoutOptions.topFir, tapeoutOptions.topAnnoOut) - x.circuitState.circuit.modules.collect { case e: ExtModule => e } - case x => - throw new Exception(s"executeTop failed while executing FIRRTL!\n${x}") - } - } - - // Top and harness generation - protected def executeTopAndHarness(): Unit = { - // Execute top and get list of ExtModules to avoid collisions - val topExtModules = executeTop() - - val harnessAnnos = - tapeoutOptions.harnessDotfOut.map(BlackBoxResourceFileNameAnno(_)).toSeq ++ - harnessTop.map(ht => ModuleNameSuffixAnnotation(rootCircuitTarget, s"_in${ht}")) ++ - synTop.map(st => ConvertToExtModAnnotation(rootCircuitTarget.module(st))) :+ - LinkExtModulesAnnotation(topExtModules) - - // For harness run, change some firrtlOptions (below) for harness phase - // customTransforms: setup harness transforms, add AvoidExtModuleCollisions - // outputFileNameOverride: change to harnessOutput - // conf file must change to harnessConf by mapping annotations - optionsManager.firrtlOptions = firrtlOptions.copy( - customTransforms = firrtlOptions.customTransforms ++ harnessTransforms, - outputFileNameOverride = tapeoutOptions.harnessOutput.get, - annotations = firrtlOptions.annotations.map({ - case ReplSeqMemAnnotation(i, o) => ReplSeqMemAnnotation(i, tapeoutOptions.harnessConf.get) - case a => a - }) ++ harnessAnnos - ) - val harnessResult = firrtl.Driver.execute(optionsManager) - harnessResult match { - case x: FirrtlExecutionSuccess => dump(x, tapeoutOptions.harnessFir, tapeoutOptions.harnessAnnoOut) - case x => throw new Exception(s"executeHarness failed while executing FIRRTL!\n${x}") - } - } -} - -object GenerateTop extends App with GenerateTopAndHarnessApp { - // Only need a single phase to generate the top module - executeTop() -} - -object GenerateTopAndHarness extends App with GenerateTopAndHarnessApp { - executeTopAndHarness() -} +//package barstools.tapeout.transforms +// +//import firrtl._ +//import firrtl.annotations._ +//import firrtl.ir._ +//import firrtl.passes.memlib.ReplSeqMemAnnotation +//import 
firrtl.stage.FirrtlCircuitAnnotation +//import firrtl.transforms.BlackBoxResourceFileNameAnno +//import logger.LazyLogging +// +//trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => +// var tapeoutOptions = TapeoutOptions() +// +// parser.note("tapeout options") +// +// parser +// .opt[String]("harness-o") +// .abbr("tho") +// .valueName("") +// .foreach { x => +// tapeoutOptions = tapeoutOptions.copy( +// harnessOutput = Some(x) +// ) +// } +// .text { +// "use this to generate a harness at " +// } +// +// parser +// .opt[String]("syn-top") +// .abbr("tst") +// .valueName("") +// .foreach { x => +// tapeoutOptions = tapeoutOptions.copy( +// synTop = Some(x) +// ) +// } +// .text { +// "use this to set synTop" +// } +// +// parser +// .opt[String]("top-fir") +// .abbr("tsf") +// .valueName("") +// .foreach { x => +// tapeoutOptions = tapeoutOptions.copy( +// topFir = Some(x) +// ) +// } +// .text { +// "use this to set topFir" +// } +// +// parser +// .opt[String]("top-anno-out") +// .abbr("tsaof") +// .valueName("") +// .foreach { x => +// tapeoutOptions = tapeoutOptions.copy( +// topAnnoOut = Some(x) +// ) +// } +// .text { +// "use this to set topAnnoOut" +// } +// +// parser +// .opt[String]("top-dotf-out") +// .abbr("tdf") +// .valueName("") +// .foreach { x => +// tapeoutOptions = tapeoutOptions.copy( +// topDotfOut = Some(x) +// ) +// } +// .text { +// "use this to set the filename for the top resource .f file" +// } +// +// parser +// .opt[String]("harness-top") +// .abbr("tht") +// .valueName("") +// .foreach { x => +// tapeoutOptions = tapeoutOptions.copy( +// harnessTop = Some(x) +// ) +// } +// .text { +// "use this to set harnessTop" +// } +// +// parser +// .opt[String]("harness-fir") +// .abbr("thf") +// .valueName("") +// .foreach { x => +// tapeoutOptions = tapeoutOptions.copy( +// harnessFir = Some(x) +// ) +// } +// .text { +// "use this to set harnessFir" +// } +// +// parser +// .opt[String]("harness-anno-out") +// .abbr("thaof") +// .valueName("") +// .foreach { x => +// tapeoutOptions = tapeoutOptions.copy( +// harnessAnnoOut = Some(x) +// ) +// } +// .text { +// "use this to set harnessAnnoOut" +// } +// +// parser +// .opt[String]("harness-dotf-out") +// .abbr("hdf") +// .valueName("") +// .foreach { x => +// tapeoutOptions = tapeoutOptions.copy( +// harnessDotfOut = Some(x) +// ) +// } +// .text { +// "use this to set the filename for the harness resource .f file" +// } +// +// parser +// .opt[String]("harness-conf") +// .abbr("thconf") +// .valueName("") +// .foreach { x => +// tapeoutOptions = tapeoutOptions.copy( +// harnessConf = Some(x) +// ) +// } +// .text { +// "use this to set the harness conf file location" +// } +// +//} +// +//case class TapeoutOptions( +// harnessOutput: Option[String] = None, +// synTop: Option[String] = None, +// topFir: Option[String] = None, +// topAnnoOut: Option[String] = None, +// topDotfOut: Option[String] = None, +// harnessTop: Option[String] = None, +// harnessFir: Option[String] = None, +// harnessAnnoOut: Option[String] = None, +// harnessDotfOut: Option[String] = None, +// harnessConf: Option[String] = None) +// extends LazyLogging +// +//// Requires two phases, one to collect modules below synTop in the hierarchy +//// and a second to remove those modules to generate the test harness +//sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => +// lazy val optionsManager = { +// val optionsManager = new ExecutionOptionsManager("tapeout") with HasFirrtlOptions with 
HasTapeoutOptions +// if (!optionsManager.parse(args)) { +// throw new Exception("Error parsing options!") +// } +// optionsManager +// } +// lazy val tapeoutOptions = optionsManager.tapeoutOptions +// // Tapeout options +// lazy val synTop = tapeoutOptions.synTop +// lazy val harnessTop = tapeoutOptions.harnessTop +// lazy val firrtlOptions = optionsManager.firrtlOptions +// // FIRRTL options +// lazy val annoFiles = firrtlOptions.annotationFileNames +// +// // order is determined by DependencyAPIMigration +// val topTransforms = Seq( +// new ReParentCircuit, +// new RemoveUnusedModules +// ) +// +// lazy val rootCircuitTarget = CircuitTarget(harnessTop.get) +// +// lazy val topAnnos = synTop.map(st => ReParentCircuitAnnotation(rootCircuitTarget.module(st))) ++ +// tapeoutOptions.topDotfOut.map(BlackBoxResourceFileNameAnno(_)) +// +// lazy val topOptions = firrtlOptions.copy( +// customTransforms = firrtlOptions.customTransforms ++ topTransforms, +// annotations = firrtlOptions.annotations ++ topAnnos +// ) +// +// // order is determined by DependencyAPIMigration +// val harnessTransforms = Seq( +// new ConvertToExtMod, +// new RemoveUnusedModules, +// new AvoidExtModuleCollisions, +// new AddSuffixToModuleNames +// ) +// +// // Dump firrtl and annotation files +// protected def dump(res: FirrtlExecutionSuccess, firFile: Option[String], annoFile: Option[String]): Unit = { +// firFile.foreach { firPath => +// val outputFile = new java.io.PrintWriter(firPath) +// outputFile.write(res.circuitState.circuit.serialize) +// outputFile.close() +// } +// annoFile.foreach { annoPath => +// val outputFile = new java.io.PrintWriter(annoPath) +// outputFile.write(JsonProtocol.serialize(res.circuitState.annotations.filter(_ match { +// case da: DeletedAnnotation => false +// case ec: EmittedComponent => false +// case ea: EmittedAnnotation[_] => false +// case fca: FirrtlCircuitAnnotation => false +// case _ => true +// }))) +// outputFile.close() +// } +// } +// +// // Top Generation +// protected def executeTop(): Seq[ExtModule] = { +// optionsManager.firrtlOptions = topOptions +// val result = firrtl.Driver.execute(optionsManager) +// result match { +// case x: FirrtlExecutionSuccess => +// dump(x, tapeoutOptions.topFir, tapeoutOptions.topAnnoOut) +// x.circuitState.circuit.modules.collect { case e: ExtModule => e } +// case x => +// throw new Exception(s"executeTop failed while executing FIRRTL!\n${x}") +// } +// } +// +// // Top and harness generation +// protected def executeTopAndHarness(): Unit = { +// // Execute top and get list of ExtModules to avoid collisions +// val topExtModules = executeTop() +// +// val harnessAnnos = +// tapeoutOptions.harnessDotfOut.map(BlackBoxResourceFileNameAnno(_)).toSeq ++ +// harnessTop.map(ht => ModuleNameSuffixAnnotation(rootCircuitTarget, s"_in${ht}")) ++ +// synTop.map(st => ConvertToExtModAnnotation(rootCircuitTarget.module(st))) :+ +// LinkExtModulesAnnotation(topExtModules) +// +// // For harness run, change some firrtlOptions (below) for harness phase +// // customTransforms: setup harness transforms, add AvoidExtModuleCollisions +// // outputFileNameOverride: change to harnessOutput +// // conf file must change to harnessConf by mapping annotations +// optionsManager.firrtlOptions = firrtlOptions.copy( +// customTransforms = firrtlOptions.customTransforms ++ harnessTransforms, +// outputFileNameOverride = tapeoutOptions.harnessOutput.get, +// annotations = firrtlOptions.annotations.map({ +// case ReplSeqMemAnnotation(i, o) => ReplSeqMemAnnotation(i, 
tapeoutOptions.harnessConf.get) +// case a => a +// }) ++ harnessAnnos +// ) +// val harnessResult = firrtl.Driver.execute(optionsManager) +// harnessResult match { +// case x: FirrtlExecutionSuccess => dump(x, tapeoutOptions.harnessFir, tapeoutOptions.harnessAnnoOut) +// case x => throw new Exception(s"executeHarness failed while executing FIRRTL!\n${x}") +// } +// } +//} +// +////object GenerateTop extends App with GenerateTopAndHarnessApp { +//// // Only need a single phase to generate the top module +//// executeTop() +////} +//// +////object GenerateTopAndHarness extends App with GenerateTopAndHarnessApp { +//// executeTopAndHarness() +////} diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala new file mode 100644 index 000000000..108d3c139 --- /dev/null +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -0,0 +1,127 @@ +package barstools.tapeout.transforms + +import barstools.tapeout.transforms.stage._ +import firrtl._ +import firrtl.annotations._ +import firrtl.ir._ +import firrtl.options.{Dependency, InputAnnotationFileAnnotation, StageMain} +import firrtl.passes.memlib.ReplSeqMemAnnotation +import firrtl.stage.{FirrtlCircuitAnnotation, FirrtlStage, OutputFileAnnotation, RunFirrtlTransformAnnotation} +import firrtl.transforms.BlackBoxResourceFileNameAnno +import logger.LazyLogging + +// Requires two phases, one to collect modules below synTop in the hierarchy +// and a second to remove those modules to generate the test harness +private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogging { + val synTop: Option[String] = annotations.collectFirst { case SynTopAnnotation(s) => s } + val topFir: Option[String] = annotations.collectFirst { case TopFirAnnotation(s) => s } + val topAnnoOut: Option[String] = annotations.collectFirst { case TopAnnoOutAnnotation(s) => s } + val harnessTop: Option[String] = annotations.collectFirst { case HarnessTopAnnotation(h) => h } + val harnessConf: Option[String] = annotations.collectFirst { case HarnessConfAnnotation(h) => h } + val harnessOutput: Option[String] = annotations.collectFirst { case HarnessOutputAnnotation(h) => h } + val topDotfOut: Option[String] = annotations.collectFirst { case TopDotfOutAnnotation(h) => h } + val harnessDotfOut: Option[String] = annotations.collectFirst { case HarnessDotfOutAnnotation(h) => h } + + val annoFiles: List[String] = annotations.flatMap { + case InputAnnotationFileAnnotation(f) => Some(f) + case _ => None + }.toList + + // order is determined by DependencyAPIMigration + val topTransforms = Seq( + new ReParentCircuit, + new RemoveUnusedModules + ) + + lazy val rootCircuitTarget = CircuitTarget(harnessTop.get) + + val topAnnos = synTop.map(st => ReParentCircuitAnnotation(rootCircuitTarget.module(st))) ++ + topDotfOut.map(BlackBoxResourceFileNameAnno) + + // order is determined by DependencyAPIMigration + val harnessTransforms = Seq( + new ConvertToExtMod, + new RemoveUnusedModules, + new AvoidExtModuleCollisions, + new AddSuffixToModuleNames + ) + + // Dump firrtl and annotation files + protected def dump( + circuit: Circuit, + annotations: AnnotationSeq, + firFile: Option[String], + annoFile: Option[String] + ): Unit = { + firFile.foreach { firPath => + val outputFile = new java.io.PrintWriter(firPath) + outputFile.write(circuit.serialize) + outputFile.close() + } + annoFile.foreach { annoPath => + val outputFile = new 
java.io.PrintWriter(annoPath) + outputFile.write(JsonProtocol.serialize(annotations.filter(_ match { + case _: DeletedAnnotation => false + case _: EmittedComponent => false + case _: EmittedAnnotation[_] => false + case _: FirrtlCircuitAnnotation => false + case _ => true + }))) + outputFile.close() + } + } + + // Top Generation + def executeTop(): Seq[ExtModule] = { + val annos = new FirrtlStage().execute(Array.empty, annotations) + annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { + case Some(circuit) => + dump(circuit, annos, topFir, topAnnoOut) + circuit.modules.collect { case e: ExtModule => e } + case _ => + throw new Exception(s"executeTop failed while executing FIRRTL!\n") + } + } + + // Top and harness generation + def executeTopAndHarness(): Unit = { + // Execute top and get list of ExtModules to avoid collisions + val topExtModules = executeTop() + + val harnessAnnos = + harnessDotfOut.map(BlackBoxResourceFileNameAnno).toSeq ++ + harnessTop.map(ht => ModuleNameSuffixAnnotation(rootCircuitTarget, s"_in${ht}")) ++ + synTop.map(st => ConvertToExtModAnnotation(rootCircuitTarget.module(st))) ++ + Seq( + LinkExtModulesAnnotation(topExtModules), + RunFirrtlTransformAnnotation(Dependency[ConvertToExtMod]), + RunFirrtlTransformAnnotation(Dependency[RemoveUnusedModules]), + RunFirrtlTransformAnnotation(Dependency[AvoidExtModuleCollisions]), + RunFirrtlTransformAnnotation(Dependency[AddSuffixToModuleNames]) + ) + + // For harness run, change some firrtlOptions (below) for harness phase + // customTransforms: setup harness transforms, add AvoidExtModuleCollisions + // outputFileNameOverride: change to harnessOutput + // conf file must change to harnessConf by mapping annotations + + val generatorAnnotations = annotations.map { + case ReplSeqMemAnnotation(i, _) => ReplSeqMemAnnotation(i, harnessConf.get) + case HarnessOutputAnnotation(s) => OutputFileAnnotation(s) + case anno => anno + } ++ harnessAnnos + + val annos = new FirrtlStage().execute(Array.empty, generatorAnnotations) + annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { + case Some(circuit) => + dump(circuit, annos, topFir, topAnnoOut) + case _ => + throw new Exception(s"executeTop failed while executing FIRRTL!\n") + } + } +} + + +object GenerateTop extends StageMain(new TapeoutStage(doHarness = false)) + +object GenerateTopAndHarness extends StageMain(new TapeoutStage(doHarness = true)) diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala index 5d1cbc6cd..d6d7b80d2 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala @@ -23,7 +23,7 @@ class RemoveUnusedModules extends Transform with DependencyAPIMigration { def execute(state: CircuitState): CircuitState = { val modulesByName = state.circuit.modules.map { - case m: Module => (m.name, Some(m)) + case m: Module => (m.name, Some(m)) case m: ExtModule => (m.name, None) }.toMap diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala new file mode 100644 index 000000000..1c50a82e4 --- /dev/null +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -0,0 +1,181 @@ +// See LICENSE for license details. 
+ +package barstools.tapeout.transforms.stage + +import barstools.tapeout.transforms.GenerateTopAndHarness +import chisel3.stage.ChiselCli +import firrtl.AnnotationSeq +import firrtl.annotations.{Annotation, NoTargetAnnotation} +import firrtl.options.{HasShellOptions, Shell, ShellOption, Stage, Unserializable} +import firrtl.stage.FirrtlCli +import logger.Logger + +sealed trait TapeoutOption extends Unserializable { + this: Annotation => +} + +case class HarnessOutputAnnotation(harnessOutput: String) extends NoTargetAnnotation with TapeoutOption + +object HarnessOutputAnnotation extends HasShellOptions { + val options: Seq[ShellOption[_]] = Seq( + new ShellOption[String]( + longOption = "harness-o", + shortOption = Some("tho"), + toAnnotationSeq = (s: String) => Seq(HarnessOutputAnnotation(s)), + helpText = "use this to generate a harness at " + ) + ) +} + +case class SynTopAnnotation(synTop: String) extends NoTargetAnnotation with TapeoutOption + +object SynTopAnnotation extends HasShellOptions { + val options: Seq[ShellOption[_]] = Seq( + new ShellOption[String]( + longOption = "syn-top", + shortOption = Some("tst"), + toAnnotationSeq = (s: String) => Seq(SynTopAnnotation(s)), + helpText = "use this to set synTop" + ) + ) +} + +case class TopFirAnnotation(topFir: String) extends NoTargetAnnotation with TapeoutOption + +object TopFirAnnotation extends HasShellOptions { + val options: Seq[ShellOption[_]] = Seq( + new ShellOption[String]( + longOption = "top-fir", + shortOption = Some("tsf"), + toAnnotationSeq = (s: String) => Seq(TopFirAnnotation(s)), + helpText = "use this to set topFir" + ) + ) +} + +case class TopAnnoOutAnnotation(topAnnoOut: String) extends NoTargetAnnotation with TapeoutOption + +object TopAnnoOutAnnotation extends HasShellOptions { + val options: Seq[ShellOption[_]] = Seq( + new ShellOption[String]( + longOption = "top-anno-out", + shortOption = Some("tsaof"), + toAnnotationSeq = (s: String) => Seq(TopAnnoOutAnnotation(s)), + helpText = "use this to set topAnnoOut" + ) + ) +} + +case class TopDotfOutAnnotation(topDotfOut: String) extends NoTargetAnnotation with TapeoutOption + +object TopDotfOutAnnotation extends HasShellOptions { + val options: Seq[ShellOption[_]] = Seq( + new ShellOption[String]( + longOption = "top-dotf-out", + shortOption = Some("tdf"), + toAnnotationSeq = (s: String) => Seq(TopDotfOutAnnotation(s)), + helpText = "use this to set the filename for the top resource .f file" + ) + ) +} + +case class HarnessTopAnnotation(harnessTop: String) extends NoTargetAnnotation with TapeoutOption + +object HarnessTopAnnotation extends HasShellOptions { + val options: Seq[ShellOption[_]] = Seq( + new ShellOption[String]( + longOption = "harness-top", + shortOption = Some("tht"), + toAnnotationSeq = (s: String) => Seq(HarnessTopAnnotation(s)), + helpText = "use this to set harnessTop" + ) + ) +} + +case class HarnessFirAnnotation(harnessFir: String) extends NoTargetAnnotation with TapeoutOption + +object HarnessFirAnnotation extends HasShellOptions { + val options: Seq[ShellOption[_]] = Seq( + new ShellOption[String]( + longOption = "harness-fir", + shortOption = Some("thf"), + toAnnotationSeq = (s: String) => Seq(HarnessFirAnnotation(s)), + helpText = "use this to set harnessFir" + ) + ) +} + +case class HarnessAnnoOutAnnotation(harnessAnnoOut: String) extends NoTargetAnnotation with TapeoutOption + +object HarnessAnnoOutAnnotation extends HasShellOptions { + val options: Seq[ShellOption[_]] = Seq( + new ShellOption[String]( + longOption = "harness-anno-out", + 
shortOption = Some("thaof"), + toAnnotationSeq = (s: String) => Seq(HarnessAnnoOutAnnotation(s)), + helpText = "use this to set harnessAnnoOut" + ) + ) +} + +case class HarnessDotfOutAnnotation(harnessDotfOut: String) extends NoTargetAnnotation with TapeoutOption + +object HarnessDotfOutAnnotation extends HasShellOptions { + val options: Seq[ShellOption[_]] = Seq( + new ShellOption[String]( + longOption = "harness-dotf-out", + shortOption = Some("hdf"), + toAnnotationSeq = (s: String) => Seq(HarnessDotfOutAnnotation(s)), + helpText = "use this to set the filename for the harness resource .f file" + ) + ) +} + +case class HarnessConfAnnotation(harnessConf: String) extends NoTargetAnnotation with TapeoutOption + +object HarnessConfAnnotation extends HasShellOptions { + val options: Seq[ShellOption[_]] = Seq( + new ShellOption[String]( + longOption = "harness-conf", + shortOption = Some("thconf"), + toAnnotationSeq = (s: String) => Seq(HarnessConfAnnotation(s)), + helpText = "use this to set the harness conf file location" + ) + ) +} + +trait TapeoutCli { + this: Shell => + parser.note("Tapeout specific options") + + Seq( + HarnessOutputAnnotation, + SynTopAnnotation, + TopFirAnnotation, + TopAnnoOutAnnotation, + TopDotfOutAnnotation, + HarnessTopAnnotation, + HarnessFirAnnotation, + HarnessAnnoOutAnnotation, + HarnessDotfOutAnnotation, + HarnessConfAnnotation + ).foreach(_.addOptions(parser)) +} + +class TapeoutStage(doHarness: Boolean) extends Stage { + override val shell: Shell = new Shell(applicationName = "tapeout") with TapeoutCli with ChiselCli with FirrtlCli + + override def run(annotations: AnnotationSeq): AnnotationSeq = { + Logger.makeScope(annotations) { + val generator = new GenerateTopAndHarness(annotations) + + if (doHarness) { + generator.executeTopAndHarness() + } else { + generator.executeTop() + } + } + annotations + } +} + diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala new file mode 100644 index 000000000..db84230d1 --- /dev/null +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: Apache-2.0 + +package barstools.tapeout.transforms + +import org.scalatest.freespec.AnyFreeSpec +import org.scalatest.matchers.should.Matchers + +import java.io.{ByteArrayOutputStream, PrintStream} + +class GenerateTopSpec extends AnyFreeSpec with Matchers { + "Generate top and harness" - { + "should include the following transforms" in { + val buffer = new ByteArrayOutputStream() + Console.withOut(new PrintStream(buffer)) { + GenerateTopAndHarness.main(Array("-i", "ExampleModuleNeedsResetInverted.fir", "-ll", "info")) + } + val output = buffer.toString + output should include("barstools.tapeout.transforms.AddSuffixToModuleNames") + output should include("barstools.tapeout.transforms.ConvertToExtMod") + output should include("barstools.tapeout.transforms.RemoveUnusedModules") + output should include("barstools.tapeout.transforms.AvoidExtModuleCollisions") + println(output) + } + } + + "generate harness should " in { + val buffer = new ByteArrayOutputStream() + Console.withOut(new PrintStream(buffer)) { + GenerateTopAndHarness.main( + Array( + "--target-dir", "test_run_dir/generate_top_spec", + "-i", "/Users/chick/Adept/dev/masters/barstools/tapeout/src/test/resources/BlackBoxFloatTester.fir", +// "-X", "low", +// "-ll", "info", +// "--help" + ) + ) + } + val output = buffer.toString + println(output) + } +} 
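A minimal sketch, assuming the classes introduced in TapeoutStage.scala above, of driving the new stage programmatically rather than through StageMain; the input file, target directory, and module names below are placeholder values, not ones used anywhere in this patch series:

// Sketch only: passes the annotations that the TapeoutCli shell options would
// otherwise build from command-line flags straight into the stage.
import firrtl.AnnotationSeq
import firrtl.options.TargetDirAnnotation
import firrtl.stage.FirrtlFileAnnotation
import barstools.tapeout.transforms.stage.{HarnessTopAnnotation, SynTopAnnotation, TapeoutStage}

object RunTapeoutStageSketch extends App {
  val annos = AnnotationSeq(Seq(
    FirrtlFileAnnotation("Example.fir"),        // placeholder input circuit
    TargetDirAnnotation("test_run_dir/sketch"), // placeholder output directory
    SynTopAnnotation("ExampleTop"),             // what --syn-top would produce
    HarnessTopAnnotation("TestHarness")         // what --harness-top would produce
  ))
  // doHarness = false mirrors GenerateTop; doHarness = true mirrors GenerateTopAndHarness
  new TapeoutStage(doHarness = false).execute(Array.empty, annos)
}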
From e650d5ba22676cde57ff99239b57f96330950b99 Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 9 Feb 2021 14:12:57 -0800 Subject: [PATCH 203/273] - changed directory path to iocells to use directories rather than dotted name - Changed packages in tapeout/transforms/utils/*.scala to be in their own package `utils` --- .../iocell/chisel}/Analog.scala | 0 .../iocell/chisel}/IOCell.scala | 0 .../scala/barstools/tapeout/transforms/utils/FileUtils.scala | 4 ++-- .../barstools/tapeout/transforms/utils/LowerAnnotations.scala | 2 +- .../tapeout/transforms/utils/ProgrammaticBundle.scala | 3 ++- .../barstools/tapeout/transforms/utils/YamlHelpers.scala | 3 ++- 6 files changed, 7 insertions(+), 5 deletions(-) rename iocell/src/main/scala/{barstools.iocell.chisel => barstools/iocell/chisel}/Analog.scala (100%) rename iocell/src/main/scala/{barstools.iocell.chisel => barstools/iocell/chisel}/IOCell.scala (100%) diff --git a/iocell/src/main/scala/barstools.iocell.chisel/Analog.scala b/iocell/src/main/scala/barstools/iocell/chisel/Analog.scala similarity index 100% rename from iocell/src/main/scala/barstools.iocell.chisel/Analog.scala rename to iocell/src/main/scala/barstools/iocell/chisel/Analog.scala diff --git a/iocell/src/main/scala/barstools.iocell.chisel/IOCell.scala b/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala similarity index 100% rename from iocell/src/main/scala/barstools.iocell.chisel/IOCell.scala rename to iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala index 6e79b7e5f..86bf43de2 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala @@ -1,8 +1,8 @@ // See LICENSE for license details. 
-package barstools.tapeout.transforms +package barstools.tapeout.transforms.utils -import chisel3.experimental.{annotate, ChiselAnnotation} +import chisel3.experimental.{ChiselAnnotation, annotate} import firrtl._ import firrtl.annotations._ import firrtl.stage.Forms diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala index 1f628900d..45502d6d4 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala @@ -1,4 +1,4 @@ -package barstools.tapeout.transforms +package barstools.tapeout.transforms.utils object LowerName { def apply(s: String): String = s.replace(".", "_").replace("[", "_").replace("]", "") diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala index 8025439c0..66200e617 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala @@ -1,6 +1,7 @@ -package barstools.tapeout.transforms +package barstools.tapeout.transforms.utils import chisel3._ + import scala.collection.immutable.ListMap class CustomBundle[T <: Data](elts: (String, T)*) extends Record { diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala index 75b0c330c..9a226de57 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala @@ -1,6 +1,7 @@ -package barstools.tapeout.transforms +package barstools.tapeout.transforms.utils import net.jcazevedo.moultingyaml._ + import java.io.File class YamlFileReader(resource: String) { From 5616b9d68fe6579b93bc7dae5d4dc2bb6ecc19a7 Mon Sep 17 00:00:00 2001 From: chick Date: Sun, 14 Feb 2021 13:25:16 -0800 Subject: [PATCH 204/273] - remove unused harnessTransforms - --- .../transforms/GenerateTopAndHarness.scala | 9 +- .../tapeout/transforms/GenerateSpec.scala | 93 +++++++++++++++++++ .../tapeout/transforms/GenerateTopSpec.scala | 47 ++++++++-- 3 files changed, 131 insertions(+), 18 deletions(-) create mode 100644 tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index 108d3c139..35e364c03 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -38,14 +38,6 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg val topAnnos = synTop.map(st => ReParentCircuitAnnotation(rootCircuitTarget.module(st))) ++ topDotfOut.map(BlackBoxResourceFileNameAnno) - // order is determined by DependencyAPIMigration - val harnessTransforms = Seq( - new ConvertToExtMod, - new RemoveUnusedModules, - new AvoidExtModuleCollisions, - new AddSuffixToModuleNames - ) - // Dump firrtl and annotation files protected def dump( circuit: Circuit, @@ -88,6 +80,7 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg // Execute top and 
get list of ExtModules to avoid collisions val topExtModules = executeTop() + // order is determined by DependencyAPIMigration val harnessAnnos = harnessDotfOut.map(BlackBoxResourceFileNameAnno).toSeq ++ harnessTop.map(ht => ModuleNameSuffixAnnotation(rootCircuitTarget, s"_in${ht}")) ++ diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala new file mode 100644 index 000000000..cefd97595 --- /dev/null +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: Apache-2.0 + +package barstools.tapeout.transforms + +import chisel3._ +import chisel3.experimental.ExtModule +import chisel3.stage.ChiselStage +import firrtl.FileUtils +import org.scalatest.freespec.AnyFreeSpec + +import java.io.{File, PrintWriter} + +class BlackBoxInverter extends ExtModule { + val in = IO(Input(Bool())) + val out = IO(Output(Bool())) +} + +class GenerateExampleModule extends MultiIOModule { + val in = IO(Input(Bool())) + val out = IO(Output(Bool())) + + val inverter = Module(new BlackBoxInverter) + inverter.in := in + val inverted = inverter.out + + val reg = RegInit(0.U(8.W)) + reg := reg + inverted.asUInt + out := reg +} + +class ToBeMadeExternal extends MultiIOModule { + val in = IO(Input(Bool())) + val out = IO(Output(Bool())) + + val reg = RegInit(0.U(8.W)) + reg := reg + in.asUInt + 2.U + out := reg +} + +class GenerateExampleTester extends MultiIOModule { + val success = IO(Output(Bool())) + + val mod = Module(new GenerateExampleModule) + mod.in := 1.U + + val mod2 = Module(new ToBeMadeExternal) + mod2.in := 1.U + + val reg = RegInit(0.U(8.W)) + reg := reg + mod.out + mod2.out + + success := reg === 100.U + + when(reg === 100.U) { + stop() + } +} + +class GenerateSpec extends AnyFreeSpec { + "generate test data" in { + val targetDir = "test_run_dir/generate_spec_source" + FileUtils.makeDirectory(targetDir) + + val printWriter = new PrintWriter(new File(s"$targetDir/GenerateExampleTester.fir")) + printWriter.write((new ChiselStage()).emitFirrtl(new GenerateExampleTester)) + printWriter.close() + + val blackBoxInverterText = """ + |module BlackBoxInverter( + | input [0:0] in, + | output [0:0] out + |); + | assign out = !in; + |endmodule + |""".stripMargin + + val printWriter2 = new PrintWriter(new File(s"$targetDir/BlackBoxInverter.v")) + printWriter2.write(blackBoxInverterText) + printWriter2.close() + + + } + + "generate top test" in { + val sourceDir = "test_run_dir/generate_spec_source" + val targetDir = "test_run_dir/generate_spec" + + GenerateTop.main(Array( + "-i", s"$sourceDir/GenerateExampleTester.fir", + "-o", s"$targetDir/GenerateExampleTester.v" + )) + } +} diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala index db84230d1..25ddbd9bf 100644 --- a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala @@ -2,10 +2,11 @@ package barstools.tapeout.transforms +import firrtl.FileUtils import org.scalatest.freespec.AnyFreeSpec import org.scalatest.matchers.should.Matchers -import java.io.{ByteArrayOutputStream, PrintStream} +import java.io.{ByteArrayOutputStream, File, PrintStream, PrintWriter} class GenerateTopSpec extends AnyFreeSpec with Matchers { "Generate top and harness" - { @@ -24,19 +25,45 @@ class GenerateTopSpec extends 
AnyFreeSpec with Matchers { } "generate harness should " in { - val buffer = new ByteArrayOutputStream() - Console.withOut(new PrintStream(buffer)) { + val targetDir = "test_run_dir/generate_top_spec" + FileUtils.makeDirectory(targetDir) + + val stream = getClass.getResourceAsStream("/BlackBoxFloatTester.fir") + val input = scala.io.Source.fromInputStream(stream).getLines() + val printWriter = new PrintWriter(new File(s"$targetDir/BlackBoxFloatTester.fir")) + printWriter.write(input.mkString("\n")) + printWriter.close() + + println(s"""Resource: ${input.mkString("\n")}""") + + +// val buffer = new ByteArrayOutputStream() +// Console.withOut(new PrintStream(buffer)) { GenerateTopAndHarness.main( Array( "--target-dir", "test_run_dir/generate_top_spec", - "-i", "/Users/chick/Adept/dev/masters/barstools/tapeout/src/test/resources/BlackBoxFloatTester.fir", -// "-X", "low", -// "-ll", "info", -// "--help" + "-i", s"$targetDir/BlackBoxFloatTester.fir", + "-o", + "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.v", + "-tho", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.v", + "-i", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.fir", + "--syn-top", "UnitTestSuite", + "--harness-top", "TestHarness", + "-faf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.anno.json", + "-tsaof", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.anno.json", + "-tdf", "firrtl_black_box_resource_files.top.f", + "-tsf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.fir", + "-thaof", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.anno.json", + "-hdf", "firrtl_black_box_resource_files.harness.f", + "-thf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.fir", + "--infer-rw", + "--repl-seq-mem", "-c:TestHarness:-o:chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.mems.conf", + "-thconf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.mems.conf", + "-td", "test_run_dir/from-ci", + "-ll", "info" ) ) } - val output = buffer.toString - println(output) - } +// val output = buffer.toString +// println(output) } From c052f793927923943e98748cd3ad2ed3e9dcbe69 Mon Sep 17 00:00:00 2001 From: chick Date: Sun, 14 Feb 2021 14:44:36 -0800 Subject: [PATCH 205/273] - Add rocketchip dependency to try and fix run problem in chipyard sims/vcs --- build.sbt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/build.sbt b/build.sbt index d88408656..c78e638c7 100644 --- a/build.sbt +++ b/build.sbt @@ -2,7 +2,8 @@ val defaultVersions = Map( "chisel3" -> "3.4.+", - "chisel-iotesters" -> "1.5.+" + "chisel-iotesters" -> "1.5.+", + "rocketchip" -> "1.2.+" ) lazy val commonSettings = Seq( @@ -10,7 +11,7 @@ lazy val commonSettings = Seq( version := "0.4-SNAPSHOT", scalaVersion := "2.12.10", scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls", "-Xsource:2.11"), - libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { + libraryDependencies ++= Seq("chisel3","chisel-iotesters", "rocketchip").map { dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) }, libraryDependencies ++= Seq( From 055800898de2fbebcd5751d071492f49c5a41a9b Mon Sep 17 00:00:00 2001 From: chick Date: Sun, 14 Feb 2021 16:18:04 -0800 Subject: [PATCH 206/273] - Don't carry over OutputFileAnnotaton to the harness phase of GenerateTopAndHarness --- .../barstools/tapeout/transforms/GenerateTopAndHarness.scala | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index 35e364c03..dd9a5ac1b 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -98,7 +98,9 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg // outputFileNameOverride: change to harnessOutput // conf file must change to harnessConf by mapping annotations - val generatorAnnotations = annotations.map { + val generatorAnnotations = annotations + .filterNot(_.isInstanceOf[OutputFileAnnotation]) + .map { case ReplSeqMemAnnotation(i, _) => ReplSeqMemAnnotation(i, harnessConf.get) case HarnessOutputAnnotation(s) => OutputFileAnnotation(s) case anno => anno From 5040e0dcbfce7d274f3b6407aa188fa7efdf75b1 Mon Sep 17 00:00:00 2001 From: chick Date: Sun, 14 Feb 2021 16:54:25 -0800 Subject: [PATCH 207/273] - Pull rocket dependency back out --- build.sbt | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/build.sbt b/build.sbt index c78e638c7..d88408656 100644 --- a/build.sbt +++ b/build.sbt @@ -2,8 +2,7 @@ val defaultVersions = Map( "chisel3" -> "3.4.+", - "chisel-iotesters" -> "1.5.+", - "rocketchip" -> "1.2.+" + "chisel-iotesters" -> "1.5.+" ) lazy val commonSettings = Seq( @@ -11,7 +10,7 @@ lazy val commonSettings = Seq( version := "0.4-SNAPSHOT", scalaVersion := "2.12.10", scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls", "-Xsource:2.11"), - libraryDependencies ++= Seq("chisel3","chisel-iotesters", "rocketchip").map { + libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) }, libraryDependencies ++= Seq( From 7c2d7abbe1cd30ec3a97da0961ea5800abf91143 Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 16 Feb 2021 14:43:58 -0800 Subject: [PATCH 208/273] Add in missing transforms --- .../barstools/tapeout/transforms/GenerateTopAndHarness.scala | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index dd9a5ac1b..7a35c4d99 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -65,7 +65,10 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg // Top Generation def executeTop(): Seq[ExtModule] = { - val annos = new FirrtlStage().execute(Array.empty, annotations) + val annos = new FirrtlStage().execute(Array.empty, annotations ++ Seq( + RunFirrtlTransformAnnotation(Dependency[ReParentCircuit]), + RunFirrtlTransformAnnotation(Dependency[RemoveUnusedModules]) + )) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => dump(circuit, annos, topFir, topAnnoOut) From bbc8800840b32a4fb53487519a0f9223a198a352 Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 16 Feb 2021 16:29:41 -0800 Subject: [PATCH 209/273] Get topAnnos into the mix --- .../transforms/GenerateTopAndHarness.scala | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala 
b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index 7a35c4d99..4cb195927 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -27,12 +27,6 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg case _ => None }.toList - // order is determined by DependencyAPIMigration - val topTransforms = Seq( - new ReParentCircuit, - new RemoveUnusedModules - ) - lazy val rootCircuitTarget = CircuitTarget(harnessTop.get) val topAnnos = synTop.map(st => ReParentCircuitAnnotation(rootCircuitTarget.module(st))) ++ @@ -65,10 +59,14 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg // Top Generation def executeTop(): Seq[ExtModule] = { - val annos = new FirrtlStage().execute(Array.empty, annotations ++ Seq( - RunFirrtlTransformAnnotation(Dependency[ReParentCircuit]), - RunFirrtlTransformAnnotation(Dependency[RemoveUnusedModules]) - )) + val annos = new FirrtlStage().execute( + Array.empty, + annotations ++ Seq( + RunFirrtlTransformAnnotation(Dependency[ReParentCircuit]), + RunFirrtlTransformAnnotation(Dependency[RemoveUnusedModules]) + ) ++ + topAnnos + ) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => dump(circuit, annos, topFir, topAnnoOut) From 8a93d8b2d7cd29f113b903bf2bd4094cd69acf2b Mon Sep 17 00:00:00 2001 From: chick Date: Fri, 19 Feb 2021 14:48:48 -0800 Subject: [PATCH 210/273] Ignore GenerateTopAndHarness test for now --- .../scala/barstools/tapeout/transforms/GenerateTopSpec.scala | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala index 25ddbd9bf..02afa79af 100644 --- a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala @@ -20,11 +20,10 @@ class GenerateTopSpec extends AnyFreeSpec with Matchers { output should include("barstools.tapeout.transforms.ConvertToExtMod") output should include("barstools.tapeout.transforms.RemoveUnusedModules") output should include("barstools.tapeout.transforms.AvoidExtModuleCollisions") - println(output) } } - "generate harness should " in { + "generate harness should " ignore { val targetDir = "test_run_dir/generate_top_spec" FileUtils.makeDirectory(targetDir) From ddea19825db5430a69dc8943b5882d897c57c88c Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Mon, 22 Feb 2021 01:30:47 -0800 Subject: [PATCH 211/273] Macrocompiler should prioritize memories with no masks with DefaultCostMetric --- macros/src/main/scala/CostMetric.scala | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/macros/src/main/scala/CostMetric.scala b/macros/src/main/scala/CostMetric.scala index b80324aa2..9283143ab 100644 --- a/macros/src/main/scala/CostMetric.scala +++ b/macros/src/main/scala/CostMetric.scala @@ -124,6 +124,10 @@ object DefaultMetric extends CostMetric with CostMetricCompanion { else (mem.src.width/p)*m //Waste the extra maskbits } } + val maskPenalty = (memMask, libMask) match { + case (None, Some(m)) => 0.001 + case (_, _) => 0 + } val depthCost = math.ceil(mem.src.depth.toDouble / lib.src.depth.toDouble) val widthCost = math.ceil(memWidth.toDouble / lib.src.width.toDouble) val bitsCost = (lib.src.depth * 
lib.src.width).toDouble @@ -132,7 +136,7 @@ object DefaultMetric extends CostMetric with CostMetricCompanion { val bitsWasted = depthCost*widthCost*bitsCost - requestedBits val wastedConst = 0.05 // 0 means waste as few bits with no regard for instance count val costPerInst = wastedConst*depthCost*widthCost - Some(1.0*bitsWasted/requestedBits+costPerInst) + Some(1.0*bitsWasted/requestedBits+costPerInst + maskPenalty) } override def commandLineParams = Map() From a3711c4e19911b57b21bdcf8d459d19795f3201d Mon Sep 17 00:00:00 2001 From: chick Date: Mon, 22 Feb 2021 11:39:47 -0800 Subject: [PATCH 212/273] Remove fully commented out original file Generate.scala --- .../tapeout/transforms/Generate.scala | 274 ------------------ 1 file changed, 274 deletions(-) delete mode 100644 tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala deleted file mode 100644 index e20f72e55..000000000 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/Generate.scala +++ /dev/null @@ -1,274 +0,0 @@ -//package barstools.tapeout.transforms -// -//import firrtl._ -//import firrtl.annotations._ -//import firrtl.ir._ -//import firrtl.passes.memlib.ReplSeqMemAnnotation -//import firrtl.stage.FirrtlCircuitAnnotation -//import firrtl.transforms.BlackBoxResourceFileNameAnno -//import logger.LazyLogging -// -//trait HasTapeoutOptions { self: ExecutionOptionsManager with HasFirrtlOptions => -// var tapeoutOptions = TapeoutOptions() -// -// parser.note("tapeout options") -// -// parser -// .opt[String]("harness-o") -// .abbr("tho") -// .valueName("") -// .foreach { x => -// tapeoutOptions = tapeoutOptions.copy( -// harnessOutput = Some(x) -// ) -// } -// .text { -// "use this to generate a harness at " -// } -// -// parser -// .opt[String]("syn-top") -// .abbr("tst") -// .valueName("") -// .foreach { x => -// tapeoutOptions = tapeoutOptions.copy( -// synTop = Some(x) -// ) -// } -// .text { -// "use this to set synTop" -// } -// -// parser -// .opt[String]("top-fir") -// .abbr("tsf") -// .valueName("") -// .foreach { x => -// tapeoutOptions = tapeoutOptions.copy( -// topFir = Some(x) -// ) -// } -// .text { -// "use this to set topFir" -// } -// -// parser -// .opt[String]("top-anno-out") -// .abbr("tsaof") -// .valueName("") -// .foreach { x => -// tapeoutOptions = tapeoutOptions.copy( -// topAnnoOut = Some(x) -// ) -// } -// .text { -// "use this to set topAnnoOut" -// } -// -// parser -// .opt[String]("top-dotf-out") -// .abbr("tdf") -// .valueName("") -// .foreach { x => -// tapeoutOptions = tapeoutOptions.copy( -// topDotfOut = Some(x) -// ) -// } -// .text { -// "use this to set the filename for the top resource .f file" -// } -// -// parser -// .opt[String]("harness-top") -// .abbr("tht") -// .valueName("") -// .foreach { x => -// tapeoutOptions = tapeoutOptions.copy( -// harnessTop = Some(x) -// ) -// } -// .text { -// "use this to set harnessTop" -// } -// -// parser -// .opt[String]("harness-fir") -// .abbr("thf") -// .valueName("") -// .foreach { x => -// tapeoutOptions = tapeoutOptions.copy( -// harnessFir = Some(x) -// ) -// } -// .text { -// "use this to set harnessFir" -// } -// -// parser -// .opt[String]("harness-anno-out") -// .abbr("thaof") -// .valueName("") -// .foreach { x => -// tapeoutOptions = tapeoutOptions.copy( -// harnessAnnoOut = Some(x) -// ) -// } -// .text { -// "use this to set harnessAnnoOut" -// } -// -// parser -// 
.opt[String]("harness-dotf-out") -// .abbr("hdf") -// .valueName("") -// .foreach { x => -// tapeoutOptions = tapeoutOptions.copy( -// harnessDotfOut = Some(x) -// ) -// } -// .text { -// "use this to set the filename for the harness resource .f file" -// } -// -// parser -// .opt[String]("harness-conf") -// .abbr("thconf") -// .valueName("") -// .foreach { x => -// tapeoutOptions = tapeoutOptions.copy( -// harnessConf = Some(x) -// ) -// } -// .text { -// "use this to set the harness conf file location" -// } -// -//} -// -//case class TapeoutOptions( -// harnessOutput: Option[String] = None, -// synTop: Option[String] = None, -// topFir: Option[String] = None, -// topAnnoOut: Option[String] = None, -// topDotfOut: Option[String] = None, -// harnessTop: Option[String] = None, -// harnessFir: Option[String] = None, -// harnessAnnoOut: Option[String] = None, -// harnessDotfOut: Option[String] = None, -// harnessConf: Option[String] = None) -// extends LazyLogging -// -//// Requires two phases, one to collect modules below synTop in the hierarchy -//// and a second to remove those modules to generate the test harness -//sealed trait GenerateTopAndHarnessApp extends LazyLogging { this: App => -// lazy val optionsManager = { -// val optionsManager = new ExecutionOptionsManager("tapeout") with HasFirrtlOptions with HasTapeoutOptions -// if (!optionsManager.parse(args)) { -// throw new Exception("Error parsing options!") -// } -// optionsManager -// } -// lazy val tapeoutOptions = optionsManager.tapeoutOptions -// // Tapeout options -// lazy val synTop = tapeoutOptions.synTop -// lazy val harnessTop = tapeoutOptions.harnessTop -// lazy val firrtlOptions = optionsManager.firrtlOptions -// // FIRRTL options -// lazy val annoFiles = firrtlOptions.annotationFileNames -// -// // order is determined by DependencyAPIMigration -// val topTransforms = Seq( -// new ReParentCircuit, -// new RemoveUnusedModules -// ) -// -// lazy val rootCircuitTarget = CircuitTarget(harnessTop.get) -// -// lazy val topAnnos = synTop.map(st => ReParentCircuitAnnotation(rootCircuitTarget.module(st))) ++ -// tapeoutOptions.topDotfOut.map(BlackBoxResourceFileNameAnno(_)) -// -// lazy val topOptions = firrtlOptions.copy( -// customTransforms = firrtlOptions.customTransforms ++ topTransforms, -// annotations = firrtlOptions.annotations ++ topAnnos -// ) -// -// // order is determined by DependencyAPIMigration -// val harnessTransforms = Seq( -// new ConvertToExtMod, -// new RemoveUnusedModules, -// new AvoidExtModuleCollisions, -// new AddSuffixToModuleNames -// ) -// -// // Dump firrtl and annotation files -// protected def dump(res: FirrtlExecutionSuccess, firFile: Option[String], annoFile: Option[String]): Unit = { -// firFile.foreach { firPath => -// val outputFile = new java.io.PrintWriter(firPath) -// outputFile.write(res.circuitState.circuit.serialize) -// outputFile.close() -// } -// annoFile.foreach { annoPath => -// val outputFile = new java.io.PrintWriter(annoPath) -// outputFile.write(JsonProtocol.serialize(res.circuitState.annotations.filter(_ match { -// case da: DeletedAnnotation => false -// case ec: EmittedComponent => false -// case ea: EmittedAnnotation[_] => false -// case fca: FirrtlCircuitAnnotation => false -// case _ => true -// }))) -// outputFile.close() -// } -// } -// -// // Top Generation -// protected def executeTop(): Seq[ExtModule] = { -// optionsManager.firrtlOptions = topOptions -// val result = firrtl.Driver.execute(optionsManager) -// result match { -// case x: FirrtlExecutionSuccess => -// 
dump(x, tapeoutOptions.topFir, tapeoutOptions.topAnnoOut) -// x.circuitState.circuit.modules.collect { case e: ExtModule => e } -// case x => -// throw new Exception(s"executeTop failed while executing FIRRTL!\n${x}") -// } -// } -// -// // Top and harness generation -// protected def executeTopAndHarness(): Unit = { -// // Execute top and get list of ExtModules to avoid collisions -// val topExtModules = executeTop() -// -// val harnessAnnos = -// tapeoutOptions.harnessDotfOut.map(BlackBoxResourceFileNameAnno(_)).toSeq ++ -// harnessTop.map(ht => ModuleNameSuffixAnnotation(rootCircuitTarget, s"_in${ht}")) ++ -// synTop.map(st => ConvertToExtModAnnotation(rootCircuitTarget.module(st))) :+ -// LinkExtModulesAnnotation(topExtModules) -// -// // For harness run, change some firrtlOptions (below) for harness phase -// // customTransforms: setup harness transforms, add AvoidExtModuleCollisions -// // outputFileNameOverride: change to harnessOutput -// // conf file must change to harnessConf by mapping annotations -// optionsManager.firrtlOptions = firrtlOptions.copy( -// customTransforms = firrtlOptions.customTransforms ++ harnessTransforms, -// outputFileNameOverride = tapeoutOptions.harnessOutput.get, -// annotations = firrtlOptions.annotations.map({ -// case ReplSeqMemAnnotation(i, o) => ReplSeqMemAnnotation(i, tapeoutOptions.harnessConf.get) -// case a => a -// }) ++ harnessAnnos -// ) -// val harnessResult = firrtl.Driver.execute(optionsManager) -// harnessResult match { -// case x: FirrtlExecutionSuccess => dump(x, tapeoutOptions.harnessFir, tapeoutOptions.harnessAnnoOut) -// case x => throw new Exception(s"executeHarness failed while executing FIRRTL!\n${x}") -// } -// } -//} -// -////object GenerateTop extends App with GenerateTopAndHarnessApp { -//// // Only need a single phase to generate the top module -//// executeTop() -////} -//// -////object GenerateTopAndHarness extends App with GenerateTopAndHarnessApp { -//// executeTopAndHarness() -////} From 1d6486f9579dd497c515adca2119208e279e3bab Mon Sep 17 00:00:00 2001 From: chick Date: Thu, 1 Jul 2021 10:55:18 -0700 Subject: [PATCH 213/273] Use github actions for testing - Stripped down version from treadle - commented out several sections - delete travis test - fix run-tests.yml - Let's make this 3. --- .github/workflows/run-ci.yml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 .github/workflows/run-ci.yml diff --git a/.github/workflows/run-ci.yml b/.github/workflows/run-ci.yml new file mode 100644 index 000000000..492f417a3 --- /dev/null +++ b/.github/workflows/run-ci.yml @@ -0,0 +1,26 @@ +name: Test + +on: + pull_request: + push: + branches: + - master + +jobs: + test: + name: Unit Tests + runs-on: ubuntu-latest + strategy: + matrix: + scala: [ 2.12.14 ] + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup Scala + uses: olafurpg/setup-scala@v10 + - name: Cache + uses: coursier/cache-action@v5 + - name: Get submodules + run: git submodule update --init + - name: Test + run: sbt test From 3d571b2d2ae0b3ac8a3d7257cf2c34fb4f725307 Mon Sep 17 00:00:00 2001 From: chick Date: Thu, 1 Jul 2021 11:05:03 -0700 Subject: [PATCH 214/273] Use github actions for testing - Stripped down version from treadle - commented out several sections - delete travis test - fix run-tests.yml - Let's make this 3. 
--- .travis.yml | 12 ------------ README.md | 10 ++++++++-- 2 files changed, 8 insertions(+), 14 deletions(-) delete mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 0e6fa8113..000000000 --- a/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -git: - submodules: true -language: scala -# run on new infrastructure -sudo: false - -cache: - directories: - $HOME/.ivy2 - -script: - - sbt test diff --git a/README.md b/README.md index ae5f04823..01e263c3e 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,11 @@ -# barstools -Useful utilities for BAR projects +Barstools +================== + +--- + +![Test](https://github.com/freechipsproject/ucb-bar/barstools/Test/badge.svg) + +**Barstools** is a coolection of useful utilities for BAR projects Passes/Transforms that could be useful if added here: * Check that a module was de-duplicated. Useful for MIM CAD flows and currently done in python. From 61ab39f82976aaad09c174bc411015a86691c546 Mon Sep 17 00:00:00 2001 From: John Wright Date: Sun, 11 Jul 2021 17:04:47 -0700 Subject: [PATCH 215/273] Restore proper naming of harness annotation file --- .../barstools/tapeout/transforms/GenerateTopAndHarness.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index 4cb195927..9a148591e 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -16,6 +16,7 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg val synTop: Option[String] = annotations.collectFirst { case SynTopAnnotation(s) => s } val topFir: Option[String] = annotations.collectFirst { case TopFirAnnotation(s) => s } val topAnnoOut: Option[String] = annotations.collectFirst { case TopAnnoOutAnnotation(s) => s } + val harnessAnnoOut: Option[String] = annotations.collectFirst { case HarnessAnnoOutAnnotation(s) => s } val harnessTop: Option[String] = annotations.collectFirst { case HarnessTopAnnotation(h) => h } val harnessConf: Option[String] = annotations.collectFirst { case HarnessConfAnnotation(h) => h } val harnessOutput: Option[String] = annotations.collectFirst { case HarnessOutputAnnotation(h) => h } @@ -110,7 +111,7 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg val annos = new FirrtlStage().execute(Array.empty, generatorAnnotations) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => - dump(circuit, annos, topFir, topAnnoOut) + dump(circuit, annos, topFir, harnessAnnoOut) case _ => throw new Exception(s"executeTop failed while executing FIRRTL!\n") } From 479e63c1ce56854496ee7c1c88f20d9ffab5a9a2 Mon Sep 17 00:00:00 2001 From: John Wright Date: Mon, 12 Jul 2021 00:44:51 -0700 Subject: [PATCH 216/273] Fix bug that prunes InstanceTargets out of the AnnotationSeq in ReParentCircuit --- .../tapeout/transforms/ReParentCircuit.scala | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala index 103561765..9043e0b12 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala +++ 
b/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala @@ -7,6 +7,7 @@ import firrtl.annotations._ import firrtl.options.Dependency import firrtl.stage.Forms import firrtl.stage.TransformManager.TransformDependency +import firrtl.annotations.TargetToken.{Instance, OfModule} case class ReParentCircuitAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { def duplicate(n: ModuleTarget) = this.copy(n) @@ -32,6 +33,27 @@ class ReParentCircuit extends Transform with DependencyAPIMigration { rmap.record(CircuitTarget(c.main), CircuitTarget(s)) rmap } - state.copy(circuit = newCircuit, renames = mainRename) + + val newAnnotations = newTopName.map({ topName => + // Update InstanceTargets + def updateInstanceTarget(t: InstanceTarget): Option[InstanceTarget] = { + val idx = t.path.lastIndexWhere(_._2.value == topName) + if (idx == -1) Some(t.copy(circuit=topName)) else Some(t.copy(circuit=topName, module=topName, path=t.path.drop(idx+1))) + } + + AnnotationSeq(state.annotations.toSeq.map({ + case x: SingleTargetAnnotation[InstanceTarget] if x.target.isInstanceOf[InstanceTarget] => + updateInstanceTarget(x.target).map(y => x.duplicate(y)) + case x: MultiTargetAnnotation => + val newTargets: Seq[Seq[Option[Target]]] = x.targets.map(_.map({ + case y: InstanceTarget => updateInstanceTarget(y) + case y => Some(y) + })) + if (newTargets.flatten.forall(_.isDefined)) Some(x.duplicate(newTargets.map(_.map(_.get)))) else None + case x => Some(x) + }).filter(_.isDefined).map(_.get)) + }).getOrElse(state.annotations) + + state.copy(circuit = newCircuit, renames = mainRename, annotations = newAnnotations) } } From 53a2d698e5bcd53db0188ddea081efc6f03a4589 Mon Sep 17 00:00:00 2001 From: John Wright Date: Mon, 12 Jul 2021 21:46:46 -0700 Subject: [PATCH 217/273] Also make ReParentCircuit work on ReferenceTargets --- .../tapeout/transforms/ReParentCircuit.scala | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala index 9043e0b12..82484cce6 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala @@ -35,18 +35,26 @@ class ReParentCircuit extends Transform with DependencyAPIMigration { } val newAnnotations = newTopName.map({ topName => - // Update InstanceTargets - def updateInstanceTarget(t: InstanceTarget): Option[InstanceTarget] = { + // Update InstanceTargets and ReferenceTargets + // Yes, these are identical functions, but the copy methods force separate implementations + def updateInstance(t: InstanceTarget): Option[InstanceTarget] = { + val idx = t.path.lastIndexWhere(_._2.value == topName) + if (idx == -1) Some(t.copy(circuit=topName)) else Some(t.copy(circuit=topName, module=topName, path=t.path.drop(idx+1))) + } + def updateReference(t: ReferenceTarget): Option[ReferenceTarget] = { val idx = t.path.lastIndexWhere(_._2.value == topName) if (idx == -1) Some(t.copy(circuit=topName)) else Some(t.copy(circuit=topName, module=topName, path=t.path.drop(idx+1))) } AnnotationSeq(state.annotations.toSeq.map({ case x: SingleTargetAnnotation[InstanceTarget] if x.target.isInstanceOf[InstanceTarget] => - updateInstanceTarget(x.target).map(y => x.duplicate(y)) + updateInstance(x.target).map(y => x.duplicate(y)) + case x: SingleTargetAnnotation[ReferenceTarget] if 
x.target.isInstanceOf[ReferenceTarget] => + updateReference(x.target).map(y => x.duplicate(y)) case x: MultiTargetAnnotation => val newTargets: Seq[Seq[Option[Target]]] = x.targets.map(_.map({ - case y: InstanceTarget => updateInstanceTarget(y) + case y: InstanceTarget => updateInstance(y) + case y: ReferenceTarget => updateReference(y) case y => Some(y) })) if (newTargets.flatten.forall(_.isDefined)) Some(x.duplicate(newTargets.map(_.map(_.get)))) else None From 66eee23dc4f9236b4ec21851e969500655e072ec Mon Sep 17 00:00:00 2001 From: John Wright Date: Mon, 12 Jul 2021 19:36:07 -0700 Subject: [PATCH 218/273] Fix harness .fir file output location --- .../barstools/tapeout/transforms/GenerateTopAndHarness.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index 9a148591e..b30d9d411 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -15,6 +15,7 @@ import logger.LazyLogging private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogging { val synTop: Option[String] = annotations.collectFirst { case SynTopAnnotation(s) => s } val topFir: Option[String] = annotations.collectFirst { case TopFirAnnotation(s) => s } + val harnessFir: Option[String] = annotations.collectFirst { case HarnessFirAnnotation(s) => s } val topAnnoOut: Option[String] = annotations.collectFirst { case TopAnnoOutAnnotation(s) => s } val harnessAnnoOut: Option[String] = annotations.collectFirst { case HarnessAnnoOutAnnotation(s) => s } val harnessTop: Option[String] = annotations.collectFirst { case HarnessTopAnnotation(h) => h } @@ -111,7 +112,7 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg val annos = new FirrtlStage().execute(Array.empty, generatorAnnotations) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => - dump(circuit, annos, topFir, harnessAnnoOut) + dump(circuit, annos, harnessFir, harnessAnnoOut) case _ => throw new Exception(s"executeTop failed while executing FIRRTL!\n") } From c907a7377cca17422bab16cdf4c42cc83d3b97ee Mon Sep 17 00:00:00 2001 From: chick Date: Fri, 6 Aug 2021 13:09:45 -0700 Subject: [PATCH 219/273] Moved a zillion files all over the place so that everything is now in tapeout/src in the correct directory corresponding to internal packages. Everything compiles and tests run TODO: - Figure out assembly step for MacroCompiler - Does root project matter? 
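One possible answer to the assembly TODO, sketched here only under the assumption that sbt-assembly stays enabled and MacroCompiler keeps its package name; the jar name is a made-up placeholder:

// Sketch, not part of this patch: re-points the assembly entry point that the
// deleted macros subproject used to set, now on the merged tapeout project.
lazy val tapeout = (project in file("tapeout"))
  .settings(commonSettings)
  .settings(scalacOptions in Test ++= Seq("-language:reflectiveCalls"))
  .settings(
    mainClass in assembly := Some("barstools.macros.MacroCompiler"),
    assemblyJarName in assembly := "MacroCompiler.jar" // placeholder jar name
  )
  .enablePlugins(sbtassembly.AssemblyPlugin)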
--- .gitmodules | 3 - build.sbt | 14 +- macros/build.sbt | 1 - mdf | 1 - .../resources/barstools/iocell/vsrc/Analog.v | 0 .../resources/barstools/iocell/vsrc/IOCell.v | 0 .../barstools/iocell/chisel/Analog.scala | 0 .../barstools/iocell/chisel/IOCell.scala | 2 +- .../scala/barstools/macros/CostMetric.scala | 0 .../barstools/macros/MacroCompiler.scala | 0 .../scala/barstools/macros/SynFlops.scala | 2 +- .../main/scala/barstools/macros/Utils.scala | 0 .../main/scala/mdf/macrolib/ConfReader.scala | 95 ++++ .../scala/mdf/macrolib/FillerMacroBase.scala | 61 +++ .../scala/mdf/macrolib/FlipChipMacro.scala | 72 +++ .../src/main/scala/mdf/macrolib/IOMacro.scala | 147 ++++++ .../main/scala/mdf/macrolib/MacroLib.scala | 19 + .../src/main/scala/mdf/macrolib/SRAM.scala | 444 ++++++++++++++++++ .../src/main/scala/mdf/macrolib/Utils.scala | 87 ++++ .../src/test/resources/lib-BOOMTest.json | 0 .../src/test/resources/lib-MaskPortTest.json | 0 .../test/resources/lib-WriteEnableTest.json | 0 .../scala/barstools/macros/CostFunction.scala | 0 .../scala/barstools/macros/Functional.scala | 0 .../barstools/macros/MacroCompilerSpec.scala | 0 .../test/scala/barstools/macros/Masks.scala | 0 .../scala/barstools/macros/MultiPort.scala | 0 .../scala/barstools/macros/SRAMCompiler.scala | 0 .../barstools/macros/SimpleSplitDepth.scala | 0 .../barstools/macros/SimpleSplitWidth.scala | 0 .../barstools/macros/SpecificExamples.scala | 6 +- .../scala/barstools/macros/SynFlops.scala | 0 .../tapeout/transforms/GenerateSpec.scala | 2 +- .../transforms/ResetInverterSpec.scala | 4 +- .../scala/mdf/macrolib/ConfReaderSpec.scala | 101 ++++ .../mdf/macrolib/FlipChipMacroSpec.scala | 14 + .../test/scala/mdf/macrolib/IOMacroSpec.scala | 67 +++ .../scala/mdf/macrolib/IOPropertiesSpec.scala | 14 + .../scala/mdf/macrolib/MacroLibOutput.scala | 270 +++++++++++ .../scala/mdf/macrolib/MacroLibSpec.scala | 406 ++++++++++++++++ 40 files changed, 1809 insertions(+), 23 deletions(-) delete mode 100644 .gitmodules delete mode 100644 macros/build.sbt delete mode 160000 mdf rename {iocell => tapeout}/src/main/resources/barstools/iocell/vsrc/Analog.v (100%) rename {iocell => tapeout}/src/main/resources/barstools/iocell/vsrc/IOCell.v (100%) rename {iocell => tapeout}/src/main/scala/barstools/iocell/chisel/Analog.scala (100%) rename {iocell => tapeout}/src/main/scala/barstools/iocell/chisel/IOCell.scala (99%) rename {macros => tapeout}/src/main/scala/barstools/macros/CostMetric.scala (100%) rename {macros => tapeout}/src/main/scala/barstools/macros/MacroCompiler.scala (100%) rename {macros => tapeout}/src/main/scala/barstools/macros/SynFlops.scala (98%) rename {macros => tapeout}/src/main/scala/barstools/macros/Utils.scala (100%) create mode 100644 tapeout/src/main/scala/mdf/macrolib/ConfReader.scala create mode 100644 tapeout/src/main/scala/mdf/macrolib/FillerMacroBase.scala create mode 100644 tapeout/src/main/scala/mdf/macrolib/FlipChipMacro.scala create mode 100644 tapeout/src/main/scala/mdf/macrolib/IOMacro.scala create mode 100644 tapeout/src/main/scala/mdf/macrolib/MacroLib.scala create mode 100644 tapeout/src/main/scala/mdf/macrolib/SRAM.scala create mode 100644 tapeout/src/main/scala/mdf/macrolib/Utils.scala rename {macros => tapeout}/src/test/resources/lib-BOOMTest.json (100%) rename {macros => tapeout}/src/test/resources/lib-MaskPortTest.json (100%) rename {macros => tapeout}/src/test/resources/lib-WriteEnableTest.json (100%) rename {macros => tapeout}/src/test/scala/barstools/macros/CostFunction.scala (100%) rename {macros => 
tapeout}/src/test/scala/barstools/macros/Functional.scala (100%) rename {macros => tapeout}/src/test/scala/barstools/macros/MacroCompilerSpec.scala (100%) rename {macros => tapeout}/src/test/scala/barstools/macros/Masks.scala (100%) rename {macros => tapeout}/src/test/scala/barstools/macros/MultiPort.scala (100%) rename {macros => tapeout}/src/test/scala/barstools/macros/SRAMCompiler.scala (100%) rename {macros => tapeout}/src/test/scala/barstools/macros/SimpleSplitDepth.scala (100%) rename {macros => tapeout}/src/test/scala/barstools/macros/SimpleSplitWidth.scala (100%) rename {macros => tapeout}/src/test/scala/barstools/macros/SpecificExamples.scala (99%) rename {macros => tapeout}/src/test/scala/barstools/macros/SynFlops.scala (100%) create mode 100644 tapeout/src/test/scala/mdf/macrolib/ConfReaderSpec.scala create mode 100644 tapeout/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala create mode 100644 tapeout/src/test/scala/mdf/macrolib/IOMacroSpec.scala create mode 100644 tapeout/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala create mode 100644 tapeout/src/test/scala/mdf/macrolib/MacroLibOutput.scala create mode 100644 tapeout/src/test/scala/mdf/macrolib/MacroLibSpec.scala diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 733ebcf64..000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "mdf"] - path = mdf - url = https://github.com/ucb-bar/plsi-mdf.git diff --git a/build.sbt b/build.sbt index d88408656..1fe835056 100644 --- a/build.sbt +++ b/build.sbt @@ -14,7 +14,8 @@ lazy val commonSettings = Seq( dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) }, libraryDependencies ++= Seq( - "org.scalatest" %% "scalatest" % "3.2.2" % "test", + "com.typesafe.play" %% "play-json" % "2.9.2", + "org.scalatest" %% "scalatest" % "3.2.9" % "test", ), resolvers ++= Seq( Resolver.sonatypeRepo("snapshots"), @@ -25,17 +26,10 @@ lazy val commonSettings = Seq( disablePlugins(sbtassembly.AssemblyPlugin) -lazy val mdf = (project in file("mdf/scalalib")) -lazy val macros = (project in file("macros")) - .dependsOn(mdf) - .settings(commonSettings) - .settings( - mainClass := Some("barstools.macros.MacroCompiler") - ) - .enablePlugins(sbtassembly.AssemblyPlugin) +enablePlugins(sbtassembly.AssemblyPlugin) lazy val tapeout = (project in file("tapeout")) .settings(commonSettings) .settings(scalacOptions in Test ++= Seq("-language:reflectiveCalls")) -lazy val root = (project in file(".")).aggregate(macros, tapeout) +lazy val root = (project in file(".")).aggregate(tapeout) diff --git a/macros/build.sbt b/macros/build.sbt deleted file mode 100644 index 65e9704a1..000000000 --- a/macros/build.sbt +++ /dev/null @@ -1 +0,0 @@ -enablePlugins(sbtassembly.AssemblyPlugin) diff --git a/mdf b/mdf deleted file mode 160000 index e588024d7..000000000 --- a/mdf +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e588024d706220b73f2c97ca75d6fec8dd0d41b1 diff --git a/iocell/src/main/resources/barstools/iocell/vsrc/Analog.v b/tapeout/src/main/resources/barstools/iocell/vsrc/Analog.v similarity index 100% rename from iocell/src/main/resources/barstools/iocell/vsrc/Analog.v rename to tapeout/src/main/resources/barstools/iocell/vsrc/Analog.v diff --git a/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v b/tapeout/src/main/resources/barstools/iocell/vsrc/IOCell.v similarity index 100% rename from iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v rename to tapeout/src/main/resources/barstools/iocell/vsrc/IOCell.v diff --git 
a/iocell/src/main/scala/barstools/iocell/chisel/Analog.scala b/tapeout/src/main/scala/barstools/iocell/chisel/Analog.scala similarity index 100% rename from iocell/src/main/scala/barstools/iocell/chisel/Analog.scala rename to tapeout/src/main/scala/barstools/iocell/chisel/Analog.scala diff --git a/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala b/tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala similarity index 99% rename from iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala rename to tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala index d244d2984..47b7cf2e7 100644 --- a/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala +++ b/tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala @@ -148,7 +148,7 @@ object IOCell { padSignal: T, name: Option[String] = None, typeParams: IOCellTypeParams = GenericIOCellParams(), - concretizeResetFn: (Reset) => R = toSyncReset + concretizeResetFn: (Reset) => R = toSyncReset _ ): Seq[IOCell] = { def genCell[T <: Data]( castToBool: (T) => Bool, diff --git a/macros/src/main/scala/barstools/macros/CostMetric.scala b/tapeout/src/main/scala/barstools/macros/CostMetric.scala similarity index 100% rename from macros/src/main/scala/barstools/macros/CostMetric.scala rename to tapeout/src/main/scala/barstools/macros/CostMetric.scala diff --git a/macros/src/main/scala/barstools/macros/MacroCompiler.scala b/tapeout/src/main/scala/barstools/macros/MacroCompiler.scala similarity index 100% rename from macros/src/main/scala/barstools/macros/MacroCompiler.scala rename to tapeout/src/main/scala/barstools/macros/MacroCompiler.scala diff --git a/macros/src/main/scala/barstools/macros/SynFlops.scala b/tapeout/src/main/scala/barstools/macros/SynFlops.scala similarity index 98% rename from macros/src/main/scala/barstools/macros/SynFlops.scala rename to tapeout/src/main/scala/barstools/macros/SynFlops.scala index 77ea4c962..5d39cda94 100644 --- a/macros/src/main/scala/barstools/macros/SynFlops.scala +++ b/tapeout/src/main/scala/barstools/macros/SynFlops.scala @@ -64,7 +64,7 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa val readConnects = real_macro.readers.zipWithIndex.flatMap { case (r, i) => val clock = portToExpression(r.src.clock.get) val address = portToExpression(r.src.address) - val enable = (r.src chipEnable, r.src readEnable) match { + val enable = (r.src.chipEnable, r.src.readEnable) match { case (Some(en_port), Some(re_port)) => and(portToExpression(en_port), portToExpression(re_port)) case (Some(en_port), None) => portToExpression(en_port) diff --git a/macros/src/main/scala/barstools/macros/Utils.scala b/tapeout/src/main/scala/barstools/macros/Utils.scala similarity index 100% rename from macros/src/main/scala/barstools/macros/Utils.scala rename to tapeout/src/main/scala/barstools/macros/Utils.scala diff --git a/tapeout/src/main/scala/mdf/macrolib/ConfReader.scala b/tapeout/src/main/scala/mdf/macrolib/ConfReader.scala new file mode 100644 index 000000000..ec701d6ee --- /dev/null +++ b/tapeout/src/main/scala/mdf/macrolib/ConfReader.scala @@ -0,0 +1,95 @@ +package mdf.macrolib + +object ConfReader { + import scala.util.matching.Regex._ + + type ConfPort = (String, Boolean) // prefix (e.g. "RW0") and true if masked + + /** Rename ports like "read" to R0, "write" to W0, and "rw" to RW0, and + * return a count of read, write, and readwrite ports. 
+ */ + def renamePorts(ports: Seq[String]): (Seq[ConfPort], Int, Int, Int) = { + var readCount = 0 + var writeCount = 0 + var readWriteCount = 0 + ( + ports.map { + _ match { + case "read" => readCount += 1; (s"R${readCount - 1}", false) + case "write" => writeCount += 1; (s"W${writeCount - 1}", false) + case "mwrite" => writeCount += 1; (s"W${writeCount - 1}", true) + case "rw" => readWriteCount += 1; (s"RW${readWriteCount - 1}", false) + case "mrw" => readWriteCount += 1; (s"RW${readWriteCount - 1}", true) + } + }, + readCount, + writeCount, + readWriteCount + ) + } + + def generateFirrtlPort(port: ConfPort, width: Int, depth: Int, maskGran: Option[Int]): MacroPort = { + val (prefix, masked) = port + val isReadWriter = prefix.startsWith("RW") + val isReader = prefix.startsWith("R") && !isReadWriter + val isWriter = prefix.startsWith("W") + val r = if (isReadWriter) "r" else "" + val w = if (isReadWriter) "w" else "" + MacroPort( + address = PolarizedPort(s"${prefix}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${prefix}_clk", PositiveEdge)), + writeEnable = if (isReadWriter) Some(PolarizedPort(s"${prefix}_${w}mode", ActiveHigh)) else None, + output = if (isReader || isReadWriter) Some(PolarizedPort(s"${prefix}_${w}data", ActiveHigh)) else None, + input = if (isWriter || isReadWriter) Some(PolarizedPort(s"${prefix}_${r}data", ActiveHigh)) else None, + maskPort = if (masked) Some(PolarizedPort(s"${prefix}_${w}mask", ActiveHigh)) else None, + maskGran = if (masked) maskGran else None, + width = Some(width), + depth = Some(depth) + ) + } + + /** Read a conf line into a SRAMMacro, but returns an error string in Left + * instead of throwing errors if the line is malformed. + */ + def readSingleLineSafe(line: String): Either[String, SRAMMacro] = { + val pattern = """name ([^\s]+) depth (\d+) width (\d+) ports ([a-z,]+)\s?(?:mask_gran (\d+))?""".r + pattern.findFirstMatchIn(line) match { + case Some(m: Match) => { + val name: String = m.group(1) + val depth: Int = (m.group(2)).toInt + val width: Int = (m.group(3)).toInt + val ports: Seq[String] = (m.group(4)).split(",") + val (firrtlPorts, readPortCount, writePortCount, readWritePortCount) = renamePorts(ports) + val familyStr = + (if (readPortCount > 0) s"${readPortCount}r" else "") + + (if (writePortCount > 0) s"${writePortCount}w" else "") + + (if (readWritePortCount > 0) s"${readWritePortCount}rw" else "") + val maskGran: Option[Int] = Option(m.group(5)).map(_.toInt) + Right( + SRAMMacro( + name = name, + width = width, + depth = depth, + family = familyStr, + vt = "", + mux = 1, + ports = firrtlPorts.map(generateFirrtlPort(_, width, depth, maskGran)), + extraPorts = List() + ) + ) + } + case _ => Left("Input line did not match conf regex") + } + } + + /** Read a conf line into a SRAMMacro. */ + def readSingleLine(line: String): SRAMMacro = { + readSingleLineSafe(line).right.get + } + + /** Read the contents of the conf file into a seq of SRAMMacro. 
*/ + def readFromString(contents: String): Seq[SRAMMacro] = { + // Trim, remove empty lines, then pass to readSingleLine + contents.split("\n").map(_.trim).filter(_ != "").map(readSingleLine(_)) + } +} diff --git a/tapeout/src/main/scala/mdf/macrolib/FillerMacroBase.scala b/tapeout/src/main/scala/mdf/macrolib/FillerMacroBase.scala new file mode 100644 index 000000000..688871b5d --- /dev/null +++ b/tapeout/src/main/scala/mdf/macrolib/FillerMacroBase.scala @@ -0,0 +1,61 @@ +package mdf.macrolib + +import play.api.libs.json._ +import scala.language.implicitConversions + +// Filler and metal filler +abstract class FillerMacroBase(name: String, vt: String) extends Macro { + override def toString(): String = { + s"${this.getClass.getSimpleName}(name=${name}, vt=${vt})" + } + + override def toJSON(): JsObject = { + JsObject( + Seq( + "type" -> JsString(typeStr), + "name" -> Json.toJson(name), + "vt" -> Json.toJson(vt) + ) + ) + } +} +object FillerMacroBase { + def parseJSON(json: Map[String, JsValue]): Option[FillerMacroBase] = { + val typee: String = json.get("type") match { + case Some(x: JsString) => + x.value match { + case "" => return None + case x => x + } + case _ => return None + } + val name: String = json.get("name") match { + case Some(x: JsString) => + x.value match { + case "" => return None + case x => x + } + case _ => return None + } + val vt: String = json.get("vt") match { + case Some(x: JsString) => + x.value match { + case "" => return None + case x => x + } + case _ => return None + } + typee match { + case "metal filler cell" => Some(MetalFillerMacro(name, vt)) + case "filler cell" => Some(FillerMacro(name, vt)) + case _ => None + } + } +} + +case class FillerMacro(name: String, vt: String) extends FillerMacroBase(name, vt) { + override def typeStr = "filler cell" +} +case class MetalFillerMacro(name: String, vt: String) extends FillerMacroBase(name, vt) { + override def typeStr = "metal filler cell" +} diff --git a/tapeout/src/main/scala/mdf/macrolib/FlipChipMacro.scala b/tapeout/src/main/scala/mdf/macrolib/FlipChipMacro.scala new file mode 100644 index 000000000..45b49d86a --- /dev/null +++ b/tapeout/src/main/scala/mdf/macrolib/FlipChipMacro.scala @@ -0,0 +1,72 @@ +package mdf.macrolib + +import play.api.libs.json._ +import scala.collection.mutable.ListBuffer +import scala.language.implicitConversions + +// Flip Chip Macro +case class FlipChipMacro( + name: String, + bumpDimensions: (Int, Int), + bumpLocations: Seq[Seq[String]]) + extends Macro { + override def toJSON(): JsObject = { + + val output = new ListBuffer[(String, JsValue)]() + output.appendAll( + Seq( + "name" -> Json.toJson(name), + "type" -> Json.toJson(typeStr), + "bump_dimensions" -> JsArray(Seq(bumpDimensions._1, bumpDimensions._2).map { JsNumber(_) }), + "bump_locations" -> JsArray(bumpLocations.map(l => JsArray(l.map(JsString)))) + ) + ) + + JsObject(output) + } + val maxIONameSize = bumpLocations.foldLeft(0) { (size, row) => + row.foldLeft(size) { (size, str) => scala.math.max(size, str.length) } + } + def visualize: String = { + val output = new StringBuffer() + for (x <- 0 until bumpDimensions._1) { + for (y <- 0 until bumpDimensions._2) { + val name = bumpLocations(x)(y).drop(1).dropRight(1) + val extra = maxIONameSize - name.length() + val leftSpace = " " * (extra / 2) + val rightSpace = " " * (extra / 2 + extra % 2) + output.append(leftSpace + name + rightSpace + "|") + } + output.append("\n") + } + output.toString() + } + + override def typeStr = "flipchip" +} + +object FlipChipMacro { + def 
parseJSON(json: Map[String, JsValue]): Option[FlipChipMacro] = { + val name: String = json.get("name") match { + case Some(x: JsString) => x.as[String] + case _ => return None + } + + val bumpDimensions: (Int, Int) = json.get("bump_dimensions") match { + case Some(JsArray(x)) if x.size == 2 => + val z = x.map(_.as[JsNumber].value.intValue()) + (z(0), z(1)) + case None => return None + } + val bumpLocations: Seq[Seq[String]] = json.get("bump_locations") match { + case Some(JsArray(array)) => + array.collect { case JsArray(a2) => a2.map(_.toString) } + case _ => return None + } + // Can't have dimensions and locations which don't match + if (bumpLocations.size != bumpDimensions._1) return None + if (bumpLocations.collect { case x if x.size != bumpDimensions._2 => x }.nonEmpty) return None + + Some(FlipChipMacro(name, bumpDimensions, bumpLocations)) + } +} diff --git a/tapeout/src/main/scala/mdf/macrolib/IOMacro.scala b/tapeout/src/main/scala/mdf/macrolib/IOMacro.scala new file mode 100644 index 000000000..3f8ead8c2 --- /dev/null +++ b/tapeout/src/main/scala/mdf/macrolib/IOMacro.scala @@ -0,0 +1,147 @@ +package mdf.macrolib + +import play.api.libs.json._ +import scala.collection.mutable.ListBuffer +import scala.language.implicitConversions + +sealed abstract class PortType { def toJSON(): JsString = JsString(toString) } +case object Digital extends PortType { override def toString: String = "digital" } +case object Analog extends PortType { override def toString: String = "analog" } +case object Power extends PortType { override def toString: String = "power" } +case object Ground extends PortType { override def toString: String = "ground" } +case object NoConnect extends PortType { override def toString: String = "NC" } + +sealed abstract class Direction { def toJSON(): JsString = JsString(toString) } +case object Input extends Direction { override def toString: String = "input" } +case object Output extends Direction { override def toString: String = "output" } +case object InOut extends Direction { override def toString: String = "inout" } + +sealed abstract class Termination { def toJSON(): JsValue } +case object CMOS extends Termination { override def toJSON(): JsString = JsString("CMOS") } +case class Resistive(ohms: Int) extends Termination { override def toJSON(): JsNumber = JsNumber(ohms) } + +sealed abstract class TerminationType { def toJSON(): JsString } +case object Single extends TerminationType { override def toJSON(): JsString = JsString("single") } +case object Differential extends TerminationType { override def toJSON(): JsString = JsString("differential") } + +// IO macro +case class IOMacro( + name: String, + tpe: PortType, + direction: Option[Direction] = None, + termination: Option[Termination] = None, + terminationType: Option[TerminationType] = None, + terminationReference: Option[String] = None, + matching: Seq[String] = Seq.empty[String], + bbname: Option[String] = None) + extends Macro { + override def toJSON(): JsObject = { + + val output = new ListBuffer[(String, JsValue)]() + output.appendAll( + Seq( + "name" -> Json.toJson(name), + "type" -> tpe.toJSON() + ) + ) + if (direction.isDefined) output.append("direction" -> direction.get.toJSON) + if (termination.isDefined) output.append("termination" -> termination.get.toJSON) + if (terminationType.isDefined) output.append("terminationType" -> terminationType.get.toJSON) + if (terminationReference.isDefined) output.append("terminationReference" -> JsString(terminationReference.get)) + if (matching.nonEmpty) 
output.append("match" -> JsArray(matching.map(JsString))) + if (bbname.nonEmpty) output.append("blackBox" -> JsString(bbname.get)) + + JsObject(output) + } + + override def typeStr = "iomacro" +} +object IOMacro { + def parseJSON(json: Map[String, JsValue]): Option[IOMacro] = { + val name: String = json.get("name") match { + case Some(x: JsString) => x.as[String] + case _ => return None + } + val tpe: PortType = json.get("type") match { + case Some(JsString("power")) => Power + case Some(JsString("ground")) => Ground + case Some(JsString("digital")) => Digital + case Some(JsString("analog")) => Analog + case Some(JsString("NC")) => NoConnect + case _ => return None + } + val direction: Option[Direction] = json.get("direction") match { + case Some(JsString("input")) => Some(Input) + case Some(JsString("output")) => Some(Output) + case Some(JsString("inout")) => Some(InOut) + case _ => None + } + val termination: Option[Termination] = json.get("termination") match { + case Some(JsNumber(x)) => Some(Resistive(x.toInt)) + case Some(JsString("CMOS")) => Some(CMOS) + case _ => None + } + val terminationType: Option[TerminationType] = json.get("terminationType") match { + case Some(JsString("differential")) => Some(Differential) + case Some(JsString("single")) => Some(Single) + case _ => None + } + val terminationRef: Option[String] = json.get("terminationReference") match { + case Some(JsString(x)) => Some(x) + case _ if terminationType.isDefined => return None + case _ => None + } + val matching: Seq[String] = json.get("match") match { + case Some(JsArray(array)) => array.map(_.as[JsString].value).toList + case _ => Seq.empty[String] + } + val bbname: Option[String] = json.get("blackBox") match { + case Some(JsString(module)) => Some(module) + case Some(_) => return None + case _ => None + } + Some(IOMacro(name, tpe, direction, termination, terminationType, terminationRef, matching, bbname)) + } +} + +case class IOProperties(name: String, top: String, ios: Seq[IOMacro]) extends Macro { + override def toJSON(): JsObject = { + val output = new ListBuffer[(String, JsValue)]() + output.appendAll( + Seq( + "name" -> Json.toJson(name), + "top" -> Json.toJson(top), + "type" -> Json.toJson(typeStr), + "ios" -> JsArray(ios.map(_.toJSON)) + ) + ) + JsObject(output) + } + + override def typeStr = "io_properties" + +} + +object IOProperties { + def parseJSON(json: Map[String, JsValue]): Option[IOProperties] = { + val name: String = json.get("name") match { + case Some(x: JsString) => x.as[String] + case _ => return None + } + val top: String = json.get("top") match { + case Some(x: JsString) => x.as[String] + case _ => return None + } + val ios: Seq[IOMacro] = json.get("ios") match { + case Some(x: JsArray) => + x.as[List[Map[String, JsValue]]].map { a => + val b = IOMacro.parseJSON(a); + if (b == None) { + return None + } else b.get + } + case _ => List() + } + Some(IOProperties(name, top, ios)) + } +} diff --git a/tapeout/src/main/scala/mdf/macrolib/MacroLib.scala b/tapeout/src/main/scala/mdf/macrolib/MacroLib.scala new file mode 100644 index 000000000..569c4dacb --- /dev/null +++ b/tapeout/src/main/scala/mdf/macrolib/MacroLib.scala @@ -0,0 +1,19 @@ +package mdf.macrolib + +import play.api.libs.json._ +import scala.collection.mutable.ListBuffer +import scala.language.implicitConversions + +// TODO: decide if we should always silently absorb errors + +// See macro_format.yml for the format description. 
+ +// "Base class" for macros +abstract class Macro { + def name: String + + // Type of macro is determined by subclass + def typeStr: String + + def toJSON(): JsObject +} diff --git a/tapeout/src/main/scala/mdf/macrolib/SRAM.scala b/tapeout/src/main/scala/mdf/macrolib/SRAM.scala new file mode 100644 index 000000000..ea51b0490 --- /dev/null +++ b/tapeout/src/main/scala/mdf/macrolib/SRAM.scala @@ -0,0 +1,444 @@ +package mdf.macrolib + +import play.api.libs.json._ +import scala.collection.mutable.ListBuffer +import scala.language.implicitConversions + +// SRAM macro +case class SRAMMacro( + name: String, + width: Int, + depth: BigInt, + family: String, + ports: Seq[MacroPort], + vt: String = "", + mux: Int = 1, + extraPorts: Seq[MacroExtraPort] = List()) + extends Macro { + override def toJSON(): JsObject = { + val output = new ListBuffer[(String, JsValue)]() + output.appendAll( + Seq( + "type" -> JsString("sram"), + "name" -> Json.toJson(name), + "width" -> Json.toJson(width), + "depth" -> Json.toJson(depth.toString), + "mux" -> Json.toJson(mux), + "mask" -> Json.toJson(ports.exists(p => p.maskPort.isDefined)), + "ports" -> JsArray(ports.map { _.toJSON }) + ) + ) + if (family != "") { + output.appendAll(Seq("family" -> Json.toJson(family))) + } + if (vt != "") { + output.appendAll(Seq("vt" -> Json.toJson(vt))) + } + if (extraPorts.length > 0) { + output.appendAll(Seq("extra ports" -> JsArray(extraPorts.map { _.toJSON }))) + } + + JsObject(output) + } + + override def typeStr = "sram" +} +object SRAMMacro { + def parseJSON(json: Map[String, JsValue]): Option[SRAMMacro] = { + val name: String = json.get("name") match { + case Some(x: JsString) => x.as[String] + case _ => return None + } + val width: Int = json.get("width") match { + case Some(x: JsNumber) => x.value.intValue + case _ => return None + } + val depth: BigInt = json.get("depth") match { + case Some(x: JsString) => + try { BigInt(x.as[String]) } + catch { case _: Throwable => return None } + case _ => return None + } + val family: String = json.get("family") match { + case Some(x: JsString) => x.as[String] + case _ => "" // optional + } + val vt: String = json.get("vt") match { + case Some(x: JsString) => x.as[String] + case _ => "" // optional + } + val mux: Int = json.get("mux") match { + case Some(x: JsNumber) => x.value.intValue + case _ => 1 // default + } + val ports: Seq[MacroPort] = json.get("ports") match { + case Some(x: JsArray) => + x.as[List[Map[String, JsValue]]].map { a => + val b = MacroPort.parseJSON(a, width, depth); + if (b == None) { + return None + } else b.get + } + case _ => List() + } + if (ports.length == 0) { + // Can't have portless memories. 
+ return None + } + val extraPorts: Seq[MacroExtraPort] = json.get("extra ports") match { + case Some(x: JsArray) => + x.as[List[Map[String, JsValue]]].map { a => + val b = MacroExtraPort.parseJSON(a); + if (b == None) { + return None + } else b.get + } + case _ => List() + } + Some(SRAMMacro(name, width, depth, family, ports, vt, mux, extraPorts)) + } +} + +// SRAM compiler +case class SRAMGroup( + name: Seq[String], + family: String, + vt: Seq[String], + mux: Int, + depth: Range, + width: Range, + ports: Seq[MacroPort], + extraPorts: Seq[MacroExtraPort] = List()) { + def toJSON: JsObject = { + val output = new ListBuffer[(String, JsValue)]() + output.appendAll( + Seq( + "name" -> JsArray(name.map(Json.toJson(_))), + "vt" -> JsArray(vt.map(Json.toJson(_))), + "mux" -> Json.toJson(mux), + "depth" -> JsArray(Seq(depth.start, depth.end, depth.step).map { x => Json.toJson(x) }), + "width" -> JsArray(Seq(width.start, width.end, width.step).map { x => Json.toJson(x) }), + "ports" -> JsArray(ports.map { _.toJSON }) + ) + ) + if (family != "") { + output.appendAll(Seq("family" -> Json.toJson(family))) + } + if (extraPorts.length > 0) { + output.appendAll(Seq("extra ports" -> JsArray(extraPorts.map { _.toJSON }))) + } + JsObject(output) + } +} +object SRAMGroup { + def parseJSON(json: Map[String, JsValue]): Option[SRAMGroup] = { + val family: String = json.get("family") match { + case Some(x: JsString) => x.as[String] + case _ => "" // optional + } + val name: Seq[String] = json.get("name") match { + case Some(x: JsArray) => x.as[List[JsString]].map(_.as[String]) + case _ => return None + } + val vt: Seq[String] = json.get("vt") match { + case Some(x: JsArray) => x.as[List[JsString]].map(_.as[String]) + case _ => return None + } + val mux: Int = json.get("mux") match { + case Some(x: JsNumber) => x.value.intValue + case _ => return None + } + val depth: Range = json.get("depth") match { + case Some(x: JsArray) => + val seq = x.as[List[JsNumber]].map(_.value.intValue) + Range.inclusive(seq(0), seq(1), seq(2)) + case _ => return None + } + val width: Range = json.get("width") match { + case Some(x: JsArray) => + val seq = x.as[List[JsNumber]].map(_.value.intValue) + Range.inclusive(seq(0), seq(1), seq(2)) + case _ => return None + } + val ports: Seq[MacroPort] = json.get("ports") match { + case Some(x: JsArray) => + x.as[List[Map[String, JsValue]]].map { a => + { + val b = MacroPort.parseJSON(a, None, None); + if (b == None) { + return None + } else b.get + } + } + case _ => List() + } + if (ports.length == 0) { + // Can't have portless memories. 
+ return None + } + val extraPorts: Seq[MacroExtraPort] = json.get("extra ports") match { + case Some(x: JsArray) => + x.as[List[Map[String, JsValue]]].map { a => + { + val b = MacroExtraPort.parseJSON(a); + if (b == None) { + return None + } else b.get + } + } + case _ => List() + } + Some(SRAMGroup(name, family, vt, mux, depth, width, ports, extraPorts)) + } +} + +case class SRAMCompiler( + name: String, + groups: Seq[SRAMGroup]) + extends Macro { + override def toJSON(): JsObject = { + val output = new ListBuffer[(String, JsValue)]() + output.appendAll( + Seq( + "type" -> Json.toJson("sramcompiler"), + "name" -> Json.toJson(name), + "groups" -> JsArray(groups.map { _.toJSON }) + ) + ) + + JsObject(output) + } + + override def typeStr = "sramcompiler" +} +object SRAMCompiler { + def parseJSON(json: Map[String, JsValue]): Option[SRAMCompiler] = { + val name: String = json.get("name") match { + case Some(x: JsString) => x.as[String] + case _ => return None + } + val groups: Seq[SRAMGroup] = json.get("groups") match { + case Some(x: JsArray) => + x.as[List[Map[String, JsValue]]].map { a => + { + val b = SRAMGroup.parseJSON(a); + if (b == None) { return None } + else b.get + } + } + case _ => List() + } + if (groups.length == 0) { + // Can't have portless memories. + return None + } + Some(SRAMCompiler(name, groups)) + } +} + +// Type of extra port +sealed abstract class MacroExtraPortType +case object Constant extends MacroExtraPortType +object MacroExtraPortType { + implicit def toMacroExtraPortType(s: Any): Option[MacroExtraPortType] = { + s match { + case "constant" => Some(Constant) + case _ => None + } + } + + implicit def toString(t: MacroExtraPortType): String = { + t match { + case Constant => "constant" + case _ => "" + } + } +} + +// Extra port in SRAM +case class MacroExtraPort( + name: String, + width: Int, + portType: MacroExtraPortType, + value: BigInt) { + def toJSON(): JsObject = { + JsObject( + Seq( + "name" -> Json.toJson(name), + "width" -> Json.toJson(width), + "type" -> JsString(MacroExtraPortType.toString(portType)), + "value" -> JsNumber(BigDecimal(value)) + ) + ) + } +} +object MacroExtraPort { + def parseJSON(json: Map[String, JsValue]): Option[MacroExtraPort] = { + val name = json.get("name") match { + case Some(x: JsString) => x.value + case _ => return None + } + val width = json.get("width") match { + case Some(x: JsNumber) => x.value.intValue + case _ => return None + } + val portType: MacroExtraPortType = json.get("type") match { + case Some(x: JsString) => + MacroExtraPortType.toMacroExtraPortType(x.value) match { + case Some(t: MacroExtraPortType) => t + case _ => return None + } + case _ => return None + } + val value = json.get("value") match { + case Some(x: JsNumber) => x.value.toBigInt + case _ => return None + } + Some(MacroExtraPort(name, width, portType, value)) + } +} + +// A named port that also has polarity. +case class PolarizedPort(name: String, polarity: PortPolarity) { + def toSeqMap(prefix: String): Seq[Tuple2[String, JsValue]] = { + Seq( + prefix + " port name" -> Json.toJson(name), + prefix + " port polarity" -> JsString(polarity) + ) + } +} +object PolarizedPort { + // Parse a pair of " port name" and " port polarity" keys into a + // polarized port definition. 
+ def parseJSON(json: Map[String, JsValue], prefix: String): Option[PolarizedPort] = { + val name = json.get(prefix + " port name") match { + case Some(x: JsString) => Some(x.value) + case _ => None + } + val polarity: Option[PortPolarity] = json.get(prefix + " port polarity") match { + case Some(x: JsString) => Some(x.value) + case _ => None + } + + (name, polarity) match { + case (Some(n: String), Some(p: PortPolarity)) => Some(PolarizedPort(n, p)) + case _ => None + } + } +} + +// A SRAM memory port +case class MacroPort( + address: PolarizedPort, + clock: Option[PolarizedPort] = None, + writeEnable: Option[PolarizedPort] = None, + readEnable: Option[PolarizedPort] = None, + chipEnable: Option[PolarizedPort] = None, + output: Option[PolarizedPort] = None, + input: Option[PolarizedPort] = None, + maskPort: Option[PolarizedPort] = None, + maskGran: Option[Int] = None, + // For internal use only; these aren't port-specific. + width: Option[Int], + depth: Option[BigInt]) { + def effectiveMaskGran = maskGran.getOrElse(width.get) + + def toJSON(): JsObject = { + val keys: Seq[Tuple2[String, Option[Any]]] = Seq( + "address" -> Some(address), + "clock" -> clock, + "write enable" -> writeEnable, + "read enable" -> readEnable, + "chip enable" -> chipEnable, + "output" -> output, + "input" -> input, + "mask" -> maskPort, + "mask granularity" -> maskGran + ) + JsObject(keys.flatMap(k => { + val (key, value) = k + value match { + case Some(x: Int) => Seq(key -> JsNumber(x)) + case Some(x: PolarizedPort) => x.toSeqMap(key) + case _ => List() + } + })) + } + + // Check that all port names are unique. + private val polarizedPorts = + List(Some(address), clock, writeEnable, readEnable, chipEnable, output, input, maskPort).flatten + assert(polarizedPorts.distinct.size == polarizedPorts.size, "All port names must be unique") +} +object MacroPort { + def parseJSON(json: Map[String, JsValue]): Option[MacroPort] = parseJSON(json, None, None) + def parseJSON(json: Map[String, JsValue], width: Int, depth: BigInt): Option[MacroPort] = + parseJSON(json, Some(width), Some(depth)) + def parseJSON(json: Map[String, JsValue], width: Option[Int], depth: Option[BigInt]): Option[MacroPort] = { + val address = PolarizedPort.parseJSON(json, "address") + if (address == None) { + return None + } + + val clock = PolarizedPort.parseJSON(json, "clock") + // TODO: validate based on family (e.g. 
1rw must have a write enable, etc) + val writeEnable = PolarizedPort.parseJSON(json, "write enable") + val readEnable = PolarizedPort.parseJSON(json, "read enable") + val chipEnable = PolarizedPort.parseJSON(json, "chip enable") + + val output = PolarizedPort.parseJSON(json, "output") + val input = PolarizedPort.parseJSON(json, "input") + + val maskPort = PolarizedPort.parseJSON(json, "mask") + val maskGran: Option[Int] = json.get("mask granularity") match { + case Some(x: JsNumber) => Some(x.value.intValue) + case _ => None + } + + if (maskPort.isDefined != maskGran.isDefined) { + return None + } + + Some( + MacroPort( + width = width, + depth = depth, + address = address.get, + clock = clock, + writeEnable = writeEnable, + readEnable = readEnable, + chipEnable = chipEnable, + output = output, + input = input, + maskPort = maskPort, + maskGran = maskGran + ) + ) + } +} + +// Port polarity +trait PortPolarity +case object ActiveLow extends PortPolarity +case object ActiveHigh extends PortPolarity +case object NegativeEdge extends PortPolarity +case object PositiveEdge extends PortPolarity +object PortPolarity { + implicit def toPortPolarity(s: String): PortPolarity = (s: @unchecked) match { + case "active low" => ActiveLow + case "active high" => ActiveHigh + case "negative edge" => NegativeEdge + case "positive edge" => PositiveEdge + } + implicit def toPortPolarity(s: Option[String]): Option[PortPolarity] = + s.map(toPortPolarity) + + implicit def toString(p: PortPolarity): String = { + p match { + case ActiveLow => "active low" + case ActiveHigh => "active high" + case NegativeEdge => "negative edge" + case PositiveEdge => "positive edge" + } + } +} diff --git a/tapeout/src/main/scala/mdf/macrolib/Utils.scala b/tapeout/src/main/scala/mdf/macrolib/Utils.scala new file mode 100644 index 000000000..795cff0ac --- /dev/null +++ b/tapeout/src/main/scala/mdf/macrolib/Utils.scala @@ -0,0 +1,87 @@ +package mdf.macrolib + +import play.api.libs.json._ +import scala.collection.mutable.ListBuffer +import scala.language.implicitConversions + +object Utils { + // Read a MDF file from a String. + def readMDFFromString(str: String): Option[Seq[Macro]] = { + Json.parse(str) match { + // Make sure that the document is a list. + case arr: JsArray => { + val result: List[Option[Macro]] = arr.as[List[Map[String, JsValue]]].map { obj => + // Check the type of object. + val objTypeStr: String = obj.get("type") match { + case Some(x: JsString) => x.as[String] + case _ => return None // error, no type found + } + objTypeStr match { + case "filler cell" | "metal filler cell" => FillerMacroBase.parseJSON(obj) + case "sram" => SRAMMacro.parseJSON(obj) + case "sramcompiler" => SRAMCompiler.parseJSON(obj) + case "io_properties" => IOProperties.parseJSON(obj) + case "flipchip" => FlipChipMacro.parseJSON(obj) + case _ => None // skip unknown macro types + } + } + // Remove all the Nones and convert back to Seq[Macro] + Some(result.filter { x => x != None }.map { x => x.get }) + } + case _ => None + } + } + + // Read a MDF file from a path. + def readMDFFromPath(path: Option[String]): Option[Seq[Macro]] = { + path match { + case None => None + // Read file into string and parse + case Some(p) => Utils.readMDFFromString(scala.io.Source.fromFile(p).mkString) + } + } + + // Write a MDF file to a String. + def writeMDFToString(s: Seq[Macro]): String = { + Json.prettyPrint(JsArray(s.map(_.toJSON))) + } + + // Write a MDF file from a path. + // Returns true upon success. 
+ def writeMDFToPath(path: Option[String], s: Seq[Macro]): Boolean = { + path match { + case None => false + // Read file into string and parse + case Some(p: String) => { + import java.io._ + val pw = new PrintWriter(new File(p)) + pw.write(writeMDFToString(s)) + val error = pw.checkError + pw.close() + !error + } + } + } + + // Write a macro file to a String. + def writeMacroToString(s: Macro): String = { + Json.prettyPrint(s.toJSON) + } + + // Write a Macro file from a path. + // Returns true upon success. + def writeMacroToPath(path: Option[String], s: Macro): Boolean = { + path match { + case None => false + // Read file into string and parse + case Some(p: String) => { + import java.io._ + val pw = new PrintWriter(new File(p)) + pw.write(writeMacroToString(s)) + val error = pw.checkError + pw.close() + !error + } + } + } +} diff --git a/macros/src/test/resources/lib-BOOMTest.json b/tapeout/src/test/resources/lib-BOOMTest.json similarity index 100% rename from macros/src/test/resources/lib-BOOMTest.json rename to tapeout/src/test/resources/lib-BOOMTest.json diff --git a/macros/src/test/resources/lib-MaskPortTest.json b/tapeout/src/test/resources/lib-MaskPortTest.json similarity index 100% rename from macros/src/test/resources/lib-MaskPortTest.json rename to tapeout/src/test/resources/lib-MaskPortTest.json diff --git a/macros/src/test/resources/lib-WriteEnableTest.json b/tapeout/src/test/resources/lib-WriteEnableTest.json similarity index 100% rename from macros/src/test/resources/lib-WriteEnableTest.json rename to tapeout/src/test/resources/lib-WriteEnableTest.json diff --git a/macros/src/test/scala/barstools/macros/CostFunction.scala b/tapeout/src/test/scala/barstools/macros/CostFunction.scala similarity index 100% rename from macros/src/test/scala/barstools/macros/CostFunction.scala rename to tapeout/src/test/scala/barstools/macros/CostFunction.scala diff --git a/macros/src/test/scala/barstools/macros/Functional.scala b/tapeout/src/test/scala/barstools/macros/Functional.scala similarity index 100% rename from macros/src/test/scala/barstools/macros/Functional.scala rename to tapeout/src/test/scala/barstools/macros/Functional.scala diff --git a/macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala b/tapeout/src/test/scala/barstools/macros/MacroCompilerSpec.scala similarity index 100% rename from macros/src/test/scala/barstools/macros/MacroCompilerSpec.scala rename to tapeout/src/test/scala/barstools/macros/MacroCompilerSpec.scala diff --git a/macros/src/test/scala/barstools/macros/Masks.scala b/tapeout/src/test/scala/barstools/macros/Masks.scala similarity index 100% rename from macros/src/test/scala/barstools/macros/Masks.scala rename to tapeout/src/test/scala/barstools/macros/Masks.scala diff --git a/macros/src/test/scala/barstools/macros/MultiPort.scala b/tapeout/src/test/scala/barstools/macros/MultiPort.scala similarity index 100% rename from macros/src/test/scala/barstools/macros/MultiPort.scala rename to tapeout/src/test/scala/barstools/macros/MultiPort.scala diff --git a/macros/src/test/scala/barstools/macros/SRAMCompiler.scala b/tapeout/src/test/scala/barstools/macros/SRAMCompiler.scala similarity index 100% rename from macros/src/test/scala/barstools/macros/SRAMCompiler.scala rename to tapeout/src/test/scala/barstools/macros/SRAMCompiler.scala diff --git a/macros/src/test/scala/barstools/macros/SimpleSplitDepth.scala b/tapeout/src/test/scala/barstools/macros/SimpleSplitDepth.scala similarity index 100% rename from 
macros/src/test/scala/barstools/macros/SimpleSplitDepth.scala rename to tapeout/src/test/scala/barstools/macros/SimpleSplitDepth.scala diff --git a/macros/src/test/scala/barstools/macros/SimpleSplitWidth.scala b/tapeout/src/test/scala/barstools/macros/SimpleSplitWidth.scala similarity index 100% rename from macros/src/test/scala/barstools/macros/SimpleSplitWidth.scala rename to tapeout/src/test/scala/barstools/macros/SimpleSplitWidth.scala diff --git a/macros/src/test/scala/barstools/macros/SpecificExamples.scala b/tapeout/src/test/scala/barstools/macros/SpecificExamples.scala similarity index 99% rename from macros/src/test/scala/barstools/macros/SpecificExamples.scala rename to tapeout/src/test/scala/barstools/macros/SpecificExamples.scala index 334e3a73e..6ee7255a5 100644 --- a/macros/src/test/scala/barstools/macros/SpecificExamples.scala +++ b/tapeout/src/test/scala/barstools/macros/SpecificExamples.scala @@ -27,7 +27,7 @@ class WriteEnableTest extends MacroCompilerSpec with HasSRAMGenerator { val lib = s"lib-WriteEnableTest.json" // lib. of mems to create it val v = s"WriteEnableTest.json" - override val libPrefix = "macros/src/test/resources" + override val libPrefix = "tapeout/src/test/resources" val memSRAMs = mdf.macrolib.Utils .readMDFFromString(""" @@ -97,7 +97,7 @@ class MaskPortTest extends MacroCompilerSpec with HasSRAMGenerator { val lib = s"lib-MaskPortTest.json" // lib. of mems to create it val v = s"MaskPortTest.json" - override val libPrefix = "macros/src/test/resources" + override val libPrefix = "tapeout/src/test/resources" val memSRAMs = mdf.macrolib.Utils .readMDFFromString(""" @@ -181,7 +181,7 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { val lib = s"lib-BOOMTest.json" val v = s"BOOMTest.v" - override val libPrefix = "macros/src/test/resources" + override val libPrefix = "tapeout/src/test/resources" val memSRAMs = mdf.macrolib.Utils .readMDFFromString(""" diff --git a/macros/src/test/scala/barstools/macros/SynFlops.scala b/tapeout/src/test/scala/barstools/macros/SynFlops.scala similarity index 100% rename from macros/src/test/scala/barstools/macros/SynFlops.scala rename to tapeout/src/test/scala/barstools/macros/SynFlops.scala diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala index cefd97595..ec4822fec 100644 --- a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala @@ -62,7 +62,7 @@ class GenerateSpec extends AnyFreeSpec { FileUtils.makeDirectory(targetDir) val printWriter = new PrintWriter(new File(s"$targetDir/GenerateExampleTester.fir")) - printWriter.write((new ChiselStage()).emitFirrtl(new GenerateExampleTester)) + printWriter.write((new ChiselStage()).emitFirrtl(new GenerateExampleTester, Array("--target-dir", targetDir))) printWriter.close() val blackBoxInverterText = """ diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala index 701c78453..d18053f09 100644 --- a/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala @@ -22,7 +22,7 @@ class ExampleModuleNeedsResetInverted extends Module with ResetInverter { class ResetNSpec extends AnyFreeSpec with Matchers { "Inverting reset needs to be done throughout module in Chirrtl" in 
{ - val chirrtl = (new ChiselStage).emitChirrtl(new ExampleModuleNeedsResetInverted) + val chirrtl = (new ChiselStage).emitChirrtl(new ExampleModuleNeedsResetInverted, Array("--target-dir", "test_run_dir/reset_n_spec")) chirrtl should include("input reset :") (chirrtl should not).include("input reset_n :") (chirrtl should not).include("node reset = not(reset_n)") @@ -32,7 +32,7 @@ class ResetNSpec extends AnyFreeSpec with Matchers { // generate low-firrtl val firrtl = (new ChiselStage) .execute( - Array("-X", "low"), + Array("-X", "low", "--target-dir", "test_run_dir/reset_inverting_spec"), Seq(ChiselGeneratorAnnotation(() => new ExampleModuleNeedsResetInverted)) ) .collect { diff --git a/tapeout/src/test/scala/mdf/macrolib/ConfReaderSpec.scala b/tapeout/src/test/scala/mdf/macrolib/ConfReaderSpec.scala new file mode 100644 index 000000000..58680cd78 --- /dev/null +++ b/tapeout/src/test/scala/mdf/macrolib/ConfReaderSpec.scala @@ -0,0 +1,101 @@ +package mdf.macrolib + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +class ConfReaderSpec extends AnyFlatSpec with Matchers { + + /** Generate a read port in accordance with RenameAnnotatedMemoryPorts. */ + def generateReadPort(num: Int, width: Int, depth: Int): MacroPort = { + MacroPort( + address = PolarizedPort(s"R${num}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"R${num}_clk", PositiveEdge)), + output = Some(PolarizedPort(s"R${num}_data", ActiveHigh)), + width = Some(width), + depth = Some(depth) + ) + } + + /** Generate a write port in accordance with RenameAnnotatedMemoryPorts. */ + def generateWritePort(num: Int, width: Int, depth: Int, maskGran: Option[Int] = None): MacroPort = { + MacroPort( + address = PolarizedPort(s"W${num}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"W${num}_clk", PositiveEdge)), + input = Some(PolarizedPort(s"W${num}_data", ActiveHigh)), + maskPort = if (maskGran.isDefined) Some(PolarizedPort(s"W${num}_mask", ActiveHigh)) else None, + maskGran = maskGran, + width = Some(184), + depth = Some(128) + ) + } + + "ConfReader" should "read a 1rw conf line" in { + val confStr = "name Foo_Bar_mem123_ext depth 128 width 184 ports mrw mask_gran 23" + ConfReader.readSingleLine(confStr) shouldBe SRAMMacro( + name = "Foo_Bar_mem123_ext", + width = 184, + depth = 128, + family = "1rw", + ports = List( + MacroPort( + address = PolarizedPort("RW0_addr", ActiveHigh), + clock = Some(PolarizedPort("RW0_clk", PositiveEdge)), + writeEnable = Some(PolarizedPort("RW0_wmode", ActiveHigh)), + output = Some(PolarizedPort("RW0_wdata", ActiveHigh)), + input = Some(PolarizedPort("RW0_rdata", ActiveHigh)), + maskPort = Some(PolarizedPort("RW0_wmask", ActiveHigh)), + maskGran = Some(23), + width = Some(184), + depth = Some(128) + ) + ), + extraPorts = List() + ) + } + + "ConfReader" should "read a 1r1w conf line" in { + val confStr = "name Foo_Bar_mem321_ext depth 128 width 184 ports read,mwrite mask_gran 23" + ConfReader.readSingleLine(confStr) shouldBe SRAMMacro( + name = "Foo_Bar_mem321_ext", + width = 184, + depth = 128, + family = "1r1w", + ports = List( + generateReadPort(0, 184, 128), + generateWritePort(0, 184, 128, Some(23)) + ), + extraPorts = List() + ) + } + + "ConfReader" should "read a mixed 1r2w conf line" in { + val confStr = "name Foo_Bar_mem321_ext depth 128 width 184 ports read,mwrite,write mask_gran 23" + ConfReader.readSingleLine(confStr) shouldBe SRAMMacro( + name = "Foo_Bar_mem321_ext", + width = 184, + depth = 128, + family = "1r2w", + ports = List( + 
generateReadPort(0, 184, 128), + generateWritePort(0, 184, 128, Some(23)), + generateWritePort(1, 184, 128) + ), + extraPorts = List() + ) + } + + "ConfReader" should "read a 42r29w conf line" in { + val confStr = + "name Foo_Bar_mem321_ext depth 128 width 184 ports " + (Seq.fill(42)("read") ++ Seq.fill(29)("mwrite")) + .mkString(",") + " mask_gran 23" + ConfReader.readSingleLine(confStr) shouldBe SRAMMacro( + name = "Foo_Bar_mem321_ext", + width = 184, + depth = 128, + family = "42r29w", + ports = ((0 to 41).map((num: Int) => generateReadPort(num, 184, 128))) ++ + ((0 to 28).map((num: Int) => generateWritePort(num, 184, 128, Some(23)))), + extraPorts = List() + ) + } +} diff --git a/tapeout/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala b/tapeout/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala new file mode 100644 index 000000000..ba51e4d14 --- /dev/null +++ b/tapeout/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala @@ -0,0 +1,14 @@ +package mdf.macrolib + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +class FlipChipMacroSpec extends AnyFlatSpec with Matchers { + "Parsing flipchipmacros" should "work" in { + val stream = getClass.getResourceAsStream("/bumps.json") + val mdf = Utils.readMDFFromString(scala.io.Source.fromInputStream(stream).getLines().mkString("\n")) + mdf match { + case Some(Seq(fcp: FlipChipMacro)) => println(fcp.visualize) + } + } +} diff --git a/tapeout/src/test/scala/mdf/macrolib/IOMacroSpec.scala b/tapeout/src/test/scala/mdf/macrolib/IOMacroSpec.scala new file mode 100644 index 000000000..c6ab6e104 --- /dev/null +++ b/tapeout/src/test/scala/mdf/macrolib/IOMacroSpec.scala @@ -0,0 +1,67 @@ +package mdf.macrolib + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +class IOMacroSpec extends AnyFlatSpec with Matchers { + "Ground IOs" should "be detected" in { + val json = + """{ + | "name" : "GND", + | "type" : "ground" + |}""".stripMargin + val m = JSONUtils.readStringValueMap(json).get + IOMacro.parseJSON(m) shouldBe Some(IOMacro("GND", Ground)) + } + "Power IOs" should "be detected" in { + val json = + """{ + | "name" : "VDD0V8", + | "type" : "power" + |}""".stripMargin + val m = JSONUtils.readStringValueMap(json).get + IOMacro.parseJSON(m) shouldBe Some(IOMacro("VDD0V8", Power)) + } + "Digital IOs" should "be detected" in { + val json = + """{ + | "name" : "VDDC0_SEL[1:0]", + | "type" : "digital", + | "direction" : "output", + | "termination" : "CMOS" + |}""".stripMargin + val m = JSONUtils.readStringValueMap(json).get + IOMacro.parseJSON(m) shouldBe Some(IOMacro("VDDC0_SEL[1:0]", Digital, Some(Output), Some(CMOS))) + } + "Digital IOs with termination" should "be detected" in { + val json = + """{ + | "name" : "CCLK1", + | "type" : "digital", + | "direction" : "input", + | "termination" : 50, + | "terminationType" : "single", + | "terminationReference" : "GND" + |}""".stripMargin + val m = JSONUtils.readStringValueMap(json).get + IOMacro.parseJSON(m) shouldBe Some( + IOMacro("CCLK1", Digital, Some(Input), Some(Resistive(50)), Some(Single), Some("GND")) + ) + } + "Digital IOs with matching and termination" should "be detected" in { + val json = + """{ + | "name" : "REFCLK0P", + | "type" : "analog", + | "direction" : "input", + | "match" : ["REFCLK0N"], + | "termination" : 100, + | "terminationType" : "differential", + | "terminationReference" : "GND" + |}""".stripMargin + val m = JSONUtils.readStringValueMap(json).get + IOMacro.parseJSON(m) shouldBe Some( + 
IOMacro("REFCLK0P", Analog, Some(Input), Some(Resistive(100)), Some(Differential), Some("GND"), List("REFCLK0N")) + ) + } +} diff --git a/tapeout/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala b/tapeout/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala new file mode 100644 index 000000000..ffd13be5d --- /dev/null +++ b/tapeout/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala @@ -0,0 +1,14 @@ +package mdf.macrolib + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +class IOPropertiesSpec extends AnyFlatSpec with Matchers { + "Parsing io_properties" should "work" in { + val stream = getClass.getResourceAsStream("/io_properties.json") + val mdf = Utils.readMDFFromString(scala.io.Source.fromInputStream(stream).getLines().mkString("\n")) + mdf match { + case Some(Seq(fcp: IOProperties)) => + } + } +} diff --git a/tapeout/src/test/scala/mdf/macrolib/MacroLibOutput.scala b/tapeout/src/test/scala/mdf/macrolib/MacroLibOutput.scala new file mode 100644 index 000000000..85feaffaf --- /dev/null +++ b/tapeout/src/test/scala/mdf/macrolib/MacroLibOutput.scala @@ -0,0 +1,270 @@ +package mdf.macrolib + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers +import play.api.libs.json._ + +import java.io.File + +// Output tests (Scala -> JSON). +// TODO: unify these tests with the input tests? + +trait HasAwesomeMemData { + def getAwesomeMem() = { + SRAMMacro( + name = "awesome_mem", + width = 32, + depth = 1024, + family = "1rw", + ports = Seq( + MacroPort( + address = PolarizedPort(name = "addr", polarity = ActiveHigh), + clock = Some(PolarizedPort(name = "clk", polarity = PositiveEdge)), + writeEnable = Some(PolarizedPort(name = "write_enable", polarity = ActiveHigh)), + readEnable = Some(PolarizedPort(name = "read_enable", polarity = ActiveHigh)), + chipEnable = Some(PolarizedPort(name = "chip_enable", polarity = ActiveHigh)), + output = Some(PolarizedPort(name = "data_out", polarity = ActiveHigh)), + input = Some(PolarizedPort(name = "data_in", polarity = ActiveHigh)), + maskPort = Some(PolarizedPort(name = "mask", polarity = ActiveHigh)), + maskGran = Some(8), + width = Some(32), + depth = Some(1024) // These numbers don't matter. + ) + ), + extraPorts = List() + ) + } + + def getAwesomeMemJSON(): String = { + """ + | { + | "type": "sram", + | "name": "awesome_mem", + | "width": 32, + | "depth": "1024", + | "mux": 1, + | "mask":true, + | "family": "1rw", + | "ports": [ + | { + | "address port name": "addr", + | "address port polarity": "active high", + | "clock port name": "clk", + | "clock port polarity": "positive edge", + | "write enable port name": "write_enable", + | "write enable port polarity": "active high", + | "read enable port name": "read_enable", + | "read enable port polarity": "active high", + | "chip enable port name": "chip_enable", + | "chip enable port polarity": "active high", + | "output port name": "data_out", + | "output port polarity": "active high", + | "input port name": "data_in", + | "input port polarity": "active high", + | "mask port name": "mask", + | "mask port polarity": "active high", + | "mask granularity": 8 + | } + | ] + | } + |""".stripMargin + } +} + +// Tests for filler macros. 
+class FillerMacroOutput extends AnyFlatSpec with Matchers { + "Valid lvt macro" should "be generated" in { + val expected = """ + | { + | "type": "filler cell", + | "name": "MY_FILLER_CELL", + | "vt": "lvt" + | } + |""".stripMargin + FillerMacro("MY_FILLER_CELL", "lvt").toJSON shouldBe Json.parse(expected) + } + + "Valid metal macro" should "be generated" in { + val expected = """ + | { + | "type": "metal filler cell", + | "name": "METAL_FILLER_CELL", + | "vt": "lvt" + | } + |""".stripMargin + MetalFillerMacro("METAL_FILLER_CELL", "lvt").toJSON shouldBe Json.parse(expected) + } + + "Valid hvt macro" should "be generated" in { + val expected = """ + | { + | "type": "filler cell", + | "name": "HVT_CELL_PROP", + | "vt": "hvt" + | } + |""".stripMargin + FillerMacro("HVT_CELL_PROP", "hvt").toJSON shouldBe Json.parse(expected) + } +} + +class SRAMPortOutput extends AnyFlatSpec with Matchers { + "Extra port" should "be generated" in { + val m = MacroExtraPort( + name = "TIE_HIGH", + width = 8, + portType = Constant, + value = ((1 << 8) - 1) + ) + val expected = """ + | { + | "type": "constant", + | "name": "TIE_HIGH", + | "width": 8, + | "value": 255 + | } + |""".stripMargin + m.toJSON shouldBe Json.parse(expected) + } + + "Minimal write port" should "be generated" in { + val m = MacroPort( + address = PolarizedPort(name = "addr", polarity = ActiveHigh), + clock = Some(PolarizedPort(name = "clk", polarity = PositiveEdge)), + writeEnable = Some(PolarizedPort(name = "write_enable", polarity = ActiveHigh)), + input = Some(PolarizedPort(name = "data_in", polarity = ActiveHigh)), + width = Some(32), + depth = Some(1024) // These numbers don't matter. + ) + val expected = """ + | { + | "address port name": "addr", + | "address port polarity": "active high", + | "clock port name": "clk", + | "clock port polarity": "positive edge", + | "write enable port name": "write_enable", + | "write enable port polarity": "active high", + | "input port name": "data_in", + | "input port polarity": "active high" + | } + |""".stripMargin + m.toJSON shouldBe Json.parse(expected) + } + + "Minimal read port" should "be generated" in { + val m = MacroPort( + address = PolarizedPort(name = "addr", polarity = ActiveHigh), + clock = Some(PolarizedPort(name = "clk", polarity = PositiveEdge)), + output = Some(PolarizedPort(name = "data_out", polarity = ActiveHigh)), + width = Some(32), + depth = Some(1024) // These numbers don't matter. + ) + val expected = """ + | { + | "address port name": "addr", + | "address port polarity": "active high", + | "clock port name": "clk", + | "clock port polarity": "positive edge", + | "output port name": "data_out", + | "output port polarity": "active high" + | } + |""".stripMargin + m.toJSON shouldBe Json.parse(expected) + } + + "Masked read port" should "be generated" in { + val m = MacroPort( + address = PolarizedPort(name = "addr", polarity = ActiveHigh), + clock = Some(PolarizedPort(name = "clk", polarity = PositiveEdge)), + output = Some(PolarizedPort(name = "data_out", polarity = ActiveHigh)), + maskPort = Some(PolarizedPort(name = "mask", polarity = ActiveHigh)), + maskGran = Some(8), + width = Some(32), + depth = Some(1024) // These numbers don't matter. 
+ ) + val expected = """ + | { + | "address port name": "addr", + | "address port polarity": "active high", + | "clock port name": "clk", + | "clock port polarity": "positive edge", + | "output port name": "data_out", + | "output port polarity": "active high", + | "mask port name": "mask", + | "mask port polarity": "active high", + | "mask granularity": 8 + | } + |""".stripMargin + m.toJSON shouldBe Json.parse(expected) + } + + "Everything port" should "be generated" in { + val m = MacroPort( + address = PolarizedPort(name = "addr", polarity = ActiveHigh), + clock = Some(PolarizedPort(name = "clk", polarity = PositiveEdge)), + writeEnable = Some(PolarizedPort(name = "write_enable", polarity = ActiveHigh)), + readEnable = Some(PolarizedPort(name = "read_enable", polarity = ActiveHigh)), + chipEnable = Some(PolarizedPort(name = "chip_enable", polarity = ActiveHigh)), + output = Some(PolarizedPort(name = "data_out", polarity = ActiveHigh)), + input = Some(PolarizedPort(name = "data_in", polarity = ActiveHigh)), + maskPort = Some(PolarizedPort(name = "mask", polarity = ActiveHigh)), + maskGran = Some(8), + width = Some(32), + depth = Some(1024) // These numbers don't matter. + ) + val expected = """ + | { + | "address port name": "addr", + | "address port polarity": "active high", + | "clock port name": "clk", + | "clock port polarity": "positive edge", + | "write enable port name": "write_enable", + | "write enable port polarity": "active high", + | "read enable port name": "read_enable", + | "read enable port polarity": "active high", + | "chip enable port name": "chip_enable", + | "chip enable port polarity": "active high", + | "output port name": "data_out", + | "output port polarity": "active high", + | "input port name": "data_in", + | "input port polarity": "active high", + | "mask port name": "mask", + | "mask port polarity": "active high", + | "mask granularity": 8 + | } + |""".stripMargin + m.toJSON shouldBe Json.parse(expected) + } +} + +class SRAMMacroOutput extends AnyFlatSpec with Matchers with HasAwesomeMemData { + "SRAM macro" should "be generated" in { + val m = getAwesomeMem + val expected = getAwesomeMemJSON + m.toJSON shouldBe Json.parse(expected) + } +} + +class InputOutput extends AnyFlatSpec with Matchers with HasAwesomeMemData { + "Read-write string" should "preserve data" in { + val mdf = List( + FillerMacro("MY_FILLER_CELL", "lvt"), + MetalFillerMacro("METAL_GEAR_FILLER", "hvt"), + getAwesomeMem + ) + Utils.readMDFFromString(Utils.writeMDFToString(mdf)) shouldBe Some(mdf) + } + + val testDir: String = "test_run_dir" + new File(testDir).mkdirs // Make sure the testDir exists + + "Read-write file" should "preserve data" in { + val mdf = List( + FillerMacro("MY_FILLER_CELL", "lvt"), + MetalFillerMacro("METAL_GEAR_FILLER", "hvt"), + getAwesomeMem + ) + val filename = testDir + "/" + "mdf_read_write_test.json" + Utils.writeMDFToPath(Some(filename), mdf) shouldBe true + Utils.readMDFFromPath(Some(filename)) shouldBe Some(mdf) + } +} diff --git a/tapeout/src/test/scala/mdf/macrolib/MacroLibSpec.scala b/tapeout/src/test/scala/mdf/macrolib/MacroLibSpec.scala new file mode 100644 index 000000000..fd3210bb2 --- /dev/null +++ b/tapeout/src/test/scala/mdf/macrolib/MacroLibSpec.scala @@ -0,0 +1,406 @@ +package mdf.macrolib + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers +import play.api.libs.json._ + +object JSONUtils { + def readStringValueMap(str: String): Option[Map[String, JsValue]] = { + Json.parse(str) match { + case x: JsObject => 
Some(x.as[Map[String, JsValue]]) + case _ => None + } + } +} + +// Tests for filler macros +class FillerMacroSpec extends AnyFlatSpec with Matchers { + "Valid lvt macros" should "be detected" in { + val m = JSONUtils + .readStringValueMap(""" + | { + | "type": "filler cell", + | "name": "MY_FILLER_CELL", + | "vt": "lvt" + | } + |""".stripMargin) + .get + FillerMacroBase.parseJSON(m) shouldBe Some(FillerMacro("MY_FILLER_CELL", "lvt")) + } + + "Valid metal macro" should "be detected" in { + val m = JSONUtils + .readStringValueMap(""" + | { + | "type": "metal filler cell", + | "name": "METAL_FILLER_CELL", + | "vt": "lvt" + | } + |""".stripMargin) + .get + FillerMacroBase.parseJSON(m) shouldBe Some(MetalFillerMacro("METAL_FILLER_CELL", "lvt")) + } + + "Valid hvt macros" should "be detected" in { + val m = JSONUtils + .readStringValueMap(""" + | { + | "type": "filler cell", + | "name": "HVT_CELL_PROP", + | "vt": "hvt" + | } + |""".stripMargin) + .get + FillerMacroBase.parseJSON(m) shouldBe Some(FillerMacro("HVT_CELL_PROP", "hvt")) + } + + "Empty name macros" should "be rejected" in { + val m = JSONUtils + .readStringValueMap(""" + | { + | "type": "filler cell", + | "name": "", + | "vt": "hvt" + | } + |""".stripMargin) + .get + FillerMacroBase.parseJSON(m) shouldBe None + } + + "Empty vt macros" should "be rejected" in { + val m = JSONUtils + .readStringValueMap(""" + | { + | "type": "metal filler cell", + | "name": "DEAD_CELL", + | "vt": "" + | } + |""".stripMargin) + .get + FillerMacroBase.parseJSON(m) shouldBe None + } + + "Missing vt macros" should "be rejected" in { + val m = JSONUtils + .readStringValueMap(""" + | { + | "type": "metal filler cell", + | "name": "DEAD_CELL" + | } + |""".stripMargin) + .get + FillerMacroBase.parseJSON(m) shouldBe None + } + + "Missing name macros" should "be rejected" in { + val m = JSONUtils + .readStringValueMap(""" + | { + | "type": "filler cell", + | "vt": "" + | } + |""".stripMargin) + .get + FillerMacroBase.parseJSON(m) shouldBe None + } +} + +// Tests for SRAM type and associates. +class SRAMMacroSpec extends AnyFlatSpec with Matchers { + // Simple port which can be reused in tests + // Note: assume width=depth=simplePortConstant. 
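+  // simplePort returns a (JSON string, MacroPort) pair describing one fully
+  // populated read-write port whose signal names end in `postfix`; the tests
+  // below parse the JSON and expect to recover exactly that MacroPort value.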
+ val simplePortConstant = 1024 + def simplePort( + postfix: String = "", + width: Int = simplePortConstant, + depth: Int = simplePortConstant + ): (String, MacroPort) = { + val json = s""" + { + "address port name": "A_${postfix}", + "address port polarity": "active high", + "clock port name": "CLK_${postfix}", + "clock port polarity": "positive edge", + "write enable port name": "WEN_${postfix}", + "write enable port polarity": "active high", + "read enable port name": "REN_${postfix}", + "read enable port polarity": "active high", + "chip enable port name": "CEN_${postfix}", + "chip enable port polarity": "active high", + "output port name": "OUT_${postfix}", + "output port polarity": "active high", + "input port name": "IN_${postfix}", + "input port polarity": "active high", + "mask granularity": 1, + "mask port name": "MASK_${postfix}", + "mask port polarity": "active high" + } + """ + val port = MacroPort( + address = PolarizedPort(s"A_${postfix}", ActiveHigh), + clock = Some(PolarizedPort(s"CLK_${postfix}", PositiveEdge)), + writeEnable = Some(PolarizedPort(s"WEN_${postfix}", ActiveHigh)), + readEnable = Some(PolarizedPort(s"REN_${postfix}", ActiveHigh)), + chipEnable = Some(PolarizedPort(s"CEN_${postfix}", ActiveHigh)), + output = Some(PolarizedPort(s"OUT_${postfix}", ActiveHigh)), + input = Some(PolarizedPort(s"IN_${postfix}", ActiveHigh)), + maskPort = Some(PolarizedPort(s"MASK_${postfix}", ActiveHigh)), + maskGran = Some(1), + width = Some(width), + depth = Some(depth) + ) + (json, port) + } + "Simple port" should "be valid" in { + { + val (json, port) = simplePort("Simple1") + MacroPort.parseJSON(JSONUtils.readStringValueMap(json).get, simplePortConstant, simplePortConstant) shouldBe Some( + port + ) + } + { + val (json, port) = simplePort("Simple2") + MacroPort.parseJSON(JSONUtils.readStringValueMap(json).get, simplePortConstant, simplePortConstant) shouldBe Some( + port + ) + } + { + val (json, port) = simplePort("bar") + MacroPort.parseJSON(JSONUtils.readStringValueMap(json).get, simplePortConstant, simplePortConstant) shouldBe Some( + port + ) + } + { + val (json, port) = simplePort("") + MacroPort.parseJSON(JSONUtils.readStringValueMap(json).get, simplePortConstant, simplePortConstant) shouldBe Some( + port + ) + } + } + + "Simple SRAM macro" should "be detected" in { + val (json, port) = simplePort("", 2048, 4096) + val m = JSONUtils + .readStringValueMap(s""" +{ + "type": "sram", + "name": "SRAMS_R_US", + "width": 2048, + "depth": "4096", + "family": "1rw", + "ports": [ + ${json} + ] +} + """) + .get + SRAMMacro.parseJSON(m) shouldBe Some( + SRAMMacro("SRAMS_R_US", width = 2048, depth = 4096, family = "1rw", ports = List(port), extraPorts = List()) + ) + } + + "Non-power-of-two width & depth SRAM macro" should "be detected" in { + val (json, port) = simplePort("", 1234, 8888) + val m = JSONUtils + .readStringValueMap(s""" +{ + "type": "sram", + "name": "SRAMS_R_US", + "width": 1234, + "depth": "8888", + "family": "1rw", + "ports": [ + ${json} + ] +} + """) + .get + SRAMMacro.parseJSON(m) shouldBe Some( + SRAMMacro("SRAMS_R_US", width = 1234, depth = 8888, family = "1rw", ports = List(port), extraPorts = List()) + ) + } + + "Minimal memory port" should "be detected" in { + val (json, port) = simplePort("_A", 64, 1024) + val port2 = MacroPort( + address = PolarizedPort("A_B", ActiveHigh), + clock = Some(PolarizedPort("CLK_B", PositiveEdge)), + writeEnable = Some(PolarizedPort("WEN_B", ActiveHigh)), + readEnable = None, + chipEnable = None, + output = 
Some(PolarizedPort("OUT_B", ActiveHigh)), + input = Some(PolarizedPort("IN_B", ActiveHigh)), + maskPort = None, + maskGran = None, + width = Some(64), + depth = Some(1024) + ) + val m = JSONUtils + .readStringValueMap(s""" +{ + "type": "sram", + "name": "SRAMS_R_US", + "width": 64, + "depth": "1024", + "family": "2rw", + "ports": [ + ${json}, + { + "address port name": "A_B", + "address port polarity": "active high", + "clock port name": "CLK_B", + "clock port polarity": "positive edge", + "write enable port name": "WEN_B", + "write enable port polarity": "active high", + "output port name": "OUT_B", + "output port polarity": "active high", + "input port name": "IN_B", + "input port polarity": "active high" + } + ] +} + """) + .get + SRAMMacro.parseJSON(m) shouldBe Some( + SRAMMacro("SRAMS_R_US", width = 64, depth = 1024, family = "2rw", ports = List(port, port2), extraPorts = List()) + ) + } + + "Extra ports" should "be detected" in { + val (json, port) = simplePort("", 2048, 4096) + val m = JSONUtils + .readStringValueMap(s""" +{ + "type": "sram", + "name": "GOT_EXTRA", + "width": 2048, + "depth": "4096", + "family": "1rw", + "ports": [ + ${json} + ], + "extra ports": [ + { + "name": "TIE_DIE", + "width": 1, + "type": "constant", + "value": 1 + }, + { + "name": "TIE_MOO", + "width": 4, + "type": "constant", + "value": 0 + } + ] +} + """) + .get + SRAMMacro.parseJSON(m) shouldBe Some( + SRAMMacro( + "GOT_EXTRA", + width = 2048, + depth = 4096, + family = "1rw", + ports = List(port), + extraPorts = List( + MacroExtraPort( + name = "TIE_DIE", + width = 1, + portType = Constant, + value = 1 + ), + MacroExtraPort( + name = "TIE_MOO", + width = 4, + portType = Constant, + value = 0 + ) + ) + ) + ) + } + + "Invalid port" should "be rejected" in { + val (json, port) = simplePort("", 2048, 4096) + val m = JSONUtils + .readStringValueMap(s""" +{ + "type": "sram", + "name": "SRAMS_R_US", + "width": 2048, + "depth": "4096", + "family": "1rw", + "ports": [ + { + "address port name": "missing_polarity", + "output port name": "missing_clock" + } + ] +} + """) + .get + SRAMMacro.parseJSON(m) shouldBe None + } + + "No ports" should "be rejected" in { + val (json, port) = simplePort("", 2048, 4096) + val m = JSONUtils + .readStringValueMap(s""" +{ + "type": "sram", + "name": "SRAMS_R_US", + "width": 2048, + "depth": "4096", + "family": "1rw" +} + """) + .get + SRAMMacro.parseJSON(m) shouldBe None + } + + "No family and ports" should "be rejected" in { + val (json, port) = simplePort("", 2048, 4096) + val m = JSONUtils + .readStringValueMap(s""" +{ + "type": "sram", + "name": "SRAMS_R_US", + "width": 2048, + "depth": "4096" +} + """) + .get + SRAMMacro.parseJSON(m) shouldBe None + } + + "String width" should "be rejected" in { + val (json, port) = simplePort("", 2048, 4096) + val m = JSONUtils + .readStringValueMap(s""" +{ + "type": "sram", + "name": "BAD_BAD_SRAM", + "width": "wide", + "depth": "4096" +} + """) + .get + SRAMMacro.parseJSON(m) shouldBe None + } + + "String depth" should "be rejected" in { + val (json, port) = simplePort("", 2048, 4096) + val m = JSONUtils + .readStringValueMap(s""" +{ + "type": "sram", + "name": "BAD_BAD_SRAM", + "width": 512, + "depth": "octopus_under_the_sea" +} + """) + .get + SRAMMacro.parseJSON(m) shouldBe None + } +} From db2739bb6c7901ad1f0cb05ed5a4a0b81f304374 Mon Sep 17 00:00:00 2001 From: chick Date: Mon, 9 Aug 2021 09:47:20 -0700 Subject: [PATCH 220/273] iocell won't run without this syntax IntelliJ flags this as error, even though it compiles and runs --- 
tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala b/tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala index 47b7cf2e7..d244d2984 100644 --- a/tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala +++ b/tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala @@ -148,7 +148,7 @@ object IOCell { padSignal: T, name: Option[String] = None, typeParams: IOCellTypeParams = GenericIOCellParams(), - concretizeResetFn: (Reset) => R = toSyncReset _ + concretizeResetFn: (Reset) => R = toSyncReset ): Seq[IOCell] = { def genCell[T <: Data]( castToBool: (T) => Bool, From b107a6bdf3a1b76c350d431b65c9c6ab3cf3fc03 Mon Sep 17 00:00:00 2001 From: chick Date: Mon, 9 Aug 2021 10:10:10 -0700 Subject: [PATCH 221/273] Add some missing resources --- build.sbt | 10 +- tapeout/src/test/resources/bumps.json | 41 ++ tapeout/src/test/resources/io_properties.json | 663 ++++++++++++++++++ 3 files changed, 711 insertions(+), 3 deletions(-) create mode 100644 tapeout/src/test/resources/bumps.json create mode 100644 tapeout/src/test/resources/io_properties.json diff --git a/build.sbt b/build.sbt index 1fe835056..45723331d 100644 --- a/build.sbt +++ b/build.sbt @@ -24,12 +24,16 @@ lazy val commonSettings = Seq( ) ) -disablePlugins(sbtassembly.AssemblyPlugin) - -enablePlugins(sbtassembly.AssemblyPlugin) +//disablePlugins(sbtassembly.AssemblyPlugin) +// +//enablePlugins(sbtassembly.AssemblyPlugin) lazy val tapeout = (project in file("tapeout")) .settings(commonSettings) .settings(scalacOptions in Test ++= Seq("-language:reflectiveCalls")) + .settings( + mainClass := Some("barstools.macros.MacroCompiler") + ) + .enablePlugins(sbtassembly.AssemblyPlugin) lazy val root = (project in file(".")).aggregate(tapeout) diff --git a/tapeout/src/test/resources/bumps.json b/tapeout/src/test/resources/bumps.json new file mode 100644 index 000000000..21b93381c --- /dev/null +++ b/tapeout/src/test/resources/bumps.json @@ -0,0 +1,41 @@ +[ + { + "name" : "example", + "type" : "flipchip", + "bump_dimensions" : [27,27], + "bump_locations" : [ + ["-", "GND", "VDDC0_SEL[0]", "VDDC0_SEL[1]", "VDDC1_SEL[0]", "VDDC1_SEL[1]", "VDDC2_SEL[0]", "VDDC2_SEL[1]", "VDDC3_SEL[0]", "VDDC3_SEL[1]", "VDDC0_EN", "VDDC1_EN", "VDDC2_EN", "VDDC3_EN", "CCLK0", "CCLK1", "CCLK2", "RESET", "BOOT", "I2C_SDA", "I2C_SCL", "SPI_SCLK", "SPI_MOSI", "SPI_MISO", "SPI_SS_L", "GND", "-"], + [ "GND", "", "", "", "GND", "GND","GPIO[1]", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8","UART_RX","UART_TX", "GND", "GND", "GND", "GND", "", "", "", "GND"], + + ["TXP0", "VDDA", "VDDA", "GND", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "GND", "VDDA", "VDDA", "TXP4"], + ["TXN0", "VDDA", "VDDA", "GND", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "GND", "VDDA", "VDDA", "TXN4"], + [ "GND", "", "", "", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "", "", "", "GND"], + ["RXP0", "VDDA", "VDDA", "GND", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "GND", "VDDA", "VDDA", "RXP4"], + ["RXN0", "VDDA", "VDDA", "GND", "GND", "GND", "GND", "GND", "GND", 
"GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "VDDA", "VDDA", "RXN4"], + [ "GND", "", "", "", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "", "", "", "GND"], + + ["TXP1", "VDDA", "VDDA", "GND", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "GND", "VDDA", "VDDA", "TXP5"], + ["TXN1", "VDDA", "VDDA", "GND", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "GND", "VDDA", "VDDA", "TXN5"], + [ "GND", "", "", "", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "", "", "", "GND"], + ["RXP1", "VDDA", "VDDA", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "VDDA", "VDDA", "RXP5"], + ["RXN1", "VDDA", "VDDA", "GND", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "GND", "GND", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "GND", "VDDA", "VDDA", "RXN5"], + [ "GND", "", "", "", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "GND", "GND", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "", "", "", "GND"], + + ["TXP2", "VDDA", "VDDA", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "VDDA", "VDDA", "TXP6"], + ["TXN2", "VDDA", "VDDA", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "VDDA", "VDDA", "TXN6"], + [ "GND", "", "", "", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "", "", "", "GND"], + ["RXP2", "VDDA", "VDDA", "GND", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "GND", "VDDA", "VDDA", "RXP6"], + ["RXN2", "VDDA", "VDDA", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "VDDA", "VDDA", "RXN6"], + [ "GND", "", "", "", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "", "", "", "GND"], + + ["TXP3", "VDDA", "VDDA", "GND", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "GND", "VDDA", "VDDA", "TXP7"], + ["TXN3", "VDDA", "VDDA", "GND", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "GND", "VDDA", "VDDA", "TXN7"], + [ "GND", "", "", "", "-", "SERIAL_IN_READY", "-", "-", "SERIAL_IN_VALID", "-", "-", "-", "-", "", "-", "SERIAL_OUT_VALID", "-", "-", "SERIAL_OUT_READY", "-", "-", "GPIO[0]", "-", "", "", "", "GND"], + ["RXP3", 
"VDDA", "VDDA", "GND", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "GND", "VDDA", "VDDA", "RXP7"], + ["RXN3", "VDDA", "VDDA", "GND", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "GND", "VDDA", "VDDA", "RXN7"], + + [ "GND", "", "", "", "GND", "GND", "GND", "GND", "GND", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "GND", "GND", "GND", "GND", "GND", "", "", "", "GND"], + ["-", "GND", "REFCLK0P", "REFCLK0N", "GND", "SERIAL_OUT[0]", "SERIAL_OUT[1]", "SERIAL_OUT[2]", "SERIAL_OUT[3]", "SERIAL_IN[0]", "SERIAL_IN[1]", "SERIAL_IN[2]", "SERIAL_IN[3]", "JTAG_TMS", "JTAG_TCK", "JTAG_TDO", "JTAG_TDI", "CLKSEL", "PLLCLK_OUT", "GND", "PLLREFCLKP", "PLLREFCLKN", "GND", "REFCLK1P", "REFCLK1N", "GND", "-"] + ] + } +] diff --git a/tapeout/src/test/resources/io_properties.json b/tapeout/src/test/resources/io_properties.json new file mode 100644 index 000000000..93b945a39 --- /dev/null +++ b/tapeout/src/test/resources/io_properties.json @@ -0,0 +1,663 @@ +[ + { + "name": "My IOs", + "type": "io_properties", + "top": "EAGLE", + "ios": [ + { + "name": "GND", + "type": "ground" + }, + { + "name": "VDD0V8", + "type": "power" + }, + { + "name": "VDD1V8", + "type": "power" + }, + { + "name": "VDDC0", + "type": "power" + }, + { + "name": "VDDC1", + "type": "power" + }, + { + "name": "VDDC2", + "type": "power" + }, + { + "name": "VDDC3", + "type": "power" + }, + { + "name": "VDDA", + "type": "power" + }, + { + "name": "VDDC0_SEL[1:0]", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "VDDC1_SEL[1:0]", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "VDDC2_SEL[1:0]", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "VDDC3_SEL[1:0]", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "VDDDC0_EN", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "VDDDC1_EN", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "VDDDC2_EN", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "VDDDC3_EN", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "CCLK0", + "type": "digital", + "direction": "input", + "termination": 50, + "termination_type": "single", + "termination_reference": "GND" + }, + { + "name": "CCLK1", + "type": "digital", + "direction": "input", + "termination": 50, + "termination_type": "single", + "termination_reference": "GND" + }, + { + "name": "CCLK2", + "type": "digital", + "direction": "input", + "termination": 50, + "termination_type": "single", + "termination_reference": "GND" + }, + { + "name": "RESET", + "type": "digital", + "direction": "input", + "termination": "CMOS" + }, + { + "name": "BOOT", + "type": "digital", + "direction": "input", + "termination": "CMOS" + }, + { + "name": "I2C_SDA", + "type": "digital", + "direction": "inout", + "termination": "open-drain" + }, + { + "name": "I2C_SCL", + "type": "digital", + "direction": "inout", + "termination": "open-drain" + }, + { + "name": "SPI_SCLK", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "SPI_MOSI", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "SPI_MISO", + "type": "digital", + "direction": "input", + "termination": "CMOS" + }, + { + "name": "SPI_SS_L", + 
"type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "GPIO[1:0]", + "type": "digital", + "direction": "inout", + "termination": "CMOS" + }, + { + "name": "UART_RX", + "type": "digital", + "direction": "input", + "termination": "CMOS" + }, + { + "name": "UART_TX", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "SERIAL_IN_READY", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "SERIAL_IN_VALID", + "type": "digital", + "direction": "input", + "termination": "CMOS" + }, + { + "name": "SERIAL_OUT_READY", + "type": "digital", + "direction": "input", + "termination": "CMOS" + }, + { + "name": "SERIAL_OUT_VALID", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "SERIAL_OUT[3:0]", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "SERIAL_IN[3:0]", + "type": "digital", + "direction": "input", + "termination": "CMOS" + }, + { + "name": "REFCLK0P", + "type": "analog", + "direction": "input", + "match": [ + "REFCLK0N" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "REFCLK0N", + "type": "analog", + "direction": "input", + "match": [ + "REFCLK0P" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "REFCLK1N", + "type": "analog", + "direction": "input", + "match": [ + "REFCLK1P" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "REFCLK1P", + "type": "analog", + "direction": "input", + "match": [ + "REFCLK1N" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "PLLREFCLKP", + "type": "analog", + "direction": "input", + "match": [ + "PLLREFCLKP" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "PLLREFCLKN", + "type": "analog", + "direction": "input", + "match": [ + "PLLREFCLKP" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "JTAG_TMS", + "type": "digital", + "direction": "input", + "termination": "CMOS" + }, + { + "name": "JTAG_TCK", + "type": "digital", + "direction": "input", + "termination": "CMOS" + }, + { + "name": "JTAG_TDI", + "type": "digital", + "direction": "input", + "termination": "CMOS" + }, + { + "name": "JTAG_TDO", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "PLLCLK_OUT", + "type": "digital", + "direction": "output", + "termination": "CMOS" + }, + { + "name": "TXP0", + "type": "analog", + "direction": "output", + "match": [ + "TXN0" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXN0", + "type": "analog", + "direction": "output", + "match": [ + "TXP0" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXP0", + "type": "analog", + "direction": "output", + "match": [ + "RXN0" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXN1", + "type": "analog", + "direction": "input", + "match": [ + "RXP1" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXP1", + "type": "analog", + "direction": 
"output", + "match": [ + "TXN1" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXN1", + "type": "analog", + "direction": "output", + "match": [ + "TXP1" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXP1", + "type": "analog", + "direction": "output", + "match": [ + "RXN1" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXN1", + "type": "analog", + "direction": "input", + "match": [ + "RXP1" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXP2", + "type": "analog", + "direction": "output", + "match": [ + "TXN2" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXN2", + "type": "analog", + "direction": "output", + "match": [ + "TXP2" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXP2", + "type": "analog", + "direction": "output", + "match": [ + "RXN2" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXN2", + "type": "analog", + "direction": "input", + "match": [ + "RXP2" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXP3", + "type": "analog", + "direction": "output", + "match": [ + "TXN3" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXN3", + "type": "analog", + "direction": "output", + "match": [ + "TXP3" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXP3", + "type": "analog", + "direction": "output", + "match": [ + "RXN3" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXN3", + "type": "analog", + "direction": "input", + "match": [ + "RXP3" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXP4", + "type": "analog", + "direction": "output", + "match": [ + "TXN4" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXN4", + "type": "analog", + "direction": "output", + "match": [ + "TXP4" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXP4", + "type": "analog", + "direction": "output", + "match": [ + "RXN4" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXN4", + "type": "analog", + "direction": "input", + "match": [ + "RXP4" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXP5", + "type": "analog", + "direction": "output", + "match": [ + "TXN5" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXN5", + "type": "analog", + "direction": "output", + "match": [ + "TXP5" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXP5", + "type": "analog", + "direction": "output", + "match": [ + "RXN5" + ], + "termination": 100, + "termination_type": "differential", + 
"termination_reference": "GND" + }, + { + "name": "RXN5", + "type": "analog", + "direction": "input", + "match": [ + "RXP5" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXP6", + "type": "analog", + "direction": "output", + "match": [ + "TXN6" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXN6", + "type": "analog", + "direction": "output", + "match": [ + "TXP6" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXP6", + "type": "analog", + "direction": "output", + "match": [ + "RXN6" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXN6", + "type": "analog", + "direction": "input", + "match": [ + "RXP6" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXP7", + "type": "analog", + "direction": "output", + "match": [ + "TXN7" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "TXN7", + "type": "analog", + "direction": "output", + "match": [ + "TXP7" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXP7", + "type": "analog", + "direction": "output", + "match": [ + "RXN7" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + }, + { + "name": "RXN7", + "type": "analog", + "direction": "input", + "match": [ + "RXP7" + ], + "termination": 100, + "termination_type": "differential", + "termination_reference": "GND" + } + ] + } +] From e113a7b61977bfecbedddb564f23ee9d9b777b23 Mon Sep 17 00:00:00 2001 From: chick Date: Mon, 9 Aug 2021 17:50:47 -0700 Subject: [PATCH 222/273] Add setting to have `sbt` not exit if a program calls System.exit Without this a useless and misleading log4j error is reported. --- build.sbt | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/build.sbt b/build.sbt index 45723331d..47e450be8 100644 --- a/build.sbt +++ b/build.sbt @@ -1,8 +1,8 @@ // See LICENSE for license details. 
val defaultVersions = Map( - "chisel3" -> "3.4.+", - "chisel-iotesters" -> "1.5.+" + "chisel3" -> "3.5-SNAPSHOT", + "chisel-iotesters" -> "2.5-SNAPSHOT" ) lazy val commonSettings = Seq( @@ -16,6 +16,8 @@ lazy val commonSettings = Seq( libraryDependencies ++= Seq( "com.typesafe.play" %% "play-json" % "2.9.2", "org.scalatest" %% "scalatest" % "3.2.9" % "test", + "org.apache.logging.log4j" % "log4j-api" % "2.11.2", + "org.apache.logging.log4j" % "log4j-core" % "2.11.2" ), resolvers ++= Seq( Resolver.sonatypeRepo("snapshots"), @@ -31,6 +33,7 @@ lazy val commonSettings = Seq( lazy val tapeout = (project in file("tapeout")) .settings(commonSettings) .settings(scalacOptions in Test ++= Seq("-language:reflectiveCalls")) + .settings(fork := true) .settings( mainClass := Some("barstools.macros.MacroCompiler") ) From 352fa91b62785ceba1575f9093bbce7e42034a6e Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 10 Aug 2021 15:28:25 -0700 Subject: [PATCH 223/273] IOCell toBool => asBool MacroCompiler needs more specific firrtl.Utils imports, Comment out legalize SynFlops more precise imports YamlHelpers use FileUtils ResetInverter LegacyModule => Module Retime use Module instead of LegacyModule YamlHelpers uses FileUtils instead of getResource ResetInvert use Module instead of LegacyModule Masks add scalatest wrappers for getting better test reports SpecificExamples drop tapeout prefix, and add scalatest blocks GenerateSpec, let ChiselStage emit the firrt directly to target dir, add some actual tests that files were created GenerateTopSpec use FileUtils FlipChipMacroSpec use FileUtils IOPropertiesSpec use FileUtils --- .../barstools/iocell/chisel/IOCell.scala | 2 +- .../barstools/macros/MacroCompiler.scala | 3 +- .../scala/barstools/macros/SynFlops.scala | 4 +- .../tapeout/transforms/ResetInverter.scala | 2 +- .../tapeout/transforms/retime/Retime.scala | 2 +- .../transforms/utils/YamlHelpers.scala | 7 +- .../src/main/scala/mdf/macrolib/Utils.scala | 11 ++- .../test/scala/barstools/macros/Masks.scala | 92 ++++++++++++++----- .../barstools/macros/SpecificExamples.scala | 25 +++-- .../tapeout/transforms/GenerateSpec.scala | 10 +- .../tapeout/transforms/GenerateTopSpec.scala | 3 +- .../mdf/macrolib/FlipChipMacroSpec.scala | 5 +- .../scala/mdf/macrolib/IOPropertiesSpec.scala | 5 +- 13 files changed, 118 insertions(+), 53 deletions(-) diff --git a/tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala b/tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala index d244d2984..6d4449366 100644 --- a/tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala +++ b/tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala @@ -141,7 +141,7 @@ object IOCell { * @param name An optional name or name prefix to use for naming IO cells * @return A Seq of all generated IO cell instances */ - val toSyncReset: (Reset) => Bool = _.toBool + val toSyncReset: (Reset) => Bool = _.asBool() val toAsyncReset: (Reset) => AsyncReset = _.asAsyncReset def generateFromSignal[T <: Data, R <: Reset]( coreSignal: T, diff --git a/tapeout/src/main/scala/barstools/macros/MacroCompiler.scala b/tapeout/src/main/scala/barstools/macros/MacroCompiler.scala index bfcf78dad..6218b593b 100644 --- a/tapeout/src/main/scala/barstools/macros/MacroCompiler.scala +++ b/tapeout/src/main/scala/barstools/macros/MacroCompiler.scala @@ -8,7 +8,7 @@ package barstools.macros import barstools.macros.Utils._ -import firrtl.Utils._ +import firrtl.Utils.{BoolType, one, zero} import firrtl.annotations._ import firrtl.ir._ import 
firrtl.stage.{FirrtlSourceAnnotation, FirrtlStage, Forms, OutputFileAnnotation, RunFirrtlTransformAnnotation} @@ -783,7 +783,6 @@ class MacroCompilerOptimizations extends SeqTransform with DependencyAPIMigratio new firrtl.transforms.ConstantPropagation, passes.memlib.VerilogMemDelays, new firrtl.transforms.ConstantPropagation, - passes.Legalize, passes.SplitExpressions, passes.CommonSubexpressionElimination ) diff --git a/tapeout/src/main/scala/barstools/macros/SynFlops.scala b/tapeout/src/main/scala/barstools/macros/SynFlops.scala index 5d39cda94..a6fe32a40 100644 --- a/tapeout/src/main/scala/barstools/macros/SynFlops.scala +++ b/tapeout/src/main/scala/barstools/macros/SynFlops.scala @@ -3,7 +3,7 @@ package barstools.macros import barstools.macros.Utils._ -import firrtl.Utils._ +import firrtl.Utils.{zero, one} import firrtl._ import firrtl.ir._ import firrtl.passes.MemPortUtils.memPortField @@ -27,7 +27,7 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa case Some(gran) => (UIntType(IntWidth(gran)), gran.intValue) } - val maxDepth = min(lib.src.depth, 1 << 26) + val maxDepth = firrtl.Utils.min(lib.src.depth, 1 << 26) val numMems = lib.src.depth / maxDepth // Change macro to be mapped onto to look like the below mem diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala index 33d2f78aa..29c9f0da7 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala @@ -60,7 +60,7 @@ class ResetInverterTransform extends Transform with DependencyAPIMigration { trait ResetInverter { self: chisel3.Module => - def invert[T <: chisel3.internal.LegacyModule](module: T): Unit = { + def invert[T <: chisel3.Module](module: T): Unit = { chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation with RunFirrtlTransform { def transformClass: Class[_ <: Transform] = classOf[ResetInverterTransform] def toFirrtl: Annotation = ResetInverterAnnotation(module.toNamed) diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala index 931af88d2..1a9d66685 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala @@ -39,7 +39,7 @@ class RetimeTransform extends Transform with DependencyAPIMigration { trait RetimeLib { self: chisel3.Module => - def retime[T <: chisel3.internal.LegacyModule](module: T): Unit = { + def retime[T <: chisel3.Module](module: T): Unit = { chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation with RunFirrtlTransform { def transformClass: Class[_ <: Transform] = classOf[RetimeTransform] def toFirrtl: Annotation = RetimeAnnotation(module.toNamed) diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala index 9a226de57..9b58e083b 100644 --- a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala +++ b/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala @@ -1,5 +1,6 @@ package barstools.tapeout.transforms.utils +import firrtl.FileUtils import net.jcazevedo.moultingyaml._ import java.io.File @@ -10,10 +11,10 @@ class YamlFileReader(resource: String) { val yamlString = file match { 
case f if f.isEmpty => // Use example config if no file is provided - val stream = getClass.getResourceAsStream(resource) - io.Source.fromInputStream(stream).mkString + val stream = FileUtils.getTextResource(resource) + stream case f if new File(f).exists => - scala.io.Source.fromFile(f).getLines.mkString("\n") + FileUtils.getText(f) case _ => throw new Exception("No valid Yaml file found!") } diff --git a/tapeout/src/main/scala/mdf/macrolib/Utils.scala b/tapeout/src/main/scala/mdf/macrolib/Utils.scala index 795cff0ac..86d78a24c 100644 --- a/tapeout/src/main/scala/mdf/macrolib/Utils.scala +++ b/tapeout/src/main/scala/mdf/macrolib/Utils.scala @@ -1,6 +1,8 @@ package mdf.macrolib import play.api.libs.json._ + +import java.io.FileNotFoundException import scala.collection.mutable.ListBuffer import scala.language.implicitConversions @@ -37,7 +39,14 @@ object Utils { path match { case None => None // Read file into string and parse - case Some(p) => Utils.readMDFFromString(scala.io.Source.fromFile(p).mkString) + case Some(p) => + try { + Utils.readMDFFromString(scala.io.Source.fromFile(p).mkString) + } catch { + case f: FileNotFoundException => + println(s"FILE NOT FOUND $p in dir ${os.pwd}") + throw f + } } } diff --git a/tapeout/src/test/scala/barstools/macros/Masks.scala b/tapeout/src/test/scala/barstools/macros/Masks.scala index c472669ac..43d6b3d67 100644 --- a/tapeout/src/test/scala/barstools/macros/Masks.scala +++ b/tapeout/src/test/scala/barstools/macros/Masks.scala @@ -27,7 +27,9 @@ class Masks_FourTypes_NonMaskedMem_NonMaskedLib override lazy val libWidth = 8 override lazy val libMaskGran = None - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_FourTypes_NonMaskedMem_MaskedLib @@ -40,7 +42,9 @@ class Masks_FourTypes_NonMaskedMem_MaskedLib override lazy val libWidth = 8 override lazy val libMaskGran = Some(2) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_FourTypes_MaskedMem_NonMaskedLib @@ -53,7 +57,9 @@ class Masks_FourTypes_MaskedMem_NonMaskedLib override lazy val libWidth = 8 override lazy val libMaskGran = None - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_FourTypes_MaskedMem_NonMaskedLib_SmallerMaskGran @@ -66,7 +72,9 @@ class Masks_FourTypes_MaskedMem_NonMaskedLib_SmallerMaskGran override lazy val libWidth = 8 override lazy val libMaskGran = None - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_FourTypes_MaskedMem_MaskedLib @@ -79,7 +87,9 @@ class Masks_FourTypes_MaskedMem_MaskedLib override lazy val libWidth = 16 override lazy val libMaskGran = Some(4) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_FourTypes_MaskedMem_MaskedLib_SameMaskGran @@ -92,7 +102,9 @@ class Masks_FourTypes_MaskedMem_MaskedLib_SameMaskGran override lazy val libWidth = 16 override lazy val libMaskGran = Some(8) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_FourTypes_MaskedMem_MaskedLib_SmallerMaskGran @@ -105,7 +117,9 @@ class Masks_FourTypes_MaskedMem_MaskedLib_SmallerMaskGran 
override lazy val libWidth = 32 override lazy val libMaskGran = Some(8) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } // Bit-mask memories to non-masked libs whose width is larger than 1. @@ -117,7 +131,9 @@ class Masks_BitMaskedMem_NonMaskedLib extends MacroCompilerSpec with HasSRAMGene override lazy val libWidth = 8 override lazy val libMaskGran = None - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } // FPGA-style byte-masked memories. @@ -131,7 +147,9 @@ class Masks_FPGAStyle_32_8 override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(8) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } // Simple powers of two with bit-masked lib. @@ -145,7 +163,9 @@ class Masks_PowersOfTwo_8_1 override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(1) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_PowersOfTwo_16_1 @@ -157,7 +177,9 @@ class Masks_PowersOfTwo_16_1 override lazy val memMaskGran = Some(16) override lazy val libMaskGran = Some(1) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_PowersOfTwo_32_1 @@ -169,7 +191,9 @@ class Masks_PowersOfTwo_32_1 override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(1) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_PowersOfTwo_64_1 @@ -181,7 +205,9 @@ class Masks_PowersOfTwo_64_1 override lazy val memMaskGran = Some(64) override lazy val libMaskGran = Some(1) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } // Simple powers of two with non bit-masked lib. 
@@ -195,7 +221,9 @@ class Masks_PowersOfTwo_32_4 override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(4) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_PowersOfTwo_32_8 @@ -207,7 +235,9 @@ class Masks_PowersOfTwo_32_8 override lazy val memMaskGran = Some(32) override lazy val libMaskGran = Some(8) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_PowersOfTwo_8_8 @@ -219,7 +249,9 @@ class Masks_PowersOfTwo_8_8 override lazy val memMaskGran = Some(8) override lazy val libMaskGran = Some(8) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } // Width as a multiple of the mask, bit-masked lib @@ -233,7 +265,9 @@ class Masks_IntegerMaskMultiple_20_10 override lazy val memMaskGran = Some(10) override lazy val libMaskGran = Some(1) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_IntegerMaskMultiple_21_7 @@ -258,7 +292,9 @@ class Masks_IntegerMaskMultiple_21_21 override lazy val memMaskGran = Some(21) override lazy val libMaskGran = Some(1) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_IntegerMaskMultiple_84_21 @@ -270,7 +306,9 @@ class Masks_IntegerMaskMultiple_84_21 override lazy val memMaskGran = Some(21) override lazy val libMaskGran = Some(1) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_IntegerMaskMultiple_92_23 @@ -282,7 +320,9 @@ class Masks_IntegerMaskMultiple_92_23 override lazy val memMaskGran = Some(23) override lazy val libMaskGran = Some(1) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_IntegerMaskMultiple_117_13 @@ -294,7 +334,9 @@ class Masks_IntegerMaskMultiple_117_13 override lazy val memMaskGran = Some(13) override lazy val libMaskGran = Some(1) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_IntegerMaskMultiple_160_20 @@ -306,7 +348,9 @@ class Masks_IntegerMaskMultiple_160_20 override lazy val memMaskGran = Some(20) override lazy val libMaskGran = Some(1) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class Masks_IntegerMaskMultiple_184_23 @@ -318,7 +362,9 @@ class Masks_IntegerMaskMultiple_184_23 override lazy val memMaskGran = Some(23) override lazy val libMaskGran = Some(1) - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } // Width as an non-integer multiple of the mask, bit-masked lib diff --git a/tapeout/src/test/scala/barstools/macros/SpecificExamples.scala b/tapeout/src/test/scala/barstools/macros/SpecificExamples.scala index 6ee7255a5..1a9571994 100644 --- a/tapeout/src/test/scala/barstools/macros/SpecificExamples.scala +++ b/tapeout/src/test/scala/barstools/macros/SpecificExamples.scala @@ -27,7 +27,7 @@ class WriteEnableTest extends 
MacroCompilerSpec with HasSRAMGenerator { val lib = s"lib-WriteEnableTest.json" // lib. of mems to create it val v = s"WriteEnableTest.json" - override val libPrefix = "tapeout/src/test/resources" + override val libPrefix = "src/test/resources" val memSRAMs = mdf.macrolib.Utils .readMDFFromString(""" @@ -89,7 +89,9 @@ circuit cc_banks_0_ext : defname = fake_mem """ - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class MaskPortTest extends MacroCompilerSpec with HasSRAMGenerator { @@ -97,7 +99,7 @@ class MaskPortTest extends MacroCompilerSpec with HasSRAMGenerator { val lib = s"lib-MaskPortTest.json" // lib. of mems to create it val v = s"MaskPortTest.json" - override val libPrefix = "tapeout/src/test/resources" + override val libPrefix = "src/test/resources" val memSRAMs = mdf.macrolib.Utils .readMDFFromString(""" @@ -173,7 +175,9 @@ circuit cc_dir_ext : defname = fake_mem """ - compileExecuteAndTest(mem, lib, v, output) + it should "compile, exectue, and test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { @@ -181,10 +185,9 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { val lib = s"lib-BOOMTest.json" val v = s"BOOMTest.v" - override val libPrefix = "tapeout/src/test/resources" + override val libPrefix = "src/test/resources" - val memSRAMs = mdf.macrolib.Utils - .readMDFFromString(""" + val memSRAMs = mdf.macrolib.Utils.readMDFFromString(""" [ { "type" : "sram", "name" : "_T_182_ext", @@ -1345,7 +1348,9 @@ circuit smem_0_ext : defname = my_sram_1rw_64x8 """ - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute and test the boom test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class SmallTagArrayTest extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleTestGenerator { @@ -1378,7 +1383,9 @@ class SmallTagArrayTest extends MacroCompilerSpec with HasSRAMGenerator with Has | dout <= mux(UInt<1>("h1"), dout_0, UInt<26>("h0")) """.stripMargin - compileExecuteAndTest(mem, lib, v, output) + it should "compile, execute, and test, the small tag array test" in { + compileExecuteAndTest(mem, lib, v, output) + } } class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala index ec4822fec..5c7d53f1d 100644 --- a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala @@ -7,6 +7,9 @@ import chisel3.experimental.ExtModule import chisel3.stage.ChiselStage import firrtl.FileUtils import org.scalatest.freespec.AnyFreeSpec +import org.scalatest.matchers.must.Matchers.be +import org.scalatest.matchers.should.Matchers +import org.scalatest.matchers.should.Matchers.convertToAnyShouldWrapper import java.io.{File, PrintWriter} @@ -61,9 +64,7 @@ class GenerateSpec extends AnyFreeSpec { val targetDir = "test_run_dir/generate_spec_source" FileUtils.makeDirectory(targetDir) - val printWriter = new PrintWriter(new File(s"$targetDir/GenerateExampleTester.fir")) - printWriter.write((new ChiselStage()).emitFirrtl(new GenerateExampleTester, Array("--target-dir", targetDir))) - printWriter.close() + (new ChiselStage()).emitFirrtl(new GenerateExampleTester, Array("--target-dir", targetDir)) val blackBoxInverterText = """ |module 
BlackBoxInverter( @@ -78,7 +79,7 @@ class GenerateSpec extends AnyFreeSpec { printWriter2.write(blackBoxInverterText) printWriter2.close() - + new File(s"$targetDir/GenerateExampleTester.fir").exists() should be (true) } "generate top test" in { @@ -89,5 +90,6 @@ class GenerateSpec extends AnyFreeSpec { "-i", s"$sourceDir/GenerateExampleTester.fir", "-o", s"$targetDir/GenerateExampleTester.v" )) + new File(s"$targetDir/GenerateExampleTester.v").exists() should be (true) } } diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala index 02afa79af..e4824c4c6 100644 --- a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala +++ b/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala @@ -27,8 +27,7 @@ class GenerateTopSpec extends AnyFreeSpec with Matchers { val targetDir = "test_run_dir/generate_top_spec" FileUtils.makeDirectory(targetDir) - val stream = getClass.getResourceAsStream("/BlackBoxFloatTester.fir") - val input = scala.io.Source.fromInputStream(stream).getLines() + val input = FileUtils.getLinesResource("/BlackBoxFloatTester.fir") val printWriter = new PrintWriter(new File(s"$targetDir/BlackBoxFloatTester.fir")) printWriter.write(input.mkString("\n")) printWriter.close() diff --git a/tapeout/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala b/tapeout/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala index ba51e4d14..c6a9e7ce9 100644 --- a/tapeout/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala +++ b/tapeout/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala @@ -1,12 +1,13 @@ package mdf.macrolib +import firrtl.FileUtils import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers class FlipChipMacroSpec extends AnyFlatSpec with Matchers { "Parsing flipchipmacros" should "work" in { - val stream = getClass.getResourceAsStream("/bumps.json") - val mdf = Utils.readMDFFromString(scala.io.Source.fromInputStream(stream).getLines().mkString("\n")) + val stream = FileUtils.getLinesResource("/bumps.json") + val mdf = Utils.readMDFFromString(stream.mkString("\n")) mdf match { case Some(Seq(fcp: FlipChipMacro)) => println(fcp.visualize) } diff --git a/tapeout/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala b/tapeout/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala index ffd13be5d..b09422a74 100644 --- a/tapeout/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala +++ b/tapeout/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala @@ -1,12 +1,13 @@ package mdf.macrolib +import firrtl.FileUtils import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers class IOPropertiesSpec extends AnyFlatSpec with Matchers { "Parsing io_properties" should "work" in { - val stream = getClass.getResourceAsStream("/io_properties.json") - val mdf = Utils.readMDFFromString(scala.io.Source.fromInputStream(stream).getLines().mkString("\n")) + val stream = FileUtils.getLinesResource("/io_properties.json") + val mdf = Utils.readMDFFromString(stream.mkString("\n")) mdf match { case Some(Seq(fcp: IOProperties)) => } From 74d5da6f4af737e950f2d1516ed88101abef1041 Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 10 Aug 2021 15:40:58 -0700 Subject: [PATCH 224/273] Move src/ from tapeout to top level get rid of root --- build.sbt | 4 +- .../resources/barstools/iocell/vsrc/Analog.v | 0 .../resources/barstools/iocell/vsrc/IOCell.v | 0 .../barstools/iocell/chisel/Analog.scala | 0 .../barstools/iocell/chisel/IOCell.scala 
| 0
 .../scala/barstools/macros/CostMetric.scala   | 0
 .../barstools/macros/MacroCompiler.scala      | 0
 .../scala/barstools/macros/SynFlops.scala     | 0
 .../main/scala/barstools/macros/Utils.scala   | 0
 .../transforms/AddSuffixToModuleNames.scala   | 0
 .../transforms/AvoidExtModuleCollisions.scala | 0
 .../transforms/ConvertToExtModPass.scala      | 0
 .../tapeout/transforms/EnumerateModules.scala | 0
 .../transforms/GenerateTopAndHarness.scala    | 0
 .../tapeout/transforms/ReParentCircuit.scala  | 0
 .../transforms/RemoveUnusedModules.scala      | 0
 .../tapeout/transforms/ResetInverter.scala    | 0
 .../tapeout/transforms/retime/Retime.scala    | 0
 .../transforms/stage/TapeoutStage.scala       | 0
 .../tapeout/transforms/utils/FileUtils.scala  | 0
 .../transforms/utils/LowerAnnotations.scala   | 0
 .../transforms/utils/ProgrammaticBundle.scala | 0
 .../transforms/utils/YamlHelpers.scala        | 0
 .../main/scala/mdf/macrolib/ConfReader.scala  | 0
 .../scala/mdf/macrolib/FillerMacroBase.scala  | 0
 .../scala/mdf/macrolib/FlipChipMacro.scala    | 0
 .../main/scala/mdf/macrolib/IOMacro.scala     | 0
 .../main/scala/mdf/macrolib/MacroLib.scala    | 0
 .../main/scala/mdf/macrolib/SRAM.scala        | 0
 .../main/scala/mdf/macrolib/Utils.scala       | 0
 .../test/resources/PadAnnotationVerilogPart.v | 0
 .../src => src}/test/resources/bumps.json     | 0
 .../test/resources/io_properties.json         | 0
 .../test/resources/lib-BOOMTest.json          | 0
 .../test/resources/lib-MaskPortTest.json      | 0
 .../test/resources/lib-WriteEnableTest.json   | 0
 .../scala/barstools/macros/CostFunction.scala | 0
 .../scala/barstools/macros/Functional.scala   | 0
 .../barstools/macros/MacroCompilerSpec.scala  | 0
 .../test/scala/barstools/macros/Masks.scala   | 0
 .../scala/barstools/macros/MultiPort.scala    | 0
 .../scala/barstools/macros/SRAMCompiler.scala | 0
 .../barstools/macros/SimpleSplitDepth.scala   | 0
 .../barstools/macros/SimpleSplitWidth.scala   | 0
 .../barstools/macros/SpecificExamples.scala   | 0
 .../scala/barstools/macros/SynFlops.scala     | 0
 .../tapeout/transforms/GenerateSpec.scala     | 0
 .../tapeout/transforms/GenerateTopSpec.scala  | 0
 .../tapeout/transforms/NoFileProblem.scala    | 51 +++++++++++++++++++
 .../transforms/ResetInverterSpec.scala        | 0
 .../transforms/retime/RetimeSpec.scala        | 0
 .../scala/mdf/macrolib/ConfReaderSpec.scala   | 0
 .../mdf/macrolib/FlipChipMacroSpec.scala      | 0
 .../test/scala/mdf/macrolib/IOMacroSpec.scala | 0
 .../scala/mdf/macrolib/IOPropertiesSpec.scala | 0
 .../scala/mdf/macrolib/MacroLibOutput.scala   | 0
 .../scala/mdf/macrolib/MacroLibSpec.scala     | 0
 57 files changed, 53 insertions(+), 2 deletions(-)
 rename {tapeout/src => src}/main/resources/barstools/iocell/vsrc/Analog.v (100%)
 rename {tapeout/src => src}/main/resources/barstools/iocell/vsrc/IOCell.v (100%)
 rename {tapeout/src => src}/main/scala/barstools/iocell/chisel/Analog.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/iocell/chisel/IOCell.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/macros/CostMetric.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/macros/MacroCompiler.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/macros/SynFlops.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/macros/Utils.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/EnumerateModules.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/ResetInverter.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/retime/Retime.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala (100%)
 rename {tapeout/src => src}/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala (100%)
 rename {tapeout/src => src}/main/scala/mdf/macrolib/ConfReader.scala (100%)
 rename {tapeout/src => src}/main/scala/mdf/macrolib/FillerMacroBase.scala (100%)
 rename {tapeout/src => src}/main/scala/mdf/macrolib/FlipChipMacro.scala (100%)
 rename {tapeout/src => src}/main/scala/mdf/macrolib/IOMacro.scala (100%)
 rename {tapeout/src => src}/main/scala/mdf/macrolib/MacroLib.scala (100%)
 rename {tapeout/src => src}/main/scala/mdf/macrolib/SRAM.scala (100%)
 rename {tapeout/src => src}/main/scala/mdf/macrolib/Utils.scala (100%)
 rename {tapeout/src => src}/test/resources/PadAnnotationVerilogPart.v (100%)
 rename {tapeout/src => src}/test/resources/bumps.json (100%)
 rename {tapeout/src => src}/test/resources/io_properties.json (100%)
 rename {tapeout/src => src}/test/resources/lib-BOOMTest.json (100%)
 rename {tapeout/src => src}/test/resources/lib-MaskPortTest.json (100%)
 rename {tapeout/src => src}/test/resources/lib-WriteEnableTest.json (100%)
 rename {tapeout/src => src}/test/scala/barstools/macros/CostFunction.scala (100%)
 rename {tapeout/src => src}/test/scala/barstools/macros/Functional.scala (100%)
 rename {tapeout/src => src}/test/scala/barstools/macros/MacroCompilerSpec.scala (100%)
 rename {tapeout/src => src}/test/scala/barstools/macros/Masks.scala (100%)
 rename {tapeout/src => src}/test/scala/barstools/macros/MultiPort.scala (100%)
 rename {tapeout/src => src}/test/scala/barstools/macros/SRAMCompiler.scala (100%)
 rename {tapeout/src => src}/test/scala/barstools/macros/SimpleSplitDepth.scala (100%)
 rename {tapeout/src => src}/test/scala/barstools/macros/SimpleSplitWidth.scala (100%)
 rename {tapeout/src => src}/test/scala/barstools/macros/SpecificExamples.scala (100%)
 rename {tapeout/src => src}/test/scala/barstools/macros/SynFlops.scala (100%)
 rename {tapeout/src => src}/test/scala/barstools/tapeout/transforms/GenerateSpec.scala (100%)
 rename {tapeout/src => src}/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala (100%)
 create mode 100644 src/test/scala/barstools/tapeout/transforms/NoFileProblem.scala
 rename {tapeout/src => src}/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala (100%)
 rename {tapeout/src => src}/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala (100%)
 rename {tapeout/src => src}/test/scala/mdf/macrolib/ConfReaderSpec.scala (100%)
 rename {tapeout/src => src}/test/scala/mdf/macrolib/FlipChipMacroSpec.scala (100%)
 rename {tapeout/src => src}/test/scala/mdf/macrolib/IOMacroSpec.scala (100%)
 rename {tapeout/src => src}/test/scala/mdf/macrolib/IOPropertiesSpec.scala (100%)
 rename {tapeout/src => src}/test/scala/mdf/macrolib/MacroLibOutput.scala (100%)
 rename {tapeout/src => src}/test/scala/mdf/macrolib/MacroLibSpec.scala (100%)

diff --git a/build.sbt b/build.sbt
index 47e450be8..2b75dbc14 100644
--- a/build.sbt
+++ b/build.sbt
@@ -30,7 +30,7 @@ lazy val commonSettings = Seq(
 //
 //enablePlugins(sbtassembly.AssemblyPlugin)
 
-lazy val tapeout = (project in file("tapeout"))
+lazy val tapeout = (project in file("."))
   .settings(commonSettings)
   .settings(scalacOptions in Test ++= Seq("-language:reflectiveCalls"))
   .settings(fork := true)
@@ -39,4 +39,4 @@ lazy val tapeout = (project in file("tapeout"))
   )
   .enablePlugins(sbtassembly.AssemblyPlugin)
 
-lazy val root = (project in file(".")).aggregate(tapeout)
+//lazy val root = (project in file(".")).aggregate(tapeout)
diff --git a/tapeout/src/main/resources/barstools/iocell/vsrc/Analog.v b/src/main/resources/barstools/iocell/vsrc/Analog.v
similarity index 100%
rename from tapeout/src/main/resources/barstools/iocell/vsrc/Analog.v
rename to src/main/resources/barstools/iocell/vsrc/Analog.v
diff --git a/tapeout/src/main/resources/barstools/iocell/vsrc/IOCell.v b/src/main/resources/barstools/iocell/vsrc/IOCell.v
similarity index 100%
rename from tapeout/src/main/resources/barstools/iocell/vsrc/IOCell.v
rename to src/main/resources/barstools/iocell/vsrc/IOCell.v
diff --git a/tapeout/src/main/scala/barstools/iocell/chisel/Analog.scala b/src/main/scala/barstools/iocell/chisel/Analog.scala
similarity index 100%
rename from tapeout/src/main/scala/barstools/iocell/chisel/Analog.scala
rename to src/main/scala/barstools/iocell/chisel/Analog.scala
diff --git a/tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala b/src/main/scala/barstools/iocell/chisel/IOCell.scala
similarity index 100%
rename from tapeout/src/main/scala/barstools/iocell/chisel/IOCell.scala
rename to src/main/scala/barstools/iocell/chisel/IOCell.scala
diff --git a/tapeout/src/main/scala/barstools/macros/CostMetric.scala b/src/main/scala/barstools/macros/CostMetric.scala
similarity index 100%
rename from tapeout/src/main/scala/barstools/macros/CostMetric.scala
rename to src/main/scala/barstools/macros/CostMetric.scala
diff --git a/tapeout/src/main/scala/barstools/macros/MacroCompiler.scala b/src/main/scala/barstools/macros/MacroCompiler.scala
similarity index 100%
rename from tapeout/src/main/scala/barstools/macros/MacroCompiler.scala
rename to src/main/scala/barstools/macros/MacroCompiler.scala
diff --git a/tapeout/src/main/scala/barstools/macros/SynFlops.scala b/src/main/scala/barstools/macros/SynFlops.scala
similarity index 100%
rename from tapeout/src/main/scala/barstools/macros/SynFlops.scala
rename to src/main/scala/barstools/macros/SynFlops.scala
diff --git a/tapeout/src/main/scala/barstools/macros/Utils.scala b/src/main/scala/barstools/macros/Utils.scala
similarity index 100%
rename from tapeout/src/main/scala/barstools/macros/Utils.scala
rename to src/main/scala/barstools/macros/Utils.scala
diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala b/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala
similarity index 100%
rename from tapeout/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala
rename to src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala
diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala
b/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala rename to src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala b/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala rename to src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala b/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala rename to src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala rename to src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala b/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala rename to src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala b/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala rename to src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala b/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala rename to src/main/scala/barstools/tapeout/transforms/ResetInverter.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala b/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala rename to src/main/scala/barstools/tapeout/transforms/retime/Retime.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala rename to src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala b/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala rename to src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala b/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala 
similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala rename to src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala b/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala rename to src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala diff --git a/tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala b/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala similarity index 100% rename from tapeout/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala rename to src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala diff --git a/tapeout/src/main/scala/mdf/macrolib/ConfReader.scala b/src/main/scala/mdf/macrolib/ConfReader.scala similarity index 100% rename from tapeout/src/main/scala/mdf/macrolib/ConfReader.scala rename to src/main/scala/mdf/macrolib/ConfReader.scala diff --git a/tapeout/src/main/scala/mdf/macrolib/FillerMacroBase.scala b/src/main/scala/mdf/macrolib/FillerMacroBase.scala similarity index 100% rename from tapeout/src/main/scala/mdf/macrolib/FillerMacroBase.scala rename to src/main/scala/mdf/macrolib/FillerMacroBase.scala diff --git a/tapeout/src/main/scala/mdf/macrolib/FlipChipMacro.scala b/src/main/scala/mdf/macrolib/FlipChipMacro.scala similarity index 100% rename from tapeout/src/main/scala/mdf/macrolib/FlipChipMacro.scala rename to src/main/scala/mdf/macrolib/FlipChipMacro.scala diff --git a/tapeout/src/main/scala/mdf/macrolib/IOMacro.scala b/src/main/scala/mdf/macrolib/IOMacro.scala similarity index 100% rename from tapeout/src/main/scala/mdf/macrolib/IOMacro.scala rename to src/main/scala/mdf/macrolib/IOMacro.scala diff --git a/tapeout/src/main/scala/mdf/macrolib/MacroLib.scala b/src/main/scala/mdf/macrolib/MacroLib.scala similarity index 100% rename from tapeout/src/main/scala/mdf/macrolib/MacroLib.scala rename to src/main/scala/mdf/macrolib/MacroLib.scala diff --git a/tapeout/src/main/scala/mdf/macrolib/SRAM.scala b/src/main/scala/mdf/macrolib/SRAM.scala similarity index 100% rename from tapeout/src/main/scala/mdf/macrolib/SRAM.scala rename to src/main/scala/mdf/macrolib/SRAM.scala diff --git a/tapeout/src/main/scala/mdf/macrolib/Utils.scala b/src/main/scala/mdf/macrolib/Utils.scala similarity index 100% rename from tapeout/src/main/scala/mdf/macrolib/Utils.scala rename to src/main/scala/mdf/macrolib/Utils.scala diff --git a/tapeout/src/test/resources/PadAnnotationVerilogPart.v b/src/test/resources/PadAnnotationVerilogPart.v similarity index 100% rename from tapeout/src/test/resources/PadAnnotationVerilogPart.v rename to src/test/resources/PadAnnotationVerilogPart.v diff --git a/tapeout/src/test/resources/bumps.json b/src/test/resources/bumps.json similarity index 100% rename from tapeout/src/test/resources/bumps.json rename to src/test/resources/bumps.json diff --git a/tapeout/src/test/resources/io_properties.json b/src/test/resources/io_properties.json similarity index 100% rename from tapeout/src/test/resources/io_properties.json rename to src/test/resources/io_properties.json diff --git a/tapeout/src/test/resources/lib-BOOMTest.json b/src/test/resources/lib-BOOMTest.json similarity index 100% rename from tapeout/src/test/resources/lib-BOOMTest.json rename to 
src/test/resources/lib-BOOMTest.json diff --git a/tapeout/src/test/resources/lib-MaskPortTest.json b/src/test/resources/lib-MaskPortTest.json similarity index 100% rename from tapeout/src/test/resources/lib-MaskPortTest.json rename to src/test/resources/lib-MaskPortTest.json diff --git a/tapeout/src/test/resources/lib-WriteEnableTest.json b/src/test/resources/lib-WriteEnableTest.json similarity index 100% rename from tapeout/src/test/resources/lib-WriteEnableTest.json rename to src/test/resources/lib-WriteEnableTest.json diff --git a/tapeout/src/test/scala/barstools/macros/CostFunction.scala b/src/test/scala/barstools/macros/CostFunction.scala similarity index 100% rename from tapeout/src/test/scala/barstools/macros/CostFunction.scala rename to src/test/scala/barstools/macros/CostFunction.scala diff --git a/tapeout/src/test/scala/barstools/macros/Functional.scala b/src/test/scala/barstools/macros/Functional.scala similarity index 100% rename from tapeout/src/test/scala/barstools/macros/Functional.scala rename to src/test/scala/barstools/macros/Functional.scala diff --git a/tapeout/src/test/scala/barstools/macros/MacroCompilerSpec.scala b/src/test/scala/barstools/macros/MacroCompilerSpec.scala similarity index 100% rename from tapeout/src/test/scala/barstools/macros/MacroCompilerSpec.scala rename to src/test/scala/barstools/macros/MacroCompilerSpec.scala diff --git a/tapeout/src/test/scala/barstools/macros/Masks.scala b/src/test/scala/barstools/macros/Masks.scala similarity index 100% rename from tapeout/src/test/scala/barstools/macros/Masks.scala rename to src/test/scala/barstools/macros/Masks.scala diff --git a/tapeout/src/test/scala/barstools/macros/MultiPort.scala b/src/test/scala/barstools/macros/MultiPort.scala similarity index 100% rename from tapeout/src/test/scala/barstools/macros/MultiPort.scala rename to src/test/scala/barstools/macros/MultiPort.scala diff --git a/tapeout/src/test/scala/barstools/macros/SRAMCompiler.scala b/src/test/scala/barstools/macros/SRAMCompiler.scala similarity index 100% rename from tapeout/src/test/scala/barstools/macros/SRAMCompiler.scala rename to src/test/scala/barstools/macros/SRAMCompiler.scala diff --git a/tapeout/src/test/scala/barstools/macros/SimpleSplitDepth.scala b/src/test/scala/barstools/macros/SimpleSplitDepth.scala similarity index 100% rename from tapeout/src/test/scala/barstools/macros/SimpleSplitDepth.scala rename to src/test/scala/barstools/macros/SimpleSplitDepth.scala diff --git a/tapeout/src/test/scala/barstools/macros/SimpleSplitWidth.scala b/src/test/scala/barstools/macros/SimpleSplitWidth.scala similarity index 100% rename from tapeout/src/test/scala/barstools/macros/SimpleSplitWidth.scala rename to src/test/scala/barstools/macros/SimpleSplitWidth.scala diff --git a/tapeout/src/test/scala/barstools/macros/SpecificExamples.scala b/src/test/scala/barstools/macros/SpecificExamples.scala similarity index 100% rename from tapeout/src/test/scala/barstools/macros/SpecificExamples.scala rename to src/test/scala/barstools/macros/SpecificExamples.scala diff --git a/tapeout/src/test/scala/barstools/macros/SynFlops.scala b/src/test/scala/barstools/macros/SynFlops.scala similarity index 100% rename from tapeout/src/test/scala/barstools/macros/SynFlops.scala rename to src/test/scala/barstools/macros/SynFlops.scala diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala b/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala similarity index 100% rename from 
tapeout/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala
rename to src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala
diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala b/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala
similarity index 100%
rename from tapeout/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala
rename to src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala
diff --git a/src/test/scala/barstools/tapeout/transforms/NoFileProblem.scala b/src/test/scala/barstools/tapeout/transforms/NoFileProblem.scala
new file mode 100644
index 000000000..da8e0acb9
--- /dev/null
+++ b/src/test/scala/barstools/tapeout/transforms/NoFileProblem.scala
@@ -0,0 +1,51 @@
+//// SPDX-License-Identifier: Apache-2.0
+//
+//package barstools.tapeout.transforms
+//
+//import firrtl.AnnotationSeq
+//import firrtl.options.{Shell, Stage, StageMain}
+//import firrtl.stage.{FirrtlCli, FirrtlStage}
+//import logger.Logger.OutputCaptor
+//import logger.{LazyLogging, LogLevel, Logger}
+//import org.scalatest.freespec.AnyFreeSpec
+//
+//import java.io.{ByteArrayOutputStream, PrintStream}
+//
+//class NoFileStage extends Stage {
+//  override val shell: Shell = new Shell(applicationName = "tapeout") with FirrtlCli
+//
+//  override def run(annotations: AnnotationSeq): AnnotationSeq = {
+//    Logger.makeScope(annotations) {
+//      val annos = new FirrtlStage().execute(Array.empty, annotations)
+//    }
+//    annotations
+//  }
+//}
+//
+//class NoFileGenerator(annotationSeq: AnnotationSeq) extends LazyLogging {
+//
+//}
+//
+//object NoFileGenerator extends StageMain(new NoFileStage)
+//
+//class NoFileProblem extends AnyFreeSpec {
+//  // "should fail in a way that discloses missing file" - {
+//  //   (new NoFileStage).execute(Array("-i", "jackalope"), Seq.empty)
+//  // }
+//
+//  "should fail in a way that discloses missing file with output capture" in {
+//    val buffer = new ByteArrayOutputStream()
+//    Console.withOut(new PrintStream(buffer)) {
+//      NoFileGenerator.main(Array("-i", "jackalope", "-ll", "info"))
+//    }
+//    println(buffer.toString)
+//  }
+//
+//  "don't uses Console.withOut" in {
+//    val captor = new OutputCaptor
+//    Logger.setOutput(captor.printStream)
+//    Logger.setLevel(getClass.getName, LogLevel.Info)
+//    NoFileGenerator.main(Array("-i", "jackalope", "-ll", "info"))
+//    println(captor.getOutputAsString)
+//  }
+//}
diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala b/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala
similarity index 100%
rename from tapeout/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala
rename to src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala
diff --git a/tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala b/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala
similarity index 100%
rename from tapeout/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala
rename to src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala
diff --git a/tapeout/src/test/scala/mdf/macrolib/ConfReaderSpec.scala b/src/test/scala/mdf/macrolib/ConfReaderSpec.scala
similarity index 100%
rename from tapeout/src/test/scala/mdf/macrolib/ConfReaderSpec.scala
rename to src/test/scala/mdf/macrolib/ConfReaderSpec.scala
diff --git a/tapeout/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala b/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala
similarity index 100%
rename
from tapeout/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala rename to src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala diff --git a/tapeout/src/test/scala/mdf/macrolib/IOMacroSpec.scala b/src/test/scala/mdf/macrolib/IOMacroSpec.scala similarity index 100% rename from tapeout/src/test/scala/mdf/macrolib/IOMacroSpec.scala rename to src/test/scala/mdf/macrolib/IOMacroSpec.scala diff --git a/tapeout/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala b/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala similarity index 100% rename from tapeout/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala rename to src/test/scala/mdf/macrolib/IOPropertiesSpec.scala diff --git a/tapeout/src/test/scala/mdf/macrolib/MacroLibOutput.scala b/src/test/scala/mdf/macrolib/MacroLibOutput.scala similarity index 100% rename from tapeout/src/test/scala/mdf/macrolib/MacroLibOutput.scala rename to src/test/scala/mdf/macrolib/MacroLibOutput.scala diff --git a/tapeout/src/test/scala/mdf/macrolib/MacroLibSpec.scala b/src/test/scala/mdf/macrolib/MacroLibSpec.scala similarity index 100% rename from tapeout/src/test/scala/mdf/macrolib/MacroLibSpec.scala rename to src/test/scala/mdf/macrolib/MacroLibSpec.scala From 08eba27126af44f1cc431b8d6ec9bff086314b7b Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 10 Aug 2021 16:26:11 -0700 Subject: [PATCH 225/273] MultiPort remove random println GenerateSpec fix so "generate top test" does not rely on side-effects of previous test GenerateTopSpec fix so it creates needed input file itself, does not rely on other tests to do that --- .../scala/barstools/macros/MultiPort.scala | 1 - .../tapeout/transforms/GenerateSpec.scala | 35 +++++++++++-------- .../tapeout/transforms/GenerateTopSpec.scala | 12 ++++++- 3 files changed, 31 insertions(+), 17 deletions(-) diff --git a/src/test/scala/barstools/macros/MultiPort.scala b/src/test/scala/barstools/macros/MultiPort.scala index 1968f6aa4..75eb20d73 100644 --- a/src/test/scala/barstools/macros/MultiPort.scala +++ b/src/test/scala/barstools/macros/MultiPort.scala @@ -316,7 +316,6 @@ class SplitWidth_2rw_differentMasks extends MacroCompilerSpec with HasSRAMGenera lazy val memMaskGranB = 8 // these generators are run at constructor time override def generateMemSRAM() = { - println(memMaskGranB) SRAMMacro( name = mem_name, width = memWidth, diff --git a/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala b/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala index 5c7d53f1d..b5c03c444 100644 --- a/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala +++ b/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala @@ -8,7 +8,6 @@ import chisel3.stage.ChiselStage import firrtl.FileUtils import org.scalatest.freespec.AnyFreeSpec import org.scalatest.matchers.must.Matchers.be -import org.scalatest.matchers.should.Matchers import org.scalatest.matchers.should.Matchers.convertToAnyShouldWrapper import java.io.{File, PrintWriter} @@ -60,34 +59,40 @@ class GenerateExampleTester extends MultiIOModule { } class GenerateSpec extends AnyFreeSpec { - "generate test data" in { - val targetDir = "test_run_dir/generate_spec_source" + + def generateTestData(targetDir: String): Unit = { FileUtils.makeDirectory(targetDir) - (new ChiselStage()).emitFirrtl(new GenerateExampleTester, Array("--target-dir", targetDir)) + new ChiselStage().emitFirrtl(new GenerateExampleTester, Array("--target-dir", targetDir)) - val blackBoxInverterText = """ - |module BlackBoxInverter( - | input [0:0] in, - | output [0:0] out - |); - | assign out = 
!in; - |endmodule - |""".stripMargin + val blackBoxInverterText = + """ + |module BlackBoxInverter( + | input [0:0] in, + | output [0:0] out + |); + | assign out = !in; + |endmodule + |""".stripMargin val printWriter2 = new PrintWriter(new File(s"$targetDir/BlackBoxInverter.v")) printWriter2.write(blackBoxInverterText) printWriter2.close() + } + + "generate test data" in { + val targetDir = "test_run_dir/generate_spec_source" + generateTestData(targetDir) - new File(s"$targetDir/GenerateExampleTester.fir").exists() should be (true) + new File(s"$targetDir/GenerateExampleTester.fir").exists() should be(true) } "generate top test" in { - val sourceDir = "test_run_dir/generate_spec_source" val targetDir = "test_run_dir/generate_spec" + generateTestData(targetDir) GenerateTop.main(Array( - "-i", s"$sourceDir/GenerateExampleTester.fir", + "-i", s"$targetDir/GenerateExampleTester.fir", "-o", s"$targetDir/GenerateExampleTester.v" )) new File(s"$targetDir/GenerateExampleTester.v").exists() should be (true) diff --git a/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala b/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala index e4824c4c6..c9de43781 100644 --- a/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala +++ b/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala @@ -2,6 +2,7 @@ package barstools.tapeout.transforms +import chisel3.stage.ChiselStage import firrtl.FileUtils import org.scalatest.freespec.AnyFreeSpec import org.scalatest.matchers.should.Matchers @@ -11,9 +12,18 @@ import java.io.{ByteArrayOutputStream, File, PrintStream, PrintWriter} class GenerateTopSpec extends AnyFreeSpec with Matchers { "Generate top and harness" - { "should include the following transforms" in { + val targetDir = "test_run_dir/generate_top_and_harness" + FileUtils.makeDirectory(targetDir) + (new ChiselStage).emitChirrtl(new ExampleModuleNeedsResetInverted, Array("--target-dir", targetDir)) + val buffer = new ByteArrayOutputStream() Console.withOut(new PrintStream(buffer)) { - GenerateTopAndHarness.main(Array("-i", "ExampleModuleNeedsResetInverted.fir", "-ll", "info")) + GenerateTopAndHarness.main( + Array( + "-i", s"$targetDir/ExampleModuleNeedsResetInverted.fir", + "-ll", "info" + ) + ) } val output = buffer.toString output should include("barstools.tapeout.transforms.AddSuffixToModuleNames") From b2cee7ccb8ba119f27e0bdb22e66fcf0c806377a Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 10 Aug 2021 18:04:49 -0700 Subject: [PATCH 226/273] GenerateTopSpec get rid of `Console.withOut` --- .../tapeout/transforms/GenerateTopSpec.scala | 79 +++++++++---------- .../tapeout/transforms/NoFileProblem.scala | 51 ------------ 2 files changed, 39 insertions(+), 91 deletions(-) delete mode 100644 src/test/scala/barstools/tapeout/transforms/NoFileProblem.scala diff --git a/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala b/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala index c9de43781..a7a165a46 100644 --- a/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala +++ b/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala @@ -7,25 +7,25 @@ import firrtl.FileUtils import org.scalatest.freespec.AnyFreeSpec import org.scalatest.matchers.should.Matchers -import java.io.{ByteArrayOutputStream, File, PrintStream, PrintWriter} +import java.io.{File, PrintWriter} class GenerateTopSpec extends AnyFreeSpec with Matchers { "Generate top and harness" - { "should include the following transforms" in { val targetDir = 
"test_run_dir/generate_top_and_harness" + val transformListName = s"$targetDir/ExampleModuleNeesResetInvertTransforms.log" FileUtils.makeDirectory(targetDir) (new ChiselStage).emitChirrtl(new ExampleModuleNeedsResetInverted, Array("--target-dir", targetDir)) - val buffer = new ByteArrayOutputStream() - Console.withOut(new PrintStream(buffer)) { - GenerateTopAndHarness.main( - Array( - "-i", s"$targetDir/ExampleModuleNeedsResetInverted.fir", - "-ll", "info" - ) + GenerateTopAndHarness.main( + Array( + "-i", s"$targetDir/ExampleModuleNeedsResetInverted.fir", + "-ll", "info", + "--log-file", transformListName ) - } - val output = buffer.toString + ) + + val output = FileUtils.getText(transformListName) output should include("barstools.tapeout.transforms.AddSuffixToModuleNames") output should include("barstools.tapeout.transforms.ConvertToExtMod") output should include("barstools.tapeout.transforms.RemoveUnusedModules") @@ -33,8 +33,9 @@ class GenerateTopSpec extends AnyFreeSpec with Matchers { } } - "generate harness should " ignore { + "generate harness should be generated" ignore { val targetDir = "test_run_dir/generate_top_spec" + val logOutputName = s"$targetDir/top_spec_output.log" FileUtils.makeDirectory(targetDir) val input = FileUtils.getLinesResource("/BlackBoxFloatTester.fir") @@ -44,34 +45,32 @@ class GenerateTopSpec extends AnyFreeSpec with Matchers { println(s"""Resource: ${input.mkString("\n")}""") - -// val buffer = new ByteArrayOutputStream() -// Console.withOut(new PrintStream(buffer)) { - GenerateTopAndHarness.main( - Array( - "--target-dir", "test_run_dir/generate_top_spec", - "-i", s"$targetDir/BlackBoxFloatTester.fir", - "-o", - "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.v", - "-tho", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.v", - "-i", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.fir", - "--syn-top", "UnitTestSuite", - "--harness-top", "TestHarness", - "-faf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.anno.json", - "-tsaof", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.anno.json", - "-tdf", "firrtl_black_box_resource_files.top.f", - "-tsf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.fir", - "-thaof", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.anno.json", - "-hdf", "firrtl_black_box_resource_files.harness.f", - "-thf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.fir", - "--infer-rw", - "--repl-seq-mem", "-c:TestHarness:-o:chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.mems.conf", - "-thconf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.mems.conf", - "-td", "test_run_dir/from-ci", - "-ll", "info" - ) + GenerateTopAndHarness.main( + Array( + "--target-dir", "test_run_dir/generate_top_spec", + "-i", s"$targetDir/BlackBoxFloatTester.fir", + "-o", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.v", + "-tho", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.v", + "-i", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.fir", + "--syn-top", "UnitTestSuite", + "--harness-top", "TestHarness", + "-faf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.anno.json", + "-tsaof", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.anno.json", + "-tdf", "firrtl_black_box_resource_files.top.f", + "-tsf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.fir", + "-thaof", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.anno.json", + "-hdf", "firrtl_black_box_resource_files.harness.f", + "-thf", 
"chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.fir", + "--infer-rw", + "--repl-seq-mem", "-c:TestHarness:-o:chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.mems.conf", + "-thconf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.mems.conf", + "-td", "test_run_dir/from-ci", + "-ll", "info", + "--log-file", logOutputName ) - } -// val output = buffer.toString -// println(output) + ) + + val output = FileUtils.getText(logOutputName) + println(output) + } } diff --git a/src/test/scala/barstools/tapeout/transforms/NoFileProblem.scala b/src/test/scala/barstools/tapeout/transforms/NoFileProblem.scala deleted file mode 100644 index da8e0acb9..000000000 --- a/src/test/scala/barstools/tapeout/transforms/NoFileProblem.scala +++ /dev/null @@ -1,51 +0,0 @@ -//// SPDX-License-Identifier: Apache-2.0 -// -//package barstools.tapeout.transforms -// -//import firrtl.AnnotationSeq -//import firrtl.options.{Shell, Stage, StageMain} -//import firrtl.stage.{FirrtlCli, FirrtlStage} -//import logger.Logger.OutputCaptor -//import logger.{LazyLogging, LogLevel, Logger} -//import org.scalatest.freespec.AnyFreeSpec -// -//import java.io.{ByteArrayOutputStream, PrintStream} -// -//class NoFileStage extends Stage { -// override val shell: Shell = new Shell(applicationName = "tapeout") with FirrtlCli -// -// override def run(annotations: AnnotationSeq): AnnotationSeq = { -// Logger.makeScope(annotations) { -// val annos = new FirrtlStage().execute(Array.empty, annotations) -// } -// annotations -// } -//} -// -//class NoFileGenerator(annotationSeq: AnnotationSeq) extends LazyLogging { -// -//} -// -//object NoFileGenerator extends StageMain(new NoFileStage) -// -//class NoFileProblem extends AnyFreeSpec { -// // "should fail in a way that discloses missing file" - { -// // (new NoFileStage).execute(Array("-i", "jackalope"), Seq.empty) -// // } -// -// "should fail in a way that discloses missing file with output capture" in { -// val buffer = new ByteArrayOutputStream() -// Console.withOut(new PrintStream(buffer)) { -// NoFileGenerator.main(Array("-i", "jackalope", "-ll", "info")) -// } -// println(buffer.toString) -// } -// -// "don't uses Console.withOut" in { -// val captor = new OutputCaptor -// Logger.setOutput(captor.printStream) -// Logger.setLevel(getClass.getName, LogLevel.Info) -// NoFileGenerator.main(Array("-i", "jackalope", "-ll", "info")) -// println(captor.getOutputAsString) -// } -//} From ae01e170db78c014b45780ee3fd9df0e02cec37b Mon Sep 17 00:00:00 2001 From: chick Date: Mon, 16 Aug 2021 10:15:07 -0700 Subject: [PATCH 227/273] Adding support for Scala 2.13 Mostly import changes Some formatting changes Runs +test --- .scalafmt.conf | 3 +- build.sbt | 5 +-- project/plugins.sbt | 1 + .../barstools/macros/MacroCompiler.scala | 32 +++++++++---------- .../scala/mdf/macrolib/FlipChipMacro.scala | 4 +-- .../scala/barstools/macros/CostFunction.scala | 3 +- 6 files changed, 26 insertions(+), 22 deletions(-) diff --git a/.scalafmt.conf b/.scalafmt.conf index c53cb6086..5be685f32 100644 --- a/.scalafmt.conf +++ b/.scalafmt.conf @@ -7,7 +7,8 @@ assumeStandardLibraryStripMargin = true docstrings = ScalaDoc lineEndings = preserve includeCurlyBraceInSelectChains = false -danglingParentheses = true +danglingParentheses.defnSite = true +danglingParentheses.callSite = true align.tokens.add = [ { diff --git a/build.sbt b/build.sbt index 2b75dbc14..ca8d06393 100644 --- a/build.sbt +++ b/build.sbt @@ -8,8 +8,9 @@ val defaultVersions = Map( lazy val commonSettings = Seq( organization := 
"edu.berkeley.cs", version := "0.4-SNAPSHOT", - scalaVersion := "2.12.10", - scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls", "-Xsource:2.11"), + scalaVersion := "2.12.13", + crossScalaVersions := Seq("2.12.13", "2.13.6"), + scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls"), libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) }, diff --git a/project/plugins.sbt b/project/plugins.sbt index 17de943f3..5d6b17793 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -1,2 +1,3 @@ addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.5") +addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.3") addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.2") diff --git a/src/main/scala/barstools/macros/MacroCompiler.scala b/src/main/scala/barstools/macros/MacroCompiler.scala index 6218b593b..ff75d6458 100644 --- a/src/main/scala/barstools/macros/MacroCompiler.scala +++ b/src/main/scala/barstools/macros/MacroCompiler.scala @@ -14,7 +14,7 @@ import firrtl.ir._ import firrtl.stage.{FirrtlSourceAnnotation, FirrtlStage, Forms, OutputFileAnnotation, RunFirrtlTransformAnnotation} import firrtl.transforms.NoDCEAnnotation import firrtl.{PrimOps, _} -import mdf.macrolib._ +import mdf.macrolib.{PolarizedPort, PortPolarity, SRAMCompiler, SRAMGroup, SRAMMacro} import java.io.{File, FileWriter} import scala.collection.mutable.{ArrayBuffer, HashMap} @@ -109,15 +109,16 @@ object MacroCompilerAnnotation { * @param forceSynflops Set of memories to force compiling as flops regardless of the mode */ case class Params( - mem: String, - memFormat: Option[String], - lib: Option[String], - hammerIR: Option[String], - costMetric: CostMetric, - mode: CompilerMode, - useCompiler: Boolean, - forceCompile: Set[String], - forceSynflops: Set[String]) + mem: String, + memFormat: Option[String], + lib: Option[String], + hammerIR: Option[String], + costMetric: CostMetric, + mode: CompilerMode, + useCompiler: Boolean, + forceCompile: Set[String], + forceSynflops: Set[String] + ) extends Serializable /** Create a MacroCompilerAnnotation. 
* @param c Top-level circuit name (see class description) @@ -721,16 +722,16 @@ class MacroCompilerTransform extends Transform with DependencyAPIMigration { // Read, eliminate None, get only SRAM, make firrtl macro val mems: Option[Seq[Macro]] = (memFileFormat match { - case Some("conf") => Utils.readConfFromPath(Some(memFile)) + case Some("conf") => readConfFromPath(Some(memFile)) case _ => mdf.macrolib.Utils.readMDFFromPath(Some(memFile)) }) match { case Some(x: Seq[mdf.macrolib.Macro]) => - Some(Utils.filterForSRAM(Some(x)).getOrElse(List()).map { new Macro(_) }) + Some(filterForSRAM(Some(x)).getOrElse(List()).map { new Macro(_) }) case _ => None } val libs: Option[Seq[Macro]] = mdf.macrolib.Utils.readMDFFromPath(libFile) match { case Some(x: Seq[mdf.macrolib.Macro]) => - Some(Utils.filterForSRAM(Some(x)).getOrElse(List()).map { new Macro(_) }) + Some(filterForSRAM(Some(x)).getOrElse(List()).map { new Macro(_) }) case _ => None } val compilers: Option[mdf.macrolib.SRAMCompiler] = mdf.macrolib.Utils.readMDFFromPath(libFile) match { @@ -866,10 +867,9 @@ object MacroCompiler extends App { try { val macros = params.get(MacrosFormat) match { case Some("conf") => - Utils.filterForSRAM(Utils.readConfFromPath(params.get(Macros))).get.map(x => (new Macro(x)).blackbox) + filterForSRAM(readConfFromPath(params.get(Macros))).get.map(x => (new Macro(x)).blackbox) case _ => - Utils - .filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))) + filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))) .get .map(x => (new Macro(x)).blackbox) } diff --git a/src/main/scala/mdf/macrolib/FlipChipMacro.scala b/src/main/scala/mdf/macrolib/FlipChipMacro.scala index 45b49d86a..88a20aeaa 100644 --- a/src/main/scala/mdf/macrolib/FlipChipMacro.scala +++ b/src/main/scala/mdf/macrolib/FlipChipMacro.scala @@ -54,13 +54,13 @@ object FlipChipMacro { val bumpDimensions: (Int, Int) = json.get("bump_dimensions") match { case Some(JsArray(x)) if x.size == 2 => - val z = x.map(_.as[JsNumber].value.intValue()) + val z = x.map(_.as[JsNumber].value.intValue) (z(0), z(1)) case None => return None } val bumpLocations: Seq[Seq[String]] = json.get("bump_locations") match { case Some(JsArray(array)) => - array.collect { case JsArray(a2) => a2.map(_.toString) } + array.collect { case JsArray(a2) => a2.map(_.toString).toSeq }.toSeq case _ => return None } // Can't have dimensions and locations which don't match diff --git a/src/test/scala/barstools/macros/CostFunction.scala b/src/test/scala/barstools/macros/CostFunction.scala index ceb7a61ae..ee6c557f3 100644 --- a/src/test/scala/barstools/macros/CostFunction.scala +++ b/src/test/scala/barstools/macros/CostFunction.scala @@ -1,6 +1,7 @@ package barstools.macros -import mdf.macrolib._ +import mdf.macrolib.SRAMMacro + /** Tests to check that the cost function mechanism is working properly. 
*/ From edb15375612a7f995e073c69ca2da272c17a1513 Mon Sep 17 00:00:00 2001 From: chick Date: Mon, 16 Aug 2021 15:35:22 -0700 Subject: [PATCH 228/273] Formatting code to chisel standard - ran sbt scalafmtAll - lot of small formatting changes - added test that code must stay formatted - part of github actions workflow --- .github/workflows/run-ci.yml | 13 ++++ .../scala/barstools/macros/CostMetric.scala | 2 +- .../barstools/macros/MacroCompiler.scala | 25 ++++--- .../scala/barstools/macros/SynFlops.scala | 2 +- .../transforms/GenerateTopAndHarness.scala | 43 ++++++------ .../tapeout/transforms/ReParentCircuit.scala | 61 +++++++++-------- .../transforms/RemoveUnusedModules.scala | 2 +- .../transforms/stage/TapeoutStage.scala | 1 - .../tapeout/transforms/utils/FileUtils.scala | 2 +- src/main/scala/mdf/macrolib/Utils.scala | 2 +- .../scala/barstools/macros/CostFunction.scala | 1 - .../barstools/macros/SpecificExamples.scala | 3 +- .../tapeout/transforms/GenerateSpec.scala | 18 +++-- .../tapeout/transforms/GenerateTopSpec.scala | 66 ++++++++++++------- .../transforms/ResetInverterSpec.scala | 3 +- 15 files changed, 145 insertions(+), 99 deletions(-) diff --git a/.github/workflows/run-ci.yml b/.github/workflows/run-ci.yml index 492f417a3..78b6b9e68 100644 --- a/.github/workflows/run-ci.yml +++ b/.github/workflows/run-ci.yml @@ -24,3 +24,16 @@ jobs: run: git submodule update --init - name: Test run: sbt test + + doc: + name: Documentation and formatting + runs-on: ubuntu-latest + steps: + - name: Check Formatting + run: sbt scalafmtCheckAll + + all_test_passed: + name: "all tests passed" + runs-on: ubuntu-latest + steps: + - run: echo Success diff --git a/src/main/scala/barstools/macros/CostMetric.scala b/src/main/scala/barstools/macros/CostMetric.scala index a6111d2e4..8b0d04132 100644 --- a/src/main/scala/barstools/macros/CostMetric.scala +++ b/src/main/scala/barstools/macros/CostMetric.scala @@ -128,7 +128,7 @@ object DefaultMetric extends CostMetric with CostMetricCompanion { } val maskPenalty = (memMask, libMask) match { case (None, Some(m)) => 0.001 - case (_, _) => 0 + case (_, _) => 0 } val depthCost = math.ceil(mem.src.depth.toDouble / lib.src.depth.toDouble) val widthCost = math.ceil(memWidth.toDouble / lib.src.width.toDouble) diff --git a/src/main/scala/barstools/macros/MacroCompiler.scala b/src/main/scala/barstools/macros/MacroCompiler.scala index ff75d6458..5a5804b7e 100644 --- a/src/main/scala/barstools/macros/MacroCompiler.scala +++ b/src/main/scala/barstools/macros/MacroCompiler.scala @@ -8,7 +8,7 @@ package barstools.macros import barstools.macros.Utils._ -import firrtl.Utils.{BoolType, one, zero} +import firrtl.Utils.{one, zero, BoolType} import firrtl.annotations._ import firrtl.ir._ import firrtl.stage.{FirrtlSourceAnnotation, FirrtlStage, Forms, OutputFileAnnotation, RunFirrtlTransformAnnotation} @@ -109,16 +109,16 @@ object MacroCompilerAnnotation { * @param forceSynflops Set of memories to force compiling as flops regardless of the mode */ case class Params( - mem: String, - memFormat: Option[String], - lib: Option[String], - hammerIR: Option[String], - costMetric: CostMetric, - mode: CompilerMode, - useCompiler: Boolean, - forceCompile: Set[String], - forceSynflops: Set[String] - ) extends Serializable + mem: String, + memFormat: Option[String], + lib: Option[String], + hammerIR: Option[String], + costMetric: CostMetric, + mode: CompilerMode, + useCompiler: Boolean, + forceCompile: Set[String], + forceSynflops: Set[String]) + extends Serializable /** Create a 
MacroCompilerAnnotation. * @param c Top-level circuit name (see class description) @@ -869,8 +869,7 @@ object MacroCompiler extends App { case Some("conf") => filterForSRAM(readConfFromPath(params.get(Macros))).get.map(x => (new Macro(x)).blackbox) case _ => - filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))) - .get + filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get .map(x => (new Macro(x)).blackbox) } diff --git a/src/main/scala/barstools/macros/SynFlops.scala b/src/main/scala/barstools/macros/SynFlops.scala index a6fe32a40..f7245144b 100644 --- a/src/main/scala/barstools/macros/SynFlops.scala +++ b/src/main/scala/barstools/macros/SynFlops.scala @@ -3,7 +3,7 @@ package barstools.macros import barstools.macros.Utils._ -import firrtl.Utils.{zero, one} +import firrtl.Utils.{one, zero} import firrtl._ import firrtl.ir._ import firrtl.passes.MemPortUtils.memPortField diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index b30d9d411..ef9c5408d 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -13,20 +13,20 @@ import logger.LazyLogging // Requires two phases, one to collect modules below synTop in the hierarchy // and a second to remove those modules to generate the test harness private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogging { - val synTop: Option[String] = annotations.collectFirst { case SynTopAnnotation(s) => s } - val topFir: Option[String] = annotations.collectFirst { case TopFirAnnotation(s) => s } - val harnessFir: Option[String] = annotations.collectFirst { case HarnessFirAnnotation(s) => s } - val topAnnoOut: Option[String] = annotations.collectFirst { case TopAnnoOutAnnotation(s) => s } + val synTop: Option[String] = annotations.collectFirst { case SynTopAnnotation(s) => s } + val topFir: Option[String] = annotations.collectFirst { case TopFirAnnotation(s) => s } + val harnessFir: Option[String] = annotations.collectFirst { case HarnessFirAnnotation(s) => s } + val topAnnoOut: Option[String] = annotations.collectFirst { case TopAnnoOutAnnotation(s) => s } val harnessAnnoOut: Option[String] = annotations.collectFirst { case HarnessAnnoOutAnnotation(s) => s } - val harnessTop: Option[String] = annotations.collectFirst { case HarnessTopAnnotation(h) => h } - val harnessConf: Option[String] = annotations.collectFirst { case HarnessConfAnnotation(h) => h } - val harnessOutput: Option[String] = annotations.collectFirst { case HarnessOutputAnnotation(h) => h } - val topDotfOut: Option[String] = annotations.collectFirst { case TopDotfOutAnnotation(h) => h } + val harnessTop: Option[String] = annotations.collectFirst { case HarnessTopAnnotation(h) => h } + val harnessConf: Option[String] = annotations.collectFirst { case HarnessConfAnnotation(h) => h } + val harnessOutput: Option[String] = annotations.collectFirst { case HarnessOutputAnnotation(h) => h } + val topDotfOut: Option[String] = annotations.collectFirst { case TopDotfOutAnnotation(h) => h } val harnessDotfOut: Option[String] = annotations.collectFirst { case HarnessDotfOutAnnotation(h) => h } val annoFiles: List[String] = annotations.flatMap { case InputAnnotationFileAnnotation(f) => Some(f) - case _ => None + case _ => None }.toList lazy val rootCircuitTarget = CircuitTarget(harnessTop.get) @@ -36,11 +36,11 @@ private class 
GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg // Dump firrtl and annotation files protected def dump( - circuit: Circuit, - annotations: AnnotationSeq, - firFile: Option[String], - annoFile: Option[String] - ): Unit = { + circuit: Circuit, + annotations: AnnotationSeq, + firFile: Option[String], + annoFile: Option[String] + ): Unit = { firFile.foreach { firPath => val outputFile = new java.io.PrintWriter(firPath) outputFile.write(circuit.serialize) @@ -49,9 +49,9 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg annoFile.foreach { annoPath => val outputFile = new java.io.PrintWriter(annoPath) outputFile.write(JsonProtocol.serialize(annotations.filter(_ match { - case _: DeletedAnnotation => false - case _: EmittedComponent => false - case _: EmittedAnnotation[_] => false + case _: DeletedAnnotation => false + case _: EmittedComponent => false + case _: EmittedAnnotation[_] => false case _: FirrtlCircuitAnnotation => false case _ => true }))) @@ -104,10 +104,10 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg val generatorAnnotations = annotations .filterNot(_.isInstanceOf[OutputFileAnnotation]) .map { - case ReplSeqMemAnnotation(i, _) => ReplSeqMemAnnotation(i, harnessConf.get) - case HarnessOutputAnnotation(s) => OutputFileAnnotation(s) - case anno => anno - } ++ harnessAnnos + case ReplSeqMemAnnotation(i, _) => ReplSeqMemAnnotation(i, harnessConf.get) + case HarnessOutputAnnotation(s) => OutputFileAnnotation(s) + case anno => anno + } ++ harnessAnnos val annos = new FirrtlStage().execute(Array.empty, generatorAnnotations) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { @@ -119,7 +119,6 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg } } - object GenerateTop extends StageMain(new TapeoutStage(doHarness = false)) object GenerateTopAndHarness extends StageMain(new TapeoutStage(doHarness = true)) diff --git a/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala b/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala index 82484cce6..b027a7827 100644 --- a/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala +++ b/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala @@ -34,33 +34,42 @@ class ReParentCircuit extends Transform with DependencyAPIMigration { rmap } - val newAnnotations = newTopName.map({ topName => - // Update InstanceTargets and ReferenceTargets - // Yes, these are identical functions, but the copy methods force separate implementations - def updateInstance(t: InstanceTarget): Option[InstanceTarget] = { - val idx = t.path.lastIndexWhere(_._2.value == topName) - if (idx == -1) Some(t.copy(circuit=topName)) else Some(t.copy(circuit=topName, module=topName, path=t.path.drop(idx+1))) - } - def updateReference(t: ReferenceTarget): Option[ReferenceTarget] = { - val idx = t.path.lastIndexWhere(_._2.value == topName) - if (idx == -1) Some(t.copy(circuit=topName)) else Some(t.copy(circuit=topName, module=topName, path=t.path.drop(idx+1))) - } + val newAnnotations = newTopName + .map({ topName => + // Update InstanceTargets and ReferenceTargets + // Yes, these are identical functions, but the copy methods force separate implementations + def updateInstance(t: InstanceTarget): Option[InstanceTarget] = { + val idx = t.path.lastIndexWhere(_._2.value == topName) + if (idx == -1) Some(t.copy(circuit = topName)) + else Some(t.copy(circuit = topName, module = topName, path = t.path.drop(idx 
+ 1))) + } + def updateReference(t: ReferenceTarget): Option[ReferenceTarget] = { + val idx = t.path.lastIndexWhere(_._2.value == topName) + if (idx == -1) Some(t.copy(circuit = topName)) + else Some(t.copy(circuit = topName, module = topName, path = t.path.drop(idx + 1))) + } - AnnotationSeq(state.annotations.toSeq.map({ - case x: SingleTargetAnnotation[InstanceTarget] if x.target.isInstanceOf[InstanceTarget] => - updateInstance(x.target).map(y => x.duplicate(y)) - case x: SingleTargetAnnotation[ReferenceTarget] if x.target.isInstanceOf[ReferenceTarget] => - updateReference(x.target).map(y => x.duplicate(y)) - case x: MultiTargetAnnotation => - val newTargets: Seq[Seq[Option[Target]]] = x.targets.map(_.map({ - case y: InstanceTarget => updateInstance(y) - case y: ReferenceTarget => updateReference(y) - case y => Some(y) - })) - if (newTargets.flatten.forall(_.isDefined)) Some(x.duplicate(newTargets.map(_.map(_.get)))) else None - case x => Some(x) - }).filter(_.isDefined).map(_.get)) - }).getOrElse(state.annotations) + AnnotationSeq( + state.annotations.toSeq + .map({ + case x: SingleTargetAnnotation[InstanceTarget] if x.target.isInstanceOf[InstanceTarget] => + updateInstance(x.target).map(y => x.duplicate(y)) + case x: SingleTargetAnnotation[ReferenceTarget] if x.target.isInstanceOf[ReferenceTarget] => + updateReference(x.target).map(y => x.duplicate(y)) + case x: MultiTargetAnnotation => + val newTargets: Seq[Seq[Option[Target]]] = x.targets.map(_.map({ + case y: InstanceTarget => updateInstance(y) + case y: ReferenceTarget => updateReference(y) + case y => Some(y) + })) + if (newTargets.flatten.forall(_.isDefined)) Some(x.duplicate(newTargets.map(_.map(_.get)))) else None + case x => Some(x) + }) + .filter(_.isDefined) + .map(_.get) + ) + }) + .getOrElse(state.annotations) state.copy(circuit = newCircuit, renames = mainRename, annotations = newAnnotations) } diff --git a/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala b/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala index d6d7b80d2..5d1cbc6cd 100644 --- a/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala +++ b/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala @@ -23,7 +23,7 @@ class RemoveUnusedModules extends Transform with DependencyAPIMigration { def execute(state: CircuitState): CircuitState = { val modulesByName = state.circuit.modules.map { - case m: Module => (m.name, Some(m)) + case m: Module => (m.name, Some(m)) case m: ExtModule => (m.name, None) }.toMap diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index 1c50a82e4..7bbb046a5 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -178,4 +178,3 @@ class TapeoutStage(doHarness: Boolean) extends Stage { annotations } } - diff --git a/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala b/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala index 86bf43de2..78d33e103 100644 --- a/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala +++ b/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala @@ -2,7 +2,7 @@ package barstools.tapeout.transforms.utils -import chisel3.experimental.{ChiselAnnotation, annotate} +import chisel3.experimental.{annotate, ChiselAnnotation} import firrtl._ import firrtl.annotations._ import firrtl.stage.Forms diff 
--git a/src/main/scala/mdf/macrolib/Utils.scala b/src/main/scala/mdf/macrolib/Utils.scala index 86d78a24c..547f910cf 100644 --- a/src/main/scala/mdf/macrolib/Utils.scala +++ b/src/main/scala/mdf/macrolib/Utils.scala @@ -45,7 +45,7 @@ object Utils { } catch { case f: FileNotFoundException => println(s"FILE NOT FOUND $p in dir ${os.pwd}") - throw f + throw f } } } diff --git a/src/test/scala/barstools/macros/CostFunction.scala b/src/test/scala/barstools/macros/CostFunction.scala index ee6c557f3..b3d5b46e6 100644 --- a/src/test/scala/barstools/macros/CostFunction.scala +++ b/src/test/scala/barstools/macros/CostFunction.scala @@ -2,7 +2,6 @@ package barstools.macros import mdf.macrolib.SRAMMacro - /** Tests to check that the cost function mechanism is working properly. */ /** A test metric that simply favours memories with smaller widths, to test that diff --git a/src/test/scala/barstools/macros/SpecificExamples.scala b/src/test/scala/barstools/macros/SpecificExamples.scala index 1a9571994..01d08de72 100644 --- a/src/test/scala/barstools/macros/SpecificExamples.scala +++ b/src/test/scala/barstools/macros/SpecificExamples.scala @@ -187,7 +187,8 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { override val libPrefix = "src/test/resources" - val memSRAMs = mdf.macrolib.Utils.readMDFFromString(""" + val memSRAMs = mdf.macrolib.Utils + .readMDFFromString(""" [ { "type" : "sram", "name" : "_T_182_ext", diff --git a/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala b/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala index b5c03c444..b953d170c 100644 --- a/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala +++ b/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala @@ -42,10 +42,10 @@ class ToBeMadeExternal extends MultiIOModule { class GenerateExampleTester extends MultiIOModule { val success = IO(Output(Bool())) - val mod = Module(new GenerateExampleModule) + val mod = Module(new GenerateExampleModule) mod.in := 1.U - val mod2 = Module(new ToBeMadeExternal) + val mod2 = Module(new ToBeMadeExternal) mod2.in := 1.U val reg = RegInit(0.U(8.W)) @@ -91,10 +91,14 @@ class GenerateSpec extends AnyFreeSpec { val targetDir = "test_run_dir/generate_spec" generateTestData(targetDir) - GenerateTop.main(Array( - "-i", s"$targetDir/GenerateExampleTester.fir", - "-o", s"$targetDir/GenerateExampleTester.v" - )) - new File(s"$targetDir/GenerateExampleTester.v").exists() should be (true) + GenerateTop.main( + Array( + "-i", + s"$targetDir/GenerateExampleTester.fir", + "-o", + s"$targetDir/GenerateExampleTester.v" + ) + ) + new File(s"$targetDir/GenerateExampleTester.v").exists() should be(true) } } diff --git a/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala b/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala index a7a165a46..d967c8df1 100644 --- a/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala +++ b/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala @@ -19,9 +19,12 @@ class GenerateTopSpec extends AnyFreeSpec with Matchers { GenerateTopAndHarness.main( Array( - "-i", s"$targetDir/ExampleModuleNeedsResetInverted.fir", - "-ll", "info", - "--log-file", transformListName + "-i", + s"$targetDir/ExampleModuleNeedsResetInverted.fir", + "-ll", + "info", + "--log-file", + transformListName ) ) @@ -47,26 +50,45 @@ class GenerateTopSpec extends AnyFreeSpec with Matchers { GenerateTopAndHarness.main( Array( - "--target-dir", "test_run_dir/generate_top_spec", - "-i", 
s"$targetDir/BlackBoxFloatTester.fir", - "-o", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.v", - "-tho", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.v", - "-i", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.fir", - "--syn-top", "UnitTestSuite", - "--harness-top", "TestHarness", - "-faf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.anno.json", - "-tsaof", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.anno.json", - "-tdf", "firrtl_black_box_resource_files.top.f", - "-tsf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.fir", - "-thaof", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.anno.json", - "-hdf", "firrtl_black_box_resource_files.harness.f", - "-thf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.fir", + "--target-dir", + "test_run_dir/generate_top_spec", + "-i", + s"$targetDir/BlackBoxFloatTester.fir", + "-o", + "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.v", + "-tho", + "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.v", + "-i", + "chipyard.unittest.TestHarness.IceNetUnitTestConfig.fir", + "--syn-top", + "UnitTestSuite", + "--harness-top", + "TestHarness", + "-faf", + "chipyard.unittest.TestHarness.IceNetUnitTestConfig.anno.json", + "-tsaof", + "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.anno.json", + "-tdf", + "firrtl_black_box_resource_files.top.f", + "-tsf", + "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.fir", + "-thaof", + "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.anno.json", + "-hdf", + "firrtl_black_box_resource_files.harness.f", + "-thf", + "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.fir", "--infer-rw", - "--repl-seq-mem", "-c:TestHarness:-o:chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.mems.conf", - "-thconf", "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.mems.conf", - "-td", "test_run_dir/from-ci", - "-ll", "info", - "--log-file", logOutputName + "--repl-seq-mem", + "-c:TestHarness:-o:chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.mems.conf", + "-thconf", + "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.mems.conf", + "-td", + "test_run_dir/from-ci", + "-ll", + "info", + "--log-file", + logOutputName ) ) diff --git a/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala b/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala index d18053f09..5d4c4ab37 100644 --- a/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala +++ b/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala @@ -22,7 +22,8 @@ class ExampleModuleNeedsResetInverted extends Module with ResetInverter { class ResetNSpec extends AnyFreeSpec with Matchers { "Inverting reset needs to be done throughout module in Chirrtl" in { - val chirrtl = (new ChiselStage).emitChirrtl(new ExampleModuleNeedsResetInverted, Array("--target-dir", "test_run_dir/reset_n_spec")) + val chirrtl = (new ChiselStage) + .emitChirrtl(new ExampleModuleNeedsResetInverted, Array("--target-dir", "test_run_dir/reset_n_spec")) chirrtl should include("input reset :") (chirrtl should not).include("input reset_n :") (chirrtl should not).include("node reset = not(reset_n)") From 143af1aa0417ddcf5bdbc8e4e724fab17f1ea794 Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 17 Aug 2021 16:09:52 -0700 Subject: [PATCH 229/273] Fix all warnings in barstool.macros._ - Fixed up all warnings in barstools macros package - mostly public method return types - removed lot's of extraneous parens and braces 
- Made code cleaner using more explicit macros - Fixed warnings in 2.13 that will likely turn into errors in future --- .../scala/barstools/macros/CostMetric.scala | 51 +++-- .../barstools/macros/MacroCompiler.scala | 159 +++++++------ .../{SynFlops.scala => SynFlopsPass.scala} | 15 +- src/main/scala/barstools/macros/Utils.scala | 209 +++++++++--------- .../scala/barstools/macros/CostFunction.scala | 8 +- .../scala/barstools/macros/Functional.scala | 9 +- .../barstools/macros/MacroCompilerSpec.scala | 136 ++++++------ src/test/scala/barstools/macros/Masks.scala | 100 ++++----- .../scala/barstools/macros/MultiPort.scala | 65 +++--- .../scala/barstools/macros/SRAMCompiler.scala | 6 +- .../barstools/macros/SimpleSplitDepth.scala | 77 +++---- .../barstools/macros/SimpleSplitWidth.scala | 82 +++---- .../barstools/macros/SpecificExamples.scala | 17 +- .../scala/barstools/macros/SynFlops.scala | 76 ++++--- .../tapeout/transforms/GenerateSpec.scala | 6 +- 15 files changed, 500 insertions(+), 516 deletions(-) rename src/main/scala/barstools/macros/{SynFlops.scala => SynFlopsPass.scala} (92%) diff --git a/src/main/scala/barstools/macros/CostMetric.scala b/src/main/scala/barstools/macros/CostMetric.scala index 8b0d04132..647889d34 100644 --- a/src/main/scala/barstools/macros/CostMetric.scala +++ b/src/main/scala/barstools/macros/CostMetric.scala @@ -44,7 +44,7 @@ trait CostMetricCompanion { object OldMetric extends CostMetric with CostMetricCompanion { override def cost(mem: Macro, lib: Macro): Option[Double] = { /* Palmer: A quick cost function (that must be kept in sync with - * memory_cost()) that attempts to avoid compiling unncessary + * memory_cost()) that attempts to avoid compiling unnecessary * memories. This is a lower bound on the cost of compiling a * memory: it assumes 100% bit-cell utilization when mapping. */ // val cost = 100 * (mem.depth * mem.width) / (lib.depth * lib.width) + @@ -52,9 +52,9 @@ object OldMetric extends CostMetric with CostMetricCompanion { ??? } - override def commandLineParams = Map() - override def name = "OldMetric" - override def construct(m: Map[String, String]) = OldMetric + override def commandLineParams() = Map.empty[String, String] + override def name() = "OldMetric" + override def construct(m: Map[String, String]): CostMetric = OldMetric } /** An external cost function. @@ -79,7 +79,7 @@ class ExternalMetric(path: String) extends CostMetric { writeMacroToPath(Some(libFile.getAbsolutePath), lib.src) // !! executes the given command - val result: String = (s"${path} ${memFile.getAbsolutePath} ${libFile.getAbsolutePath}" !!).trim + val result: String = (s"$path ${memFile.getAbsolutePath} ${libFile.getAbsolutePath}" !!).trim // Remove temporary files. memFile.delete() @@ -88,19 +88,19 @@ class ExternalMetric(path: String) extends CostMetric { try { Some(result.toDouble) } catch { - case e: NumberFormatException => None + case _: NumberFormatException => None } } - override def commandLineParams = Map("path" -> path) - override def name = ExternalMetric.name + override def commandLineParams() = Map("path" -> path) + override def name(): String = ExternalMetric.name() } object ExternalMetric extends CostMetricCompanion { - override def name = "ExternalMetric" + override def name() = "ExternalMetric" /** Construct this cost metric from a command line mapping. 
*/ - override def construct(m: Map[String, String]) = { + override def construct(m: Map[String, String]): ExternalMetric = { val pathOption = m.get("path") pathOption match { case Some(path: String) => new ExternalMetric(path) @@ -113,25 +113,24 @@ object ExternalMetric extends CostMetricCompanion { // TODO: write tests for this function to make sure it selects the right things object DefaultMetric extends CostMetric with CostMetricCompanion { override def cost(mem: Macro, lib: Macro): Option[Double] = { - val memMask = mem.src.ports.map(_.maskGran).find(_.isDefined).map(_.get) - val libMask = lib.src.ports.map(_.maskGran).find(_.isDefined).map(_.get) + val memMask = mem.src.ports.map(_.maskGran).find(_.isDefined).flatten + val libMask = lib.src.ports.map(_.maskGran).find(_.isDefined).flatten val memWidth = (memMask, libMask) match { case (None, _) => mem.src.width case (Some(p), None) => (mem.src.width / p) * math.ceil( p.toDouble / lib.src.width ) * lib.src.width //We map the mask to distinct memories - case (Some(p), Some(m)) => { - if (m <= p) (mem.src.width / p) * math.ceil(p.toDouble / m) * m //Using multiple m's to create a p (integeraly) + case (Some(p), Some(m)) => + if (m <= p) (mem.src.width / p) * math.ceil(p.toDouble / m) * m //Using multiple m's to create a p (integrally) else (mem.src.width / p) * m //Waste the extra maskbits - } } val maskPenalty = (memMask, libMask) match { - case (None, Some(m)) => 0.001 + case (None, Some(_)) => 0.001 case (_, _) => 0 } val depthCost = math.ceil(mem.src.depth.toDouble / lib.src.depth.toDouble) - val widthCost = math.ceil(memWidth.toDouble / lib.src.width.toDouble) + val widthCost = math.ceil(memWidth / lib.src.width.toDouble) val bitsCost = (lib.src.depth * lib.src.width).toDouble // Fraction of wasted bits plus const per mem val requestedBits = (mem.src.depth * mem.src.width).toDouble @@ -141,9 +140,9 @@ object DefaultMetric extends CostMetric with CostMetricCompanion { Some(1.0 * bitsWasted / requestedBits + costPerInst + maskPenalty) } - override def commandLineParams = Map() - override def name = "DefaultMetric" - override def construct(m: Map[String, String]) = DefaultMetric + override def commandLineParams() = Map.empty[String, String] + override def name() = "DefaultMetric" + override def construct(m: Map[String, String]): CostMetric = DefaultMetric } object MacroCompilerUtil { @@ -156,11 +155,11 @@ object MacroCompilerUtil { * Used to pass structured values through as an annotation. */ def objToString(o: Serializable): String = { - val baos: ByteArrayOutputStream = new ByteArrayOutputStream - val oos: ObjectOutputStream = new ObjectOutputStream(baos) - oos.writeObject(o) - oos.close() - return Base64.getEncoder.encodeToString(baos.toByteArray) + val byteOutput: ByteArrayOutputStream = new ByteArrayOutputStream + val objectOutput: ObjectOutputStream = new ObjectOutputStream(byteOutput) + objectOutput.writeObject(o) + objectOutput.close() + Base64.getEncoder.encodeToString(byteOutput.toByteArray) } /** Deserialize an arbitrary object from String. 
*/ @@ -169,7 +168,7 @@ object MacroCompilerUtil { val ois: ObjectInputStream = new ObjectInputStream(new ByteArrayInputStream(data)) val o = ois.readObject ois.close() - return o + o } } diff --git a/src/main/scala/barstools/macros/MacroCompiler.scala b/src/main/scala/barstools/macros/MacroCompiler.scala index 5a5804b7e..968e6b30d 100644 --- a/src/main/scala/barstools/macros/MacroCompiler.scala +++ b/src/main/scala/barstools/macros/MacroCompiler.scala @@ -11,14 +11,17 @@ import barstools.macros.Utils._ import firrtl.Utils.{one, zero, BoolType} import firrtl.annotations._ import firrtl.ir._ +import firrtl.options.Dependency +import firrtl.stage.TransformManager.TransformDependency import firrtl.stage.{FirrtlSourceAnnotation, FirrtlStage, Forms, OutputFileAnnotation, RunFirrtlTransformAnnotation} import firrtl.transforms.NoDCEAnnotation import firrtl.{PrimOps, _} import mdf.macrolib.{PolarizedPort, PortPolarity, SRAMCompiler, SRAMGroup, SRAMMacro} import java.io.{File, FileWriter} -import scala.collection.mutable.{ArrayBuffer, HashMap} -import scala.io.Source +import scala.annotation.tailrec +import scala.collection.mutable +import scala.collection.mutable.ArrayBuffer case class MacroCompilerException(msg: String) extends Exception(msg) @@ -61,7 +64,7 @@ object MacroCompilerAnnotation { * TODO: Maybe set the default to FallbackSynflops (typical for * vlsi_mem_gen-like scripts) once it's implemented? */ - val Default = CompileAvailable + val Default: CompilerMode = CompileAvailable // Options as list of (CompilerMode, command-line name, description) val options: Seq[(CompilerMode, String, String)] = Seq( @@ -142,8 +145,8 @@ class MacroCompilerPass( // ((0,21), (22,21)) is illegal and will throw an assert private def checkBitPairs(bitPairs: Seq[(BigInt, BigInt)]): Unit = { bitPairs.foldLeft(BigInt(-1))((lastBit, nextPair) => { - assert(lastBit + 1 == nextPair._1, s"Pair's first bit ${nextPair._1} does not follow last bit ${lastBit}"); - assert(nextPair._2 >= nextPair._1, s"Pair ${nextPair} in bitPairs ${bitPairs} is illegal"); + assert(lastBit + 1 == nextPair._1, s"Pair's first bit ${nextPair._1} does not follow last bit $lastBit") + assert(nextPair._2 >= nextPair._1, s"Pair $nextPair in bitPairs $bitPairs is illegal") nextPair._2 }) } @@ -207,10 +210,10 @@ class MacroCompilerPass( // Only the lib is masked. // Not an issue; we can just make all the bits in the lib mask enabled. - case (None, Some(p)) => splitMemory(libWidth) + case (None, Some(_)) => splitMemory(libWidth) // Only the mem is masked. - case (Some(p), None) => { + case (Some(p), None) => if (p % libPort.src.width.get == 0) { // If the mem mask is a multiple of the lib width, then we're good. // Just roll over every lib width as usual. @@ -236,10 +239,9 @@ class MacroCompilerPass( return Seq() } } - } // Both lib and mem are masked. 
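// (In the case below, m is the mem's mask granularity and l is the lib's.)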
- case (Some(m), Some(l)) => { + case (Some(m), Some(l)) => if (m == l) { // Lib maskGran == mem maskGran, no problems splitMemory(libWidth) @@ -272,7 +274,6 @@ class MacroCompilerPass( return Seq() } } - } } } @@ -289,10 +290,10 @@ class MacroCompilerPass( } } // Add in the last chunk if there are any leftovers - bitPairs += ((currentLSB, mem.src.width.toInt - 1)) + bitPairs += ((currentLSB, mem.src.width - 1)) - bitPairs.toSeq - } + bitPairs + }.toSeq def compile(mem: Macro, lib: Macro): Option[(Module, Macro)] = { assert( @@ -312,9 +313,9 @@ class MacroCompilerPass( // Depth mapping val stmts = ArrayBuffer[Statement]() - val outputs = HashMap[String, ArrayBuffer[(Expression, Expression)]]() - val selects = HashMap[String, Expression]() - val selectRegs = HashMap[String, Expression]() + val outputs = mutable.HashMap[String, ArrayBuffer[(Expression, Expression)]]() + val selects = mutable.HashMap[String, Expression]() + val selectRegs = mutable.HashMap[String, Expression]() /* Palmer: If we've got a parallel memory then we've got to take the * address bits into account. */ if (mem.src.depth > lib.src.depth) { @@ -342,9 +343,9 @@ class MacroCompilerPass( } } } - for ((off, i) <- (BigInt(0).until(mem.src.depth, lib.src.depth)).zipWithIndex) { + for ((_, i) <- BigInt(0).until(mem.src.depth, lib.src.depth).zipWithIndex) { for (j <- bitPairs.indices) { - val name = s"mem_${i}_${j}" + val name = s"mem_${i}_$j" // Create the instance. stmts += WDefInstance(NoInfo, name, lib.src.name, lib.tpe) // Connect extra ports of the lib. @@ -370,7 +371,7 @@ class MacroCompilerPass( } val cats = ArrayBuffer[Expression]() for (((low, high), j) <- bitPairs.zipWithIndex) { - val inst = WRef(s"mem_${i}_${j}", lib.tpe) + val inst = WRef(s"mem_${i}_$j", lib.tpe) def connectPorts2(mem: Expression, lib: String, polarity: Option[PortPolarity]): Statement = Connect(NoInfo, WSubField(inst, lib), portToExpression(mem, polarity)) @@ -396,11 +397,11 @@ class MacroCompilerPass( * together a bunch of narrower memories, which can only be * done after generating all the memories. This saves up the * output statements for later. */ - val name = s"${mem}_${i}_${j}" // This name is the output from the instance (mem vs ${mem}). + val name = s"${mem}_${i}_$j" // This name is the output from the instance (mem vs ${mem}). val exp = portToExpression(bits(WSubField(inst, lib), high - low, 0), Some(lib_polarity)) stmts += DefNode(NoInfo, name, exp) cats += WRef(name) - case (None, Some(lib)) => + case (None, Some(_)) => /* Palmer: If the inner memory has an output port but the outer * one doesn't then it's safe to just leave the outer * port floating. */ @@ -410,7 +411,7 @@ class MacroCompilerPass( * there's nothing to do. */ case (Some(PolarizedPort(mem, _)), None) => System.err.println("WARNING: Unable to match output ports on memory") - System.err.println(s" outer output port: ${mem}") + System.err.println(s" outer output port: $mem") return None } @@ -434,7 +435,7 @@ class MacroCompilerPass( * there's nothing to do. */ case (Some(PolarizedPort(mem, _)), None) => System.err.println("WARNING: Unable to match input ports on memory") - System.err.println(s" outer input port: ${mem}") + System.err.println(s" outer input port: $mem") return None } @@ -452,7 +453,7 @@ class MacroCompilerPass( // If we have a mem maskGran less than the lib's maskGran, we'll have to take the smaller maskGran. // Example: if we have a lib whose maskGran is 8 but our mem's maskGran is 4. // The other case is if we're using a larger lib than mem. 
- val usingLessThanLibMaskGran = (memPort.src.maskGran.get < libPort.src.effectiveMaskGran) + val usingLessThanLibMaskGran = memPort.src.maskGran.get < libPort.src.effectiveMaskGran val effectiveLibWidth = if (usingLessThanLibMaskGran) memPort.src.maskGran.get @@ -460,24 +461,22 @@ class MacroCompilerPass( libPort.src.width.get cat( - ( - (0 until libPort.src.width.get by libPort.src.effectiveMaskGran) - .map(i => { - if (usingLessThanLibMaskGran && i >= effectiveLibWidth) { - // If the memMaskGran is smaller than the lib's gran, then - // zero out the upper bits. + (0 until libPort.src.width.get by libPort.src.effectiveMaskGran) + .map(i => { + if (usingLessThanLibMaskGran && i >= effectiveLibWidth) { + // If the memMaskGran is smaller than the lib's gran, then + // zero out the upper bits. + zero + } else { + if ((low + i) >= memPort.src.width.get) { + // If our bit is larger than the whole width of the mem, just zero out the upper bits. zero } else { - if ((low + i) >= memPort.src.width.get) { - // If our bit is larger than the whole width of the mem, just zero out the upper bits. - zero - } else { - // Pick the appropriate bit from the mem mask. - bits(WRef(mem), (low + i) / memPort.src.effectiveMaskGran) - } + // Pick the appropriate bit from the mem mask. + bits(WRef(mem), (low + i) / memPort.src.effectiveMaskGran) } - }) - ) + } + }) .reverse ) } @@ -486,7 +485,7 @@ class MacroCompilerPass( * all bits of the lib mask port. */ if (libPort.src.maskPort.isDefined) { val width = libPort.src.width.get / libPort.src.effectiveMaskGran - val value = (BigInt(1) << width.toInt) - 1 + val value = (BigInt(1) << width) - 1 UIntLiteral(value, IntWidth(width)) } else { // No mask ports on either side. @@ -551,17 +550,15 @@ class MacroCompilerPass( * memory that actually has them then we can use the * write enable port instead of the mask port. */ chipEnable match { - case Some(PolarizedPort(en, en_polarity)) => { + case Some(PolarizedPort(en, en_polarity)) => stmts += connectPorts(andAddrMatch(and(memWriteEnable, memMask)), we, we_polarity) stmts += connectPorts(andAddrMatch(memChipEnable), en, en_polarity) - } - case _ => { + case _ => stmts += connectPorts( andAddrMatch(and(and(memWriteEnable, memChipEnable), memMask)), we, we_polarity ) - } } } else { System.err.println("cannot emulate multi-bit mask ports with write enable") @@ -575,9 +572,9 @@ class MacroCompilerPass( // Cat macro outputs for selection memPort.src.output match { case Some(PolarizedPort(mem, _)) if cats.nonEmpty => - val name = s"${mem}_${i}" + val name = s"${mem}_$i" stmts += DefNode(NoInfo, name, cat(cats.toSeq.reverse)) - (outputs.getOrElseUpdate(mem, ArrayBuffer[(Expression, Expression)]())) += + outputs.getOrElseUpdate(mem, ArrayBuffer[(Expression, Expression)]()) += (addrMatchReg -> WRef(name)) case _ => } @@ -590,7 +587,7 @@ class MacroCompilerPass( case Some(PolarizedPort(mem, _)) => outputs.get(mem) match { case Some(select) => - val output = (select.foldRight(zeroOutputValue)) { case ((cond, tval), fval) => + val output = select.foldRight(zeroOutputValue) { case ((cond, tval), fval) => Mux(cond, tval, fval, fval.tpe) } stmts += Connect(NoInfo, WRef(mem), output) @@ -610,11 +607,11 @@ class MacroCompilerPass( // Try to compile each of the memories in mems. // The 'state' is c.modules, which is a list of all the firrtl modules // in the 'circuit'. 
- (mems.foldLeft(c.modules)) { (modules, mem) => + mems.foldLeft(c.modules) { (modules, mem) => val sram = mem.src def groupMatchesMask(group: SRAMGroup, mem: SRAMMacro): Boolean = { - val memMask = mem.ports.map(_.maskGran).find(_.isDefined).map(_.get) - val libMask = group.ports.map(_.maskGran).find(_.isDefined).map(_.get) + val memMask = mem.ports.map(_.maskGran).find(_.isDefined).flatten + val libMask = group.ports.map(_.maskGran).find(_.isDefined).flatten (memMask, libMask) match { case (None, _) => true case (Some(_), None) => false @@ -623,23 +620,22 @@ class MacroCompilerPass( } // Add compiler memories that might map well to libs val compLibs = compilers match { - case Some(SRAMCompiler(_, groups)) => { + case Some(SRAMCompiler(_, groups)) => groups .filter(g => g.family == sram.family && groupMatchesMask(g, sram)) .map(g => { for { w <- g.width - d <- g.depth if ((sram.width % w == 0) && (sram.depth % d == 0)) + d <- g.depth if (sram.width % w == 0) && (sram.depth % d == 0) } yield Seq(new Macro(buildSRAMMacro(g, d, w, g.vt.head))) }) - } case None => Seq() } val fullLibs = libs ++ compLibs.flatten.flatten // Try to compile mem against each lib in libs, keeping track of the // best compiled version, external lib used, and cost. - val (best, cost) = (fullLibs.foldLeft(None: Option[(Module, Macro)], Double.MaxValue)) { + val (best, _) = fullLibs.foldLeft(None: Option[(Module, Macro)], Double.MaxValue) { case ((best, cost), lib) if mem.src.ports.size != lib.src.ports.size => /* Palmer: FIXME: This just assumes the Chisel and vendor ports are in the same * order, but I'm starting with what actually gets generated. */ @@ -648,15 +644,14 @@ class MacroCompilerPass( case ((best, cost), lib) => // Run the cost function to evaluate this potential compile. costMetric.cost(mem, lib) match { - case Some(newCost) => { + case Some(newCost) => //System.err.println(s"Cost of ${lib.src.name} for ${mem.src.name}: ${newCost}") // Try compiling compile(mem, lib) match { // If it was successful and the new cost is lower - case Some(p) if (newCost < cost) => (Some(p), newCost) - case _ => (best, cost) + case Some(p) if newCost < cost => (Some(p), newCost) + case _ => (best, cost) } - } case _ => (best, cost) // Cost function rejected this combination. } } @@ -665,27 +660,25 @@ class MacroCompilerPass( // in the modules list with a compiled version, as well as the extmodule // stub for the lib. best match { - case None => { + case None => if (mode == MacroCompilerAnnotation.Strict) - throw new MacroCompilerException( + throw MacroCompilerException( s"Target memory ${mem.src.name} could not be compiled and strict mode is activated - aborting." 
) else modules - } case Some((mod, bb)) => hammerIR match { - case Some(f) => { + case Some(f) => val hammerIRWriter = new FileWriter(new File(f), !firstLib) if (firstLib) hammerIRWriter.write("[\n") hammerIRWriter.write(bb.src.toJSON().toString()) hammerIRWriter.write("\n,\n") hammerIRWriter.close() firstLib = false - } case None => } - (modules.filterNot(m => m.name == mod.name || m.name == bb.blackbox.name)) ++ Seq(mod, bb.blackbox) + modules.filterNot(m => m.name == mod.name || m.name == bb.blackbox.name) ++ Seq(mod, bb.blackbox) } } case _ => c.modules @@ -695,12 +688,14 @@ class MacroCompilerPass( } class MacroCompilerTransform extends Transform with DependencyAPIMigration { - override def prerequisites = Forms.LowForm - override def optionalPrerequisites = Forms.LowFormOptimized - override def optionalPrerequisiteOf = Forms.LowEmitters + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized + override def optionalPrerequisiteOf: Seq[Dependency[Emitter]] = Forms.LowEmitters override def invalidates(a: Transform) = false - def execute(state: CircuitState) = state.annotations.collect { case a: MacroCompilerAnnotation => a } match { + def execute(state: CircuitState): CircuitState = state.annotations.collect { case a: MacroCompilerAnnotation => + a + } match { case Seq(anno: MacroCompilerAnnotation) => val MacroCompilerAnnotation.Params( memFile, @@ -718,7 +713,7 @@ class MacroCompilerTransform extends Transform with DependencyAPIMigration { } // Check that we don't have any modules both forced to compile and synflops. - assert((forceCompile.intersect(forceSynflops)).isEmpty, "Cannot have modules both forced to compile and synflops") + assert(forceCompile.intersect(forceSynflops).isEmpty, "Cannot have modules both forced to compile and synflops") // Read, eliminate None, get only SRAM, make firrtl macro val mems: Option[Seq[Macro]] = (memFileFormat match { @@ -753,6 +748,7 @@ class MacroCompilerTransform extends Transform with DependencyAPIMigration { memsAdjustedForMode.filterNot(m => forceSynflops.contains(m.src.name)) ++ setToSeqMacro(forceCompile) } val memSynflops: Seq[Macro] = mems.map { actualMems => + // val memsAdjustedForMode = if (mode == MacroCompilerAnnotation.Synflops) actualMems else Seq.empty memsAdjustedForMode.filterNot(m => forceCompile.contains(m.src.name)) ++ setToSeqMacro(forceSynflops) }.getOrElse(Seq.empty) @@ -768,15 +764,15 @@ class MacroCompilerTransform extends Transform with DependencyAPIMigration { }) ) ) - (transforms.foldLeft(state))((s, xform) => xform.runTransform(s)) + transforms.foldLeft(state)((s, xform) => xform.runTransform(s)) case _ => state } } class MacroCompilerOptimizations extends SeqTransform with DependencyAPIMigration { - override def prerequisites = Forms.LowForm - override def optionalPrerequisites = Forms.LowFormOptimized - override def optionalPrerequisiteOf = Forms.LowEmitters + override def prerequisites: Seq[TransformDependency] = Forms.LowForm + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized + override def optionalPrerequisiteOf: Seq[Dependency[Emitter]] = Forms.LowEmitters override def invalidates(a: Transform) = false def transforms: Seq[Transform] = Seq( @@ -823,6 +819,7 @@ object MacroCompiler extends App { " --mode:" ) ++ modeOptions).mkString("\n") + @tailrec def parseArgs( map: MacroParamMap, costMap: CostParamMap, @@ -855,22 +852,22 @@ object MacroCompiler extends App { 
parseArgs(map, costMap, forcedMemories.copy(_2 = forcedMemories._2 + value), tail) case "--mode" :: value :: tail => parseArgs(map + (Mode -> value), costMap, forcedMemories, tail) - case arg :: tail => + case arg :: _ => println(s"Unknown field $arg\n") println(usage) sys.exit(1) } - def run(args: List[String]) { + def run(args: List[String]): Unit = { val (params, costParams, forcedMemories) = parseArgs(Map[MacroParam, String](), Map[String, String](), (Set.empty, Set.empty), args) try { val macros = params.get(MacrosFormat) match { case Some("conf") => - filterForSRAM(readConfFromPath(params.get(Macros))).get.map(x => (new Macro(x)).blackbox) + filterForSRAM(readConfFromPath(params.get(Macros))).get.map(x => new Macro(x).blackbox) case _ => filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(params.get(Macros))).get - .map(x => (new Macro(x)).blackbox) + .map(x => new Macro(x).blackbox) } if (macros.nonEmpty) { @@ -913,14 +910,13 @@ object MacroCompiler extends App { ) params.get(HammerIR) match { - case Some(hammerIRFile: String) => { - val lines = Source.fromFile(hammerIRFile).getLines().toList + case Some(hammerIRFile: String) => + val lines = FileUtils.getLines(hammerIRFile).toList val hammerIRWriter = new FileWriter(new File(hammerIRFile)) // JSON means we need to destroy the last comma :( lines.dropRight(1).foreach(l => hammerIRWriter.write(l + "\n")) hammerIRWriter.write("]\n") hammerIRWriter.close() - } case None => } } else { @@ -929,11 +925,10 @@ object MacroCompiler extends App { // Emit empty verilog file if no macros found params.get(Verilog) match { - case Some(verilogFile: String) => { + case Some(verilogFile: String) => // Create an empty verilog file val verilogWriter = new FileWriter(new File(verilogFile)) verilogWriter.close() - } case None => } } diff --git a/src/main/scala/barstools/macros/SynFlops.scala b/src/main/scala/barstools/macros/SynFlopsPass.scala similarity index 92% rename from src/main/scala/barstools/macros/SynFlops.scala rename to src/main/scala/barstools/macros/SynFlopsPass.scala index f7245144b..5dda0476a 100644 --- a/src/main/scala/barstools/macros/SynFlops.scala +++ b/src/main/scala/barstools/macros/SynFlopsPass.scala @@ -8,11 +8,13 @@ import firrtl._ import firrtl.ir._ import firrtl.passes.MemPortUtils.memPortField +import scala.collection.mutable + class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pass { - val extraMods = scala.collection.mutable.ArrayBuffer.empty[Module] - lazy val libMods = (libs.map { lib => + val extraMods: mutable.ArrayBuffer[Module] = scala.collection.mutable.ArrayBuffer.empty[Module] + lazy val libMods: Map[String, Module] = libs.map { lib => lib.src.name -> { - val (dataType, dataWidth) = (lib.src.ports.foldLeft(None: Option[BigInt]))((res, port) => + val (dataType, dataWidth) = lib.src.ports.foldLeft(None: Option[BigInt])((res, port) => (res, port.maskPort) match { case (_, None) => res @@ -28,7 +30,6 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa } val maxDepth = firrtl.Utils.min(lib.src.depth, 1 << 26) - val numMems = lib.src.depth / maxDepth // Change macro to be mapped onto to look like the below mem // by changing its depth, and width @@ -46,7 +47,7 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa ) ) ) - val mod_macro = (new MacroCompilerPass(None, None, None, None)).compile(lib, lib_macro) + val mod_macro = new MacroCompilerPass(None, None, None, None).compile(lib, lib_macro) val (real_mod, real_macro) = mod_macro.get 
val mem = DefMemory( @@ -142,10 +143,10 @@ class SynFlopsPass(synflops: Boolean, libs: Seq[Macro]) extends firrtl.passes.Pa extraMods.append(real_macro.module(Block(mem +: (readConnects ++ writeConnects ++ readwriteConnects)))) real_mod } - }).toMap + }.toMap def run(c: Circuit): Circuit = { if (!synflops) c - else c.copy(modules = (c.modules.map(m => libMods.getOrElse(m.name, m))) ++ extraMods) + else c.copy(modules = c.modules.map(m => libMods.getOrElse(m.name, m)) ++ extraMods) } } diff --git a/src/main/scala/barstools/macros/Utils.scala b/src/main/scala/barstools/macros/Utils.scala index 9afa51f37..1b4fa8fe5 100644 --- a/src/main/scala/barstools/macros/Utils.scala +++ b/src/main/scala/barstools/macros/Utils.scala @@ -15,28 +15,28 @@ object MacroCompilerMath { } class FirrtlMacroPort(port: MacroPort) { - val src = port + val src: MacroPort = port - val isReader = port.output.nonEmpty && port.input.isEmpty - val isWriter = port.input.nonEmpty && port.output.isEmpty - val isReadWriter = port.input.nonEmpty && port.output.nonEmpty + val isReader: Boolean = port.output.nonEmpty && port.input.isEmpty + val isWriter: Boolean = port.input.nonEmpty && port.output.isEmpty + val isReadWriter: Boolean = port.input.nonEmpty && port.output.nonEmpty - val addrType = UIntType(IntWidth(MacroCompilerMath.ceilLog2(port.depth.get).max(1))) - val dataType = UIntType(IntWidth(port.width.get)) - val maskType = UIntType(IntWidth(port.width.get / port.effectiveMaskGran)) + val addrType: UIntType = UIntType(IntWidth(MacroCompilerMath.ceilLog2(port.depth.get).max(1))) + val dataType: UIntType = UIntType(IntWidth(port.width.get)) + val maskType: UIntType = UIntType(IntWidth(port.width.get / port.effectiveMaskGran)) // Bundle representing this macro port. - val tpe = BundleType( + val tpe: BundleType = BundleType( Seq(Field(port.address.name, Flip, addrType)) ++ - (port.clock.map(p => Field(p.name, Flip, ClockType))) ++ - (port.input.map(p => Field(p.name, Flip, dataType))) ++ - (port.output.map(p => Field(p.name, Default, dataType))) ++ - (port.chipEnable.map(p => Field(p.name, Flip, BoolType))) ++ - (port.readEnable.map(p => Field(p.name, Flip, BoolType))) ++ - (port.writeEnable.map(p => Field(p.name, Flip, BoolType))) ++ - (port.maskPort.map(p => Field(p.name, Flip, maskType))) + port.clock.map(p => Field(p.name, Flip, ClockType)) ++ + port.input.map(p => Field(p.name, Flip, dataType)) ++ + port.output.map(p => Field(p.name, Default, dataType)) ++ + port.chipEnable.map(p => Field(p.name, Flip, BoolType)) ++ + port.readEnable.map(p => Field(p.name, Flip, BoolType)) ++ + port.writeEnable.map(p => Field(p.name, Flip, BoolType)) ++ + port.maskPort.map(p => Field(p.name, Flip, maskType)) ) - val ports = tpe.fields.map(f => + val ports: Seq[Port] = tpe.fields.map(f => Port( NoInfo, f.name, @@ -51,30 +51,30 @@ class FirrtlMacroPort(port: MacroPort) { // Reads an SRAMMacro and generates firrtl blackboxes. 
class Macro(srcMacro: SRAMMacro) { - val src = srcMacro + val src: SRAMMacro = srcMacro - val firrtlPorts = srcMacro.ports.map { new FirrtlMacroPort(_) } + val firrtlPorts: Seq[FirrtlMacroPort] = srcMacro.ports.map { new FirrtlMacroPort(_) } - val writers = firrtlPorts.filter(p => p.isWriter) - val readers = firrtlPorts.filter(p => p.isReader) - val readwriters = firrtlPorts.filter(p => p.isReadWriter) + val writers: Seq[FirrtlMacroPort] = firrtlPorts.filter(p => p.isWriter) + val readers: Seq[FirrtlMacroPort] = firrtlPorts.filter(p => p.isReader) + val readwriters: Seq[FirrtlMacroPort] = firrtlPorts.filter(p => p.isReadWriter) - val sortedPorts = writers ++ readers ++ readwriters - val extraPorts = srcMacro.extraPorts.map { p => + val sortedPorts: Seq[FirrtlMacroPort] = writers ++ readers ++ readwriters + val extraPorts: Seq[(String, UIntLiteral)] = srcMacro.extraPorts.map { p => assert(p.portType == Constant) // TODO: release it? val name = p.name val width = BigInt(p.width.toLong) val value = BigInt(p.value.toLong) - (name -> UIntLiteral(value, IntWidth(width))) + name -> UIntLiteral(value, IntWidth(width)) } // Bundle representing this memory blackbox - val tpe = BundleType(firrtlPorts.flatMap(_.tpe.fields)) + val tpe: BundleType = BundleType(firrtlPorts.flatMap(_.tpe.fields)) - private val modPorts = (firrtlPorts.flatMap(_.ports)) ++ - (extraPorts.map { case (name, value) => Port(NoInfo, name, Input, value.tpe) }) - val blackbox = ExtModule(NoInfo, srcMacro.name, modPorts, srcMacro.name, Nil) - def module(body: Statement) = Module(NoInfo, srcMacro.name, modPorts, body) + private val modPorts = firrtlPorts.flatMap(_.ports) ++ + extraPorts.map { case (name, value) => Port(NoInfo, name, Input, value.tpe) } + val blackbox: ExtModule = ExtModule(NoInfo, srcMacro.name, modPorts, srcMacro.name, Nil) + def module(body: Statement): Module = Module(NoInfo, srcMacro.name, modPorts, body) } object Utils { @@ -87,7 +87,7 @@ object Utils { } // This utility reads a conf in and returns MDF like mdf.macrolib.Utils.readMDFFromPath def readConfFromPath(path: Option[String]): Option[Seq[mdf.macrolib.Macro]] = { - path.map((p) => Utils.readConfFromString(scala.io.Source.fromFile(p).mkString)) + path.map(p => Utils.readConfFromString(FileUtils.getText(p))) } def readConfFromString(str: String): Seq[mdf.macrolib.Macro] = { MemConf.fromString(str).map { m: MemConf => @@ -102,13 +102,13 @@ object Utils { } } def portSpecToFamily(ports: Seq[MemPort]): String = { - val numR = ports.count(_ match { case ReadPort => true; case _ => false }) - val numW = ports.count(_ match { case WritePort | MaskedWritePort => true; case _ => false }) - val numRW = ports.count(_ match { case ReadWritePort | MaskedReadWritePort => true; case _ => false }) + val numR = ports.count { case ReadPort => true; case _ => false } + val numW = ports.count { case WritePort | MaskedWritePort => true; case _ => false } + val numRW = ports.count { case ReadWritePort | MaskedReadWritePort => true; case _ => false } val numRStr = if (numR > 0) s"${numR}r" else "" val numWStr = if (numW > 0) s"${numW}w" else "" val numRWStr = if (numRW > 0) s"${numRW}rw" else "" - return numRStr + numWStr + numRWStr + numRStr + numWStr + numRWStr } // This translates between two represenations of ports def portSpecToMacroPort(width: Int, depth: BigInt, maskGran: Option[Int], ports: Seq[MemPort]): Seq[MacroPort] = { @@ -116,76 +116,69 @@ object Utils { var numW = 0 var numRW = 0 ports.map { - _ match { - case ReadPort => { - val portName = s"R${numR}" - numR += 1 
- MacroPort( - width = Some(width), - depth = Some(depth), - address = PolarizedPort(s"${portName}_addr", ActiveHigh), - clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), - readEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), - output = Some(PolarizedPort(s"${portName}_data", ActiveHigh)) - ) - } - case WritePort => { - val portName = s"W${numW}" - numW += 1 - MacroPort( - width = Some(width), - depth = Some(depth), - address = PolarizedPort(s"${portName}_addr", ActiveHigh), - clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), - writeEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), - input = Some(PolarizedPort(s"${portName}_data", ActiveHigh)) - ) - } - case MaskedWritePort => { - val portName = s"W${numW}" - numW += 1 - MacroPort( - width = Some(width), - depth = Some(depth), - address = PolarizedPort(s"${portName}_addr", ActiveHigh), - clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), - writeEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), - maskPort = Some(PolarizedPort(s"${portName}_mask", ActiveHigh)), - maskGran = maskGran, - input = Some(PolarizedPort(s"${portName}_data", ActiveHigh)) - ) - } - case ReadWritePort => { - val portName = s"RW${numRW}" - numRW += 1 - MacroPort( - width = Some(width), - depth = Some(depth), - address = PolarizedPort(s"${portName}_addr", ActiveHigh), - clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), - chipEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), - writeEnable = Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), - input = Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), - output = Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) - ) - } - case MaskedReadWritePort => { - val portName = s"RW${numRW}" - numRW += 1 - MacroPort( - width = Some(width), - depth = Some(depth), - address = PolarizedPort(s"${portName}_addr", ActiveHigh), - clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), - chipEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), - writeEnable = Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), - maskPort = Some(PolarizedPort(s"${portName}_wmask", ActiveHigh)), - maskGran = maskGran, - input = Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), - output = Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) - ) - } - } + case ReadPort => + val portName = s"R$numR" + numR += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + readEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + output = Some(PolarizedPort(s"${portName}_data", ActiveHigh)) + ) + case WritePort => + val portName = s"W$numW" + numW += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + writeEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + input = Some(PolarizedPort(s"${portName}_data", ActiveHigh)) + ) + case MaskedWritePort => + val portName = s"W$numW" + numW += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + writeEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + maskPort = Some(PolarizedPort(s"${portName}_mask", ActiveHigh)), + maskGran = maskGran, + input = 
Some(PolarizedPort(s"${portName}_data", ActiveHigh)) + ) + case ReadWritePort => + val portName = s"RW$numRW" + numRW += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + chipEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + writeEnable = Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), + input = Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), + output = Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) + ) + case MaskedReadWritePort => + val portName = s"RW$numRW" + numRW += 1 + MacroPort( + width = Some(width), + depth = Some(depth), + address = PolarizedPort(s"${portName}_addr", ActiveHigh), + clock = Some(PolarizedPort(s"${portName}_clk", PositiveEdge)), + chipEnable = Some(PolarizedPort(s"${portName}_en", ActiveHigh)), + writeEnable = Some(PolarizedPort(s"${portName}_wmode", ActiveHigh)), + maskPort = Some(PolarizedPort(s"${portName}_wmask", ActiveHigh)), + maskGran = maskGran, + input = Some(PolarizedPort(s"${portName}_wdata", ActiveHigh)), + output = Some(PolarizedPort(s"${portName}_rdata", ActiveHigh)) + ) } } def findSRAMCompiler(s: Option[Seq[mdf.macrolib.Macro]]): Option[mdf.macrolib.SRAMCompiler] = { @@ -215,7 +208,7 @@ object Utils { ) } def buildSRAMMacro(g: mdf.macrolib.SRAMGroup, d: Int, w: Int, vt: String): mdf.macrolib.SRAMMacro = { - return mdf.macrolib.SRAMMacro( + mdf.macrolib.SRAMMacro( makeName(g, d, w, vt), w, d, @@ -241,9 +234,9 @@ object Utils { } } - def and(e1: Expression, e2: Expression) = + def and(e1: Expression, e2: Expression): DoPrim = DoPrim(PrimOps.And, Seq(e1, e2), Nil, e1.tpe) - def or(e1: Expression, e2: Expression) = + def or(e1: Expression, e2: Expression): DoPrim = DoPrim(PrimOps.Or, Seq(e1, e2), Nil, e1.tpe) def bits(e: Expression, high: BigInt, low: BigInt): Expression = DoPrim(PrimOps.Bits, Seq(e), Seq(high, low), UIntType(IntWidth(high - low + 1))) @@ -251,7 +244,7 @@ object Utils { def cat(es: Seq[Expression]): Expression = if (es.size == 1) es.head else DoPrim(PrimOps.Cat, Seq(es.head, cat(es.tail)), Nil, UnknownType) - def not(e: Expression) = + def not(e: Expression): DoPrim = DoPrim(PrimOps.Not, Seq(e), Nil, e.tpe) // Convert a port to a FIRRTL expression, handling polarity along the way. diff --git a/src/test/scala/barstools/macros/CostFunction.scala b/src/test/scala/barstools/macros/CostFunction.scala index b3d5b46e6..62ebfcdff 100644 --- a/src/test/scala/barstools/macros/CostFunction.scala +++ b/src/test/scala/barstools/macros/CostFunction.scala @@ -11,9 +11,9 @@ object TestMinWidthMetric extends CostMetric with CostMetricCompanion { // Smaller width = lower cost = favoured override def cost(mem: Macro, lib: Macro): Option[Double] = Some(lib.src.width) - override def commandLineParams = Map() - override def name = "TestMinWidthMetric" - override def construct(m: Map[String, String]) = TestMinWidthMetric + override def commandLineParams() = Map() + override def name() = "TestMinWidthMetric" + override def construct(m: Map[String, String]): CostMetric = TestMinWidthMetric } /** Test that cost metric selection is working. */ @@ -25,7 +25,7 @@ class SelectCostMetric extends MacroCompilerSpec with HasSRAMGenerator { // Cost metrics must be registered for them to work with the command line. 
CostMetric.registerCostMetric(TestMinWidthMetric) - override val costMetric = Some(TestMinWidthMetric) + override val costMetric: Option[CostMetric] = Some(TestMinWidthMetric) val libSRAMs = Seq( SRAMMacro( diff --git a/src/test/scala/barstools/macros/Functional.scala b/src/test/scala/barstools/macros/Functional.scala index 2b0dfbe0e..9366f6f98 100644 --- a/src/test/scala/barstools/macros/Functional.scala +++ b/src/test/scala/barstools/macros/Functional.scala @@ -1,5 +1,6 @@ package barstools.macros +import firrtl.ir.Circuit import firrtl_interpreter.InterpretiveTester // Functional tests on memory compiler outputs. @@ -10,8 +11,8 @@ class SynchronousReadAndWrite extends MacroCompilerSpec with HasSRAMGenerator wi override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) - compile(mem, lib, v, true) - val result = execute(mem, lib, true) + compile(mem, lib, v, synflops = true) + val result: Circuit = execute(mem, lib, synflops = true) it should "run with InterpretedTester" in { pending // Enable this when https://github.com/freechipsproject/firrtl-interpreter/pull/88 is snapshot-published @@ -70,8 +71,8 @@ class DontReadCombinationally extends MacroCompilerSpec with HasSRAMGenerator wi override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) - compile(mem, lib, v, true) - val result = execute(mem, lib, true) + compile(mem, lib, v, synflops = true) + val result: Circuit = execute(mem, lib, synflops = true) it should "run with InterpretedTester" in { pending // Enable this when https://github.com/freechipsproject/firrtl-interpreter/pull/88 is snapshot-published diff --git a/src/test/scala/barstools/macros/MacroCompilerSpec.scala b/src/test/scala/barstools/macros/MacroCompilerSpec.scala index 2b2392272..2cfcaed59 100644 --- a/src/test/scala/barstools/macros/MacroCompilerSpec.scala +++ b/src/test/scala/barstools/macros/MacroCompilerSpec.scala @@ -31,19 +31,18 @@ abstract class MacroCompilerSpec extends AnyFlatSpec with Matchers { private def costMetricCmdLine = { costMetric match { case None => Nil - case Some(m) => { - val name = m.name - val params = m.commandLineParams + case Some(m) => + val name = m.name() + val params = m.commandLineParams() List("-c", name) ++ params.flatMap { case (key, value) => List("-cp", key, value) } - } } } private def args(mem: String, lib: Option[String], v: String, synflops: Boolean, useCompiler: Boolean) = - List("-m", mem.toString, "-v", v) ++ + List("-m", mem, "-v", v) ++ (lib match { case None => Nil - case Some(l) => List("-l", l.toString) + case Some(l) => List("-l", l) }) ++ costMetricCmdLine ++ (if (synflops) List("--mode", "synflops") else Nil) ++ @@ -52,23 +51,23 @@ abstract class MacroCompilerSpec extends AnyFlatSpec with Matchers { // Run the full compiler as if from the command line interface. // Generates the Verilog; useful in testing since an error will throw an // exception. 
- def compile(mem: String, lib: String, v: String, synflops: Boolean) { + def compile(mem: String, lib: String, v: String, synflops: Boolean): Unit = { compile(mem, Some(lib), v, synflops) } - def compile(mem: String, lib: Option[String], v: String, synflops: Boolean, useCompiler: Boolean = false) { - var mem_full = concat(memPrefix, mem) - var lib_full = concat(libPrefix, lib) - var v_full = concat(vPrefix, v) + def compile(mem: String, lib: Option[String], v: String, synflops: Boolean, useCompiler: Boolean = false): Unit = { + val mem_full = concat(memPrefix, mem) + val lib_full = concat(libPrefix, lib) + val v_full = concat(vPrefix, v) MacroCompiler.run(args(mem_full, lib_full, v_full, synflops, useCompiler)) } // Helper functions to write macro libraries to the given files. - def writeToLib(lib: String, libs: Seq[mdf.macrolib.Macro]) = { + def writeToLib(lib: String, libs: Seq[mdf.macrolib.Macro]): Boolean = { mdf.macrolib.Utils.writeMDFToPath(Some(concat(libPrefix, lib)), libs) } - def writeToMem(mem: String, mems: Seq[mdf.macrolib.Macro]) = { + def writeToMem(mem: String, mems: Seq[mdf.macrolib.Macro]): Boolean = { mdf.macrolib.Utils.writeMDFToPath(Some(concat(memPrefix, mem)), mems) } @@ -89,16 +88,16 @@ abstract class MacroCompilerSpec extends AnyFlatSpec with Matchers { // Compare FIRRTL outputs after reparsing output with ScalaTest ("should be"). def test(result: Circuit, output: String): Unit = { val gold = RemoveEmpty.run(parse(output)) - (result.serialize) should be(gold.serialize) + result.serialize should be(gold.serialize) } // Execute the macro compiler and returns a Circuit containing the output of // the memory compiler. def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean): Circuit = - execute(memFile, libFile, synflops, false) + execute(memFile, libFile, synflops, useCompiler = false) def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean, useCompiler: Boolean): Circuit = { - var mem_full = concat(memPrefix, memFile) - var lib_full = concat(libPrefix, libFile) + val mem_full = concat(memPrefix, memFile) + val lib_full = concat(libPrefix, libFile) require(memFile.isDefined) val mems: Seq[Macro] = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(mem_full)).get.map(new Macro(_)) @@ -126,7 +125,7 @@ abstract class MacroCompilerSpec extends AnyFlatSpec with Matchers { new SynFlopsPass(synflops, libs.getOrElse(mems)), RemoveEmpty ) - val result: Circuit = (passes.foldLeft(circuit))((c, pass) => pass.run(c)) + val result: Circuit = passes.foldLeft(circuit)((c, pass) => pass.run(c)) result } @@ -171,7 +170,7 @@ trait HasSRAMGenerator { output = if (read) Some(PolarizedPort(name = realPrefix + "dout", polarity = ActiveHigh)) else None, input = if (write) Some(PolarizedPort(name = realPrefix + "din", polarity = ActiveHigh)) else None, maskPort = maskGran match { - case Some(x: Int) => Some(PolarizedPort(name = realPrefix + "mask", polarity = ActiveHigh)) + case Some(_: Int) => Some(PolarizedPort(name = realPrefix + "mask", polarity = ActiveHigh)) case _ => None }, maskGran = maskGran, @@ -208,16 +207,7 @@ trait HasSRAMGenerator { depth: Option[BigInt], maskGran: Option[Int] = None ): MacroPort = { - generateTestPort( - prefix, - width, - depth, - maskGran = maskGran, - write = true, - writeEnable = true, - read = true, - readEnable = false - ) + generateTestPort(prefix, width, depth, maskGran = maskGran, write = true, writeEnable = true, read = true) } // Generate a "simple" SRAM (active high/positive edge, 1 read-write 
port). @@ -241,12 +231,11 @@ trait HasSRAMGenerator { // Generate a "simple" SRAM group (active high/positive edge, 1 read-write port). def generateSimpleSRAMGroup( - prefix: String, - mux: Int, - depth: Range, - width: Range, - maskGran: Option[Int] = None, - extraPorts: Seq[MacroExtraPort] = List() + prefix: String, + mux: Int, + depth: Range, + width: Range, + maskGran: Option[Int] = None ): SRAMGroup = { SRAMGroup( Seq("mygroup_", "width", "x", "depth", "_", "VT"), @@ -291,7 +280,7 @@ trait HasSimpleTestGenerator { def extraTag: String = "" // "Effective" libMaskGran by considering write_enable. - val effectiveLibMaskGran = libMaskGran.getOrElse(libWidth) + val effectiveLibMaskGran: Int = libMaskGran.getOrElse(libWidth) // Override this in the sub-generator if you need a more specific name. // Defaults to using reflection to pull the name of the test using this @@ -301,23 +290,23 @@ trait HasSimpleTestGenerator { //require (memDepth >= libDepth) // Convenience variables to check if a mask exists. - val memHasMask = memMaskGran != None - val libHasMask = libMaskGran != None + val memHasMask: Boolean = memMaskGran.isDefined + val libHasMask: Boolean = libMaskGran.isDefined // We need to figure out how many mask bits there are in the mem. - val memMaskBits = if (memHasMask) memWidth / memMaskGran.get else 0 - val libMaskBits = if (libHasMask) libWidth / libMaskGran.get else 0 + val memMaskBits: Int = if (memHasMask) memWidth / memMaskGran.get else 0 + val libMaskBits: Int = if (libHasMask) libWidth / libMaskGran.get else 0 - val extraTagPrefixed = if (extraTag == "") "" else ("-" + extraTag) + val extraTagPrefixed: String = if (extraTag == "") "" else "-" + extraTag - val mem = s"mem-${generatorType}${extraTagPrefixed}.json" - val lib = s"lib-${generatorType}${extraTagPrefixed}.json" - val v = s"${generatorType}${extraTagPrefixed}.v" + val mem = s"mem-$generatorType$extraTagPrefixed.json" + val lib = s"lib-$generatorType$extraTagPrefixed.json" + val v = s"$generatorType$extraTagPrefixed.v" lazy val mem_name = "target_memory" - val mem_addr_width = MacroCompilerMath.ceilLog2(memDepth) + val mem_addr_width: Int = MacroCompilerMath.ceilLog2(memDepth) lazy val lib_name = "awesome_lib_mem" - val lib_addr_width = MacroCompilerMath.ceilLog2(libDepth) + val lib_addr_width: Int = MacroCompilerMath.ceilLog2(libDepth) // Override these to change the port prefixes if needed. def libPortPrefix: String = "lib" @@ -325,11 +314,11 @@ trait HasSimpleTestGenerator { // These generate "simple" SRAMs (1 masked read-write port) by default, // but can be overridden if need be. - def generateLibSRAM() = generateSRAM(lib_name, libPortPrefix, libWidth, libDepth, libMaskGran, extraPorts) - def generateMemSRAM() = generateSRAM(mem_name, memPortPrefix, memWidth, memDepth, memMaskGran) + def generateLibSRAM(): SRAMMacro = generateSRAM(lib_name, libPortPrefix, libWidth, libDepth, libMaskGran, extraPorts) + def generateMemSRAM(): SRAMMacro = generateSRAM(mem_name, memPortPrefix, memWidth, memDepth, memMaskGran) - def libSRAM = generateLibSRAM - def memSRAM = generateMemSRAM + def libSRAM: SRAMMacro = generateLibSRAM() + def memSRAM: SRAMMacro = generateMemSRAM() def libSRAMs: Seq[SRAMMacro] = Seq(libSRAM) def memSRAMs: Seq[SRAMMacro] = Seq(memSRAM) @@ -340,18 +329,19 @@ trait HasSimpleTestGenerator { // For masks, width it's a bit tricky since we have to consider cases like // memMaskGran = 4 and libMaskGran = 8. // Consider the actually usable libWidth in cases like the above. 
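// e.g. memMaskGran = 4 with libWidth = 8: usableLibWidth becomes 4, so twice as many width instances are required.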
- val usableLibWidth = if (memMaskGran.getOrElse(Int.MaxValue) < effectiveLibMaskGran) memMaskGran.get else libWidth + val usableLibWidth: Int = + if (memMaskGran.getOrElse(Int.MaxValue) < effectiveLibMaskGran) memMaskGran.get else libWidth // Number of lib instances needed to hold the mem, in both directions. // Round up (e.g. 1.5 instances = effectively 2 instances) - val depthInstances = math.ceil(memDepth.toFloat / libDepth.toFloat).toInt - val widthInstances = math.ceil(memWidth.toFloat / usableLibWidth).toInt + val depthInstances: Int = math.ceil(memDepth.toFloat / libDepth.toFloat).toInt + val widthInstances: Int = math.ceil(memWidth.toFloat / usableLibWidth).toInt // Number of width bits in the last width-direction memory. // e.g. if memWidth = 16 and libWidth = 8, this would be 8 since the last memory 0_1 has 8 bits of input width. // e.g. if memWidth = 9 and libWidth = 8, this would be 1 since the last memory 0_1 has 1 bit of input width. - lazy val lastWidthBits = if (memWidth % usableLibWidth == 0) usableLibWidth else (memWidth % usableLibWidth) - lazy val selectBits = mem_addr_width - lib_addr_width + lazy val lastWidthBits: Int = if (memWidth % usableLibWidth == 0) usableLibWidth else memWidth % usableLibWidth + lazy val selectBits: Int = mem_addr_width - lib_addr_width /** Convenience function to generate a mask statement. * @param widthInst Width instance (mem_0_x) @@ -369,25 +359,25 @@ trait HasSimpleTestGenerator { if (memMaskGran.isEmpty) { // If there is no memory mask, we should just turn all the lib mask // bits high. - s"""mem_${depthInst}_${widthInst}.lib_mask <= UInt<${libMaskBits}>("h${((1 << libMaskBits) - 1).toHexString}")""" + s"""mem_${depthInst}_$widthInst.lib_mask <= UInt<$libMaskBits>("h${((1 << libMaskBits) - 1).toHexString}")""" } else { // Calculate which bit of outer_mask contains the given bit. // e.g. if memMaskGran = 2, libMaskGran = 1 and libWidth = 4, then // calculateMaskBit({0, 1}) = 0 and calculateMaskBit({1, 2}) = 1 def calculateMaskBit(bit: Int): Int = bit / memMaskGran.getOrElse(memWidth) - val bitsArr = ((libMaskBits - 1 to 0 by -1).map(x => { + val bitsArr = (libMaskBits - 1 to 0 by -1).map(x => { if (x * libMaskGran.get > myMemWidth) { // If we have extra mask bits leftover after the effective width, // disable those bits. 
"""UInt<1>("h0")""" } else { val outerMaskBit = calculateMaskBit(x * libMaskGran.get + myBaseBit) - s"bits(outer_mask, ${outerMaskBit}, ${outerMaskBit})" + s"bits(outer_mask, $outerMaskBit, $outerMaskBit)" } - })) + }) val maskVal = bitsArr.reduceRight((bit, rest) => s"cat($bit, $rest)") - s"mem_${depthInst}_${widthInst}.lib_mask <= ${maskVal}" + s"mem_${depthInst}_$widthInst.lib_mask <= $maskVal" } } else "" } @@ -487,7 +477,7 @@ $extraPortsStr require(memSRAM.ports.size == 1, "Header generator only supports single RW port mem") generateReadWriteHeaderPort( memPortPrefix, - memSRAM.ports(0).readEnable.isDefined, + memSRAM.ports.head.readEnable.isDefined, if (memHasMask) Some(memMaskBits) else None ) } @@ -498,7 +488,7 @@ $extraPortsStr s""" circuit $mem_name : module $mem_name : -${generateHeaderPorts} +${generateHeaderPorts()} """ } @@ -507,7 +497,7 @@ ${generateHeaderPorts} require(libSRAM.ports.size == 1, "Footer generator only supports single RW port mem") generateReadWriteFooterPort( libPortPrefix, - libSRAM.ports(0).readEnable.isDefined, + libSRAM.ports.head.readEnable.isDefined, if (libHasMask) Some(libMaskBits) else None, extraPorts.map(p => (p.name, p.width)) ) @@ -517,7 +507,7 @@ ${generateHeaderPorts} def generateFooter(): String = { s""" extmodule $lib_name : -${generateFooterPorts} +${generateFooterPorts()} defname = $lib_name """ @@ -529,13 +519,13 @@ ${generateFooterPorts} // Generate the entire output from header, body, and footer. def generateOutput(): String = { s""" -${generateHeader} -${generateBody} -${generateFooter} +${generateHeader()} +${generateBody()} +${generateFooter()} """ } - val output = generateOutput() + val output: String = generateOutput() } // Use this trait for tests that invoke the memory compiler without lib. @@ -545,12 +535,12 @@ trait HasNoLibTestGenerator extends HasSimpleTestGenerator { // If there isn't a lib, then the "lib" will become a FIRRTL "mem", which // in turn becomes synthesized flops. // Therefore, make "lib" width/depth equal to the mem. - override lazy val libDepth = memDepth - override lazy val libWidth = memWidth - override lazy val lib_name = mem_name + override lazy val libDepth: BigInt = memDepth + override lazy val libWidth: Int = memWidth + override lazy val lib_name: String = mem_name // Do the same for port names. - override lazy val libPortPrefix = memPortPrefix + override lazy val libPortPrefix: String = memPortPrefix // If there is no lib, don't generate a body. 
- override def generateBody = "" + override def generateBody() = "" } diff --git a/src/test/scala/barstools/macros/Masks.scala b/src/test/scala/barstools/macros/Masks.scala index 43d6b3d67..5854eea14 100644 --- a/src/test/scala/barstools/macros/Masks.scala +++ b/src/test/scala/barstools/macros/Masks.scala @@ -23,9 +23,9 @@ class Masks_FourTypes_NonMaskedMem_NonMaskedLib with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 32 - override lazy val memMaskGran = None + override lazy val memMaskGran: Option[Int] = None override lazy val libWidth = 8 - override lazy val libMaskGran = None + override lazy val libMaskGran: Option[Int] = None it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -38,9 +38,9 @@ class Masks_FourTypes_NonMaskedMem_MaskedLib with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 32 - override lazy val memMaskGran = None + override lazy val memMaskGran: Option[Int] = None override lazy val libWidth = 8 - override lazy val libMaskGran = Some(2) + override lazy val libMaskGran: Option[Int] = Some(2) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -53,9 +53,9 @@ class Masks_FourTypes_MaskedMem_NonMaskedLib with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 32 - override lazy val memMaskGran = Some(8) + override lazy val memMaskGran: Option[Int] = Some(8) override lazy val libWidth = 8 - override lazy val libMaskGran = None + override lazy val libMaskGran: Option[Int] = None it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -68,9 +68,9 @@ class Masks_FourTypes_MaskedMem_NonMaskedLib_SmallerMaskGran with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 32 - override lazy val memMaskGran = Some(4) + override lazy val memMaskGran: Option[Int] = Some(4) override lazy val libWidth = 8 - override lazy val libMaskGran = None + override lazy val libMaskGran: Option[Int] = None it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -83,9 +83,9 @@ class Masks_FourTypes_MaskedMem_MaskedLib with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 32 - override lazy val memMaskGran = Some(8) + override lazy val memMaskGran: Option[Int] = Some(8) override lazy val libWidth = 16 - override lazy val libMaskGran = Some(4) + override lazy val libMaskGran: Option[Int] = Some(4) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -98,9 +98,9 @@ class Masks_FourTypes_MaskedMem_MaskedLib_SameMaskGran with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 32 - override lazy val memMaskGran = Some(8) + override lazy val memMaskGran: Option[Int] = Some(8) override lazy val libWidth = 16 - override lazy val libMaskGran = Some(8) + override lazy val libMaskGran: Option[Int] = Some(8) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -113,9 +113,9 @@ class Masks_FourTypes_MaskedMem_MaskedLib_SmallerMaskGran with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 64 - override lazy val memMaskGran = Some(4) + override lazy val memMaskGran: Option[Int] = Some(4) override lazy val libWidth = 32 - override lazy val libMaskGran = Some(8) + 
override lazy val libMaskGran: Option[Int] = Some(8) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -127,9 +127,9 @@ class Masks_FourTypes_MaskedMem_MaskedLib_SmallerMaskGran class Masks_BitMaskedMem_NonMaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { override lazy val depth = BigInt(1024) override lazy val memWidth = 16 - override lazy val memMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(1) override lazy val libWidth = 8 - override lazy val libMaskGran = None + override lazy val libMaskGran: Option[Int] = None it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -144,8 +144,8 @@ class Masks_FPGAStyle_32_8 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 32 - override lazy val memMaskGran = Some(32) - override lazy val libMaskGran = Some(8) + override lazy val memMaskGran: Option[Int] = Some(32) + override lazy val libMaskGran: Option[Int] = Some(8) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -160,8 +160,8 @@ class Masks_PowersOfTwo_8_1 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 64 - override lazy val memMaskGran = Some(8) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(8) + override lazy val libMaskGran: Option[Int] = Some(1) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -174,8 +174,8 @@ class Masks_PowersOfTwo_16_1 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 64 - override lazy val memMaskGran = Some(16) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(16) + override lazy val libMaskGran: Option[Int] = Some(1) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -188,8 +188,8 @@ class Masks_PowersOfTwo_32_1 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 64 - override lazy val memMaskGran = Some(32) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(32) + override lazy val libMaskGran: Option[Int] = Some(1) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -202,8 +202,8 @@ class Masks_PowersOfTwo_64_1 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 64 - override lazy val memMaskGran = Some(64) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(64) + override lazy val libMaskGran: Option[Int] = Some(1) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -218,8 +218,8 @@ class Masks_PowersOfTwo_32_4 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 128 - override lazy val memMaskGran = Some(32) - override lazy val libMaskGran = Some(4) + override lazy val memMaskGran: Option[Int] = Some(32) + override lazy val libMaskGran: Option[Int] = Some(4) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -232,8 +232,8 @@ class Masks_PowersOfTwo_32_8 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 128 - override lazy val memMaskGran = Some(32) - override lazy val libMaskGran = Some(8) + override lazy val memMaskGran: Option[Int] = Some(32) + override lazy val libMaskGran: Option[Int] = 
Some(8) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -246,8 +246,8 @@ class Masks_PowersOfTwo_8_8 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 128 - override lazy val memMaskGran = Some(8) - override lazy val libMaskGran = Some(8) + override lazy val memMaskGran: Option[Int] = Some(8) + override lazy val libMaskGran: Option[Int] = Some(8) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -262,8 +262,8 @@ class Masks_IntegerMaskMultiple_20_10 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 20 - override lazy val memMaskGran = Some(10) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(10) + override lazy val libMaskGran: Option[Int] = Some(1) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -276,8 +276,8 @@ class Masks_IntegerMaskMultiple_21_7 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 21 - override lazy val memMaskGran = Some(21) - override lazy val libMaskGran = Some(7) + override lazy val memMaskGran: Option[Int] = Some(21) + override lazy val libMaskGran: Option[Int] = Some(7) (it should "be enabled when non-power of two masks are supported").is(pending) //~ compileExecuteAndTest(mem, lib, v, output) @@ -289,8 +289,8 @@ class Masks_IntegerMaskMultiple_21_21 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 21 - override lazy val memMaskGran = Some(21) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(21) + override lazy val libMaskGran: Option[Int] = Some(1) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -303,8 +303,8 @@ class Masks_IntegerMaskMultiple_84_21 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 84 - override lazy val memMaskGran = Some(21) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(21) + override lazy val libMaskGran: Option[Int] = Some(1) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -317,8 +317,8 @@ class Masks_IntegerMaskMultiple_92_23 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 92 - override lazy val memMaskGran = Some(23) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(23) + override lazy val libMaskGran: Option[Int] = Some(1) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -331,8 +331,8 @@ class Masks_IntegerMaskMultiple_117_13 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 117 - override lazy val memMaskGran = Some(13) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(13) + override lazy val libMaskGran: Option[Int] = Some(1) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -345,8 +345,8 @@ class Masks_IntegerMaskMultiple_160_20 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 160 - override lazy val memMaskGran = Some(20) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(20) + override lazy val libMaskGran: Option[Int] = Some(1) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ 
-359,8 +359,8 @@ class Masks_IntegerMaskMultiple_184_23 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 184 - override lazy val memMaskGran = Some(23) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(23) + override lazy val libMaskGran: Option[Int] = Some(1) it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) @@ -375,8 +375,8 @@ class Masks_NonIntegerMaskMultiple_32_3 with HasSimpleDepthTestGenerator with MasksTestSettings { override lazy val width = 32 - override lazy val memMaskGran = Some(3) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(3) + override lazy val libMaskGran: Option[Int] = Some(1) (it should "be enabled when non-power of two masks are supported").is(pending) //~ compileExecuteAndTest(mem, lib, v, output) diff --git a/src/test/scala/barstools/macros/MultiPort.scala b/src/test/scala/barstools/macros/MultiPort.scala index 75eb20d73..07903e0ab 100644 --- a/src/test/scala/barstools/macros/MultiPort.scala +++ b/src/test/scala/barstools/macros/MultiPort.scala @@ -8,10 +8,10 @@ class SplitWidth_2rw extends MacroCompilerSpec with HasSRAMGenerator with HasSim override lazy val depth = BigInt(1024) override lazy val memWidth = 64 - override lazy val memMaskGran = Some(16) + override lazy val memMaskGran: Option[Int] = Some(16) override lazy val libWidth = 16 - override def generateMemSRAM() = { + override def generateMemSRAM(): SRAMMacro = { SRAMMacro( name = mem_name, width = memWidth, @@ -42,7 +42,7 @@ class SplitWidth_2rw extends MacroCompilerSpec with HasSRAMGenerator with HasSim ) } - override def generateLibSRAM() = { + override def generateLibSRAM(): SRAMMacro = { SRAMMacro( name = lib_name, width = libWidth, @@ -71,16 +71,20 @@ class SplitWidth_2rw extends MacroCompilerSpec with HasSRAMGenerator with HasSim ) } - override def generateHeaderPorts() = { - generateReadWriteHeaderPort("portA", true, Some(memMaskBits)) + "\n" + generateReadWriteHeaderPort( + override def generateHeaderPorts(): String = { + generateReadWriteHeaderPort("portA", readEnable = true, Some(memMaskBits)) + "\n" + generateReadWriteHeaderPort( "portB", - true, + readEnable = true, Some(memMaskBits) ) } - override def generateFooterPorts() = { - generateReadWriteFooterPort("portA", true, None) + "\n" + generateReadWriteFooterPort("portB", true, None) + override def generateFooterPorts(): String = { + generateReadWriteFooterPort("portA", readEnable = true, None) + "\n" + generateReadWriteFooterPort( + "portB", + readEnable = true, + None + ) } override def generateBody() = @@ -151,10 +155,10 @@ class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasS override lazy val depth = BigInt(1024) override lazy val memWidth = 64 - override lazy val memMaskGran = Some(16) + override lazy val memMaskGran: Option[Int] = Some(16) override lazy val libWidth = 16 - override def generateMemSRAM() = { + override def generateMemSRAM(): SRAMMacro = { SRAMMacro( name = mem_name, width = memWidth, @@ -167,7 +171,6 @@ class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasS Some(memDepth), maskGran = memMaskGran, write = false, - writeEnable = false, read = true, readEnable = true ), @@ -178,14 +181,13 @@ class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasS maskGran = memMaskGran, write = true, writeEnable = true, - read = false, - readEnable = false + read = false ) ) ) } - override def 
generateLibSRAM() = { + override def generateLibSRAM(): SRAMMacro = { SRAMMacro( name = lib_name, width = libWidth, @@ -197,24 +199,15 @@ class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasS libWidth, libDepth, write = false, - writeEnable = false, read = true, readEnable = true ), - generateTestPort( - "portB", - libWidth, - libDepth, - write = true, - writeEnable = true, - read = false, - readEnable = false - ) + generateTestPort("portB", libWidth, libDepth, write = true, writeEnable = true, read = false) ) ) } - override def generateHeaderPorts() = { + override def generateHeaderPorts(): String = { generatePort( "portA", mem_addr_width, @@ -237,7 +230,7 @@ class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasS ) } - override def generateFooterPorts() = { + override def generateFooterPorts(): String = { generatePort( "portA", lib_addr_width, @@ -310,12 +303,12 @@ class SplitWidth_2rw_differentMasks extends MacroCompilerSpec with HasSRAMGenera override lazy val depth = BigInt(1024) override lazy val memWidth = 64 - override lazy val memMaskGran = Some(16) + override lazy val memMaskGran: Option[Int] = Some(16) override lazy val libWidth = 16 lazy val memMaskGranB = 8 // these generators are run at constructor time - override def generateMemSRAM() = { + override def generateMemSRAM(): SRAMMacro = { SRAMMacro( name = mem_name, width = memWidth, @@ -346,7 +339,7 @@ class SplitWidth_2rw_differentMasks extends MacroCompilerSpec with HasSRAMGenera ) } - override def generateLibSRAM() = { + override def generateLibSRAM(): SRAMMacro = { SRAMMacro( name = lib_name, width = libWidth, @@ -375,16 +368,20 @@ class SplitWidth_2rw_differentMasks extends MacroCompilerSpec with HasSRAMGenera ) } - override def generateHeaderPorts() = { - generateReadWriteHeaderPort("portA", true, Some(memMaskBits)) + "\n" + generateReadWriteHeaderPort( + override def generateHeaderPorts(): String = { + generateReadWriteHeaderPort("portA", readEnable = true, Some(memMaskBits)) + "\n" + generateReadWriteHeaderPort( "portB", - true, + readEnable = true, Some(memWidth / memMaskGranB) ) } - override def generateFooterPorts() = { - generateReadWriteFooterPort("portA", true, None) + "\n" + generateReadWriteFooterPort("portB", true, None) + override def generateFooterPorts(): String = { + generateReadWriteFooterPort("portA", readEnable = true, None) + "\n" + generateReadWriteFooterPort( + "portB", + readEnable = true, + None + ) } override def generateBody() = diff --git a/src/test/scala/barstools/macros/SRAMCompiler.scala b/src/test/scala/barstools/macros/SRAMCompiler.scala index 17f496011..750283cea 100644 --- a/src/test/scala/barstools/macros/SRAMCompiler.scala +++ b/src/test/scala/barstools/macros/SRAMCompiler.scala @@ -1,7 +1,9 @@ package barstools.macros +import mdf.macrolib + class SRAMCompiler extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - val compiler = generateSRAMCompiler("awesome", "A") + val compiler: macrolib.SRAMCompiler = generateSRAMCompiler("awesome", "A") val verilog = s"v-SRAMCompiler.v" override lazy val depth = BigInt(16) override lazy val memWidth = 8 @@ -15,5 +17,5 @@ class SRAMCompiler extends MacroCompilerSpec with HasSRAMGenerator with HasSimpl writeToMem(mem, Seq(generateSRAM("mymem", "X", 8, 16))) - compileExecuteAndTest(mem, Some(lib), verilog, output = output, false, true) + compileExecuteAndTest(mem, Some(lib), verilog, output = output, useCompiler = true) } diff --git 
a/src/test/scala/barstools/macros/SimpleSplitDepth.scala b/src/test/scala/barstools/macros/SimpleSplitDepth.scala index 5a7fc77d1..7e02bc1f2 100644 --- a/src/test/scala/barstools/macros/SimpleSplitDepth.scala +++ b/src/test/scala/barstools/macros/SimpleSplitDepth.scala @@ -8,8 +8,8 @@ trait HasSimpleDepthTestGenerator extends HasSimpleTestGenerator { this: MacroCompilerSpec with HasSRAMGenerator => def width: Int - override lazy val memWidth = width - override lazy val libWidth = width + override lazy val memWidth: Int = width + override lazy val libWidth: Int = width // Generate a depth-splitting body. override def generateBody(): String = { @@ -19,37 +19,38 @@ trait HasSimpleDepthTestGenerator extends HasSimpleTestGenerator { output.append( s""" node ${memPortPrefix}_addr_sel = bits(${memPortPrefix}_addr, ${mem_addr_width - 1}, $lib_addr_width) - reg ${memPortPrefix}_addr_sel_reg : UInt<${selectBits}>, ${memPortPrefix}_clk with : + reg ${memPortPrefix}_addr_sel_reg : UInt<$selectBits>, ${memPortPrefix}_clk with : reset => (UInt<1>("h0"), ${memPortPrefix}_addr_sel_reg) ${memPortPrefix}_addr_sel_reg <= mux(UInt<1>("h1"), ${memPortPrefix}_addr_sel, ${memPortPrefix}_addr_sel_reg) """ ) } - for (i <- 0 to depthInstances - 1) { + for (i <- 0 until depthInstances) { + val maskStatement = generateMaskStatement(0, i) val enableIdentifier = - if (selectBits > 0) s"""eq(${memPortPrefix}_addr_sel, UInt<${selectBits}>("h${i.toHexString}"))""" + if (selectBits > 0) s"""eq(${memPortPrefix}_addr_sel, UInt<$selectBits>("h${i.toHexString}"))""" else "UInt<1>(\"h1\")" val chipEnable = s"""UInt<1>("h1")""" val writeEnable = - if (memMaskGran.isEmpty) s"and(${memPortPrefix}_write_en, ${chipEnable})" else s"${memPortPrefix}_write_en" + if (memMaskGran.isEmpty) s"and(${memPortPrefix}_write_en, $chipEnable)" else s"${memPortPrefix}_write_en" output.append( s""" - inst mem_${i}_0 of ${lib_name} + inst mem_${i}_0 of $lib_name mem_${i}_0.${libPortPrefix}_clk <= ${memPortPrefix}_clk mem_${i}_0.${libPortPrefix}_addr <= ${memPortPrefix}_addr node ${memPortPrefix}_dout_${i}_0 = bits(mem_${i}_0.${libPortPrefix}_dout, ${width - 1}, 0) mem_${i}_0.${libPortPrefix}_din <= bits(${memPortPrefix}_din, ${width - 1}, 0) - ${maskStatement} - mem_${i}_0.${libPortPrefix}_write_en <= and(and(${writeEnable}, UInt<1>("h1")), ${enableIdentifier}) - node ${memPortPrefix}_dout_${i} = ${memPortPrefix}_dout_${i}_0 + $maskStatement + mem_${i}_0.${libPortPrefix}_write_en <= and(and($writeEnable, UInt<1>("h1")), $enableIdentifier) + node ${memPortPrefix}_dout_$i = ${memPortPrefix}_dout_${i}_0 """ ) } def generate_outer_dout_tree(i: Int, depthInstances: Int): String = { if (i > depthInstances - 1) { - s"""UInt<${libWidth}>("h0")""" + s"""UInt<$libWidth>("h0")""" } else { s"""mux(eq(${memPortPrefix}_addr_sel_reg, UInt<%d>("h%s")), ${memPortPrefix}_dout_%d, %s)""".format( selectBits, @@ -63,7 +64,7 @@ trait HasSimpleDepthTestGenerator extends HasSimpleTestGenerator { if (selectBits > 0) { output.append(generate_outer_dout_tree(0, depthInstances)) } else { - output.append(s"""mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<${libWidth}>("h0"))""") + output.append(s"""mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<$libWidth>("h0"))""") } output.toString @@ -143,8 +144,8 @@ class SplitDepth2048x32_mrw_lib32 extends MacroCompilerSpec with HasSRAMGenerato override lazy val width = 32 override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran = Some(32) - override lazy val libMaskGran = 
Some(32) + override lazy val memMaskGran: Option[Int] = Some(32) + override lazy val libMaskGran: Option[Int] = Some(32) compileExecuteAndTest(mem, lib, v, output) } @@ -153,8 +154,8 @@ class SplitDepth2048x8_mrw_lib8 extends MacroCompilerSpec with HasSRAMGenerator override lazy val width = 8 override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran = Some(8) - override lazy val libMaskGran = Some(8) + override lazy val memMaskGran: Option[Int] = Some(8) + override lazy val libMaskGran: Option[Int] = Some(8) compileExecuteAndTest(mem, lib, v, output) } @@ -167,8 +168,8 @@ class SplitDepth2048x64_mrw_mem32_lib8 override lazy val width = 64 override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran = Some(32) - override lazy val libMaskGran = Some(8) + override lazy val memMaskGran: Option[Int] = Some(32) + override lazy val libMaskGran: Option[Int] = Some(8) compileExecuteAndTest(mem, lib, v, output) } @@ -181,8 +182,8 @@ class SplitDepth2048x32_mrw_mem16_lib1 override lazy val width = 32 override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran = Some(16) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(16) + override lazy val libMaskGran: Option[Int] = Some(1) compileExecuteAndTest(mem, lib, v, output) } @@ -191,8 +192,8 @@ class SplitDepth2048x32_mrw_mem8_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val width = 32 override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran = Some(8) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(8) + override lazy val libMaskGran: Option[Int] = Some(1) compileExecuteAndTest(mem, lib, v, output) } @@ -201,8 +202,8 @@ class SplitDepth2048x32_mrw_mem4_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val width = 32 override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran = Some(4) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(4) + override lazy val libMaskGran: Option[Int] = Some(1) compileExecuteAndTest(mem, lib, v, output) } @@ -211,8 +212,8 @@ class SplitDepth2048x32_mrw_mem2_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val width = 32 override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran = Some(2) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(2) + override lazy val libMaskGran: Option[Int] = Some(1) compileExecuteAndTest(mem, lib, v, output) } @@ -222,8 +223,8 @@ class SplitDepth2048x32_mrw_mem3_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val width = 32 override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran = Some(3) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(3) + override lazy val libMaskGran: Option[Int] = Some(1) (it should "be enabled when non-power of two masks are supported").is(pending) //compileExecuteAndTest(mem, lib, v, output) @@ -233,8 +234,8 @@ class SplitDepth2048x32_mrw_mem7_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val width = 32 override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) - 
override lazy val memMaskGran = Some(7) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(7) + override lazy val libMaskGran: Option[Int] = Some(1) (it should "be enabled when non-power of two masks are supported").is(pending) //compileExecuteAndTest(mem, lib, v, output) @@ -244,8 +245,8 @@ class SplitDepth2048x32_mrw_mem9_lib1 extends MacroCompilerSpec with HasSRAMGene override lazy val width = 32 override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran = Some(9) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(9) + override lazy val libMaskGran: Option[Int] = Some(1) (it should "be enabled when non-power of two masks are supported").is(pending) //compileExecuteAndTest(mem, lib, v, output) @@ -318,8 +319,8 @@ class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGener lazy val memDepth = BigInt(2048) lazy val libDepth = BigInt(1024) - override val memPrefix = testDir - override val libPrefix = testDir + override val memPrefix: String = testDir + override val libPrefix: String = testDir import mdf.macrolib._ @@ -476,11 +477,11 @@ class SplitDepth_SplitPortsMasked extends MacroCompilerSpec with HasSRAMGenerato lazy val width = 8 lazy val memDepth = BigInt(2048) lazy val libDepth = BigInt(1024) - lazy val memMaskGran = Some(8) - lazy val libMaskGran = Some(1) + lazy val memMaskGran: Option[Int] = Some(8) + lazy val libMaskGran: Option[Int] = Some(1) - override val memPrefix = testDir - override val libPrefix = testDir + override val memPrefix: String = testDir + override val libPrefix: String = testDir import mdf.macrolib._ diff --git a/src/test/scala/barstools/macros/SimpleSplitWidth.scala b/src/test/scala/barstools/macros/SimpleSplitWidth.scala index 3cd0a6df0..3dffc66f5 100644 --- a/src/test/scala/barstools/macros/SimpleSplitWidth.scala +++ b/src/test/scala/barstools/macros/SimpleSplitWidth.scala @@ -7,23 +7,23 @@ trait HasSimpleWidthTestGenerator extends HasSimpleTestGenerator { this: MacroCompilerSpec with HasSRAMGenerator => def depth: BigInt - override lazy val memDepth = depth - override lazy val libDepth = depth + override lazy val memDepth: BigInt = depth + override lazy val libDepth: BigInt = depth override def generateBody(): String = { val output = new StringBuilder // Generate mem_0_ lines for number of width instances. output.append( - ((0 to widthInstances - 1).map { i: Int => + (0 until widthInstances).map { i: Int => s""" - inst mem_0_${i} of ${lib_name} + inst mem_0_$i of $lib_name """ - }).reduceLeft(_ + _) + }.reduceLeft(_ + _) ) // Generate submemory connection blocks. - output.append((for (i <- 0 to widthInstances - 1) yield { + output.append((for (i <- 0 until widthInstances) yield { // Width of this submemory. val myMemWidth = if (i == widthInstances - 1) lastWidthBits else usableLibWidth // Base bit of this submemory. @@ -37,34 +37,34 @@ trait HasSimpleWidthTestGenerator extends HasSimpleTestGenerator { // lib does not. 
val writeEnableBit = if (libMaskGran.isEmpty && memMaskGran.isDefined) { val outerMaskBit = myBaseBit / memMaskGran.get - s"bits(outer_mask, ${outerMaskBit}, ${outerMaskBit})" + s"bits(outer_mask, $outerMaskBit, $outerMaskBit)" } else """UInt<1>("h1")""" val chipEnable = s"""UInt<1>("h1")""" val writeEnableExpr = - if (libMaskGran.isEmpty) s"and(${memPortPrefix}_write_en, ${chipEnable})" else s"${memPortPrefix}_write_en" + if (libMaskGran.isEmpty) s"and(${memPortPrefix}_write_en, $chipEnable)" else s"${memPortPrefix}_write_en" s""" - mem_0_${i}.${libPortPrefix}_clk <= ${memPortPrefix}_clk - mem_0_${i}.${libPortPrefix}_addr <= ${memPortPrefix}_addr - node ${memPortPrefix}_dout_0_${i} = bits(mem_0_${i}.${libPortPrefix}_dout, ${myMemWidth - 1}, 0) - mem_0_${i}.${libPortPrefix}_din <= bits(${memPortPrefix}_din, ${myBaseBit + myMemWidth - 1}, ${myBaseBit}) - ${maskStatement} - mem_0_${i}.${libPortPrefix}_write_en <= and(and(${writeEnableExpr}, ${writeEnableBit}), UInt<1>("h1")) + mem_0_$i.${libPortPrefix}_clk <= ${memPortPrefix}_clk + mem_0_$i.${libPortPrefix}_addr <= ${memPortPrefix}_addr + node ${memPortPrefix}_dout_0_$i = bits(mem_0_$i.${libPortPrefix}_dout, ${myMemWidth - 1}, 0) + mem_0_$i.${libPortPrefix}_din <= bits(${memPortPrefix}_din, ${myBaseBit + myMemWidth - 1}, $myBaseBit) + $maskStatement + mem_0_$i.${libPortPrefix}_write_en <= and(and($writeEnableExpr, $writeEnableBit), UInt<1>("h1")) """ }).reduceLeft(_ + _)) // Generate final output that concats together the sub-memories. // e.g. cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0)) output.append { - val doutStatements = ((widthInstances - 1 to 0 by -1).map(i => s"${memPortPrefix}_dout_0_${i}")) + val doutStatements = (widthInstances - 1 to 0 by -1).map(i => s"${memPortPrefix}_dout_0_$i") val catStmt = doutStatements.init.foldRight(doutStatements.last)((l: String, r: String) => s"cat($l, $r)") s""" - node ${memPortPrefix}_dout_0 = ${catStmt} + node ${memPortPrefix}_dout_0 = $catStmt """ } output.append(s""" - ${memPortPrefix}_dout <= mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<${memWidth}>("h0")) + ${memPortPrefix}_dout <= mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<$memWidth>("h0")) """) output.toString } @@ -276,8 +276,8 @@ class SplitWidth1024x8_memGran_8_libGran_1_rw override lazy val depth = BigInt(1024) override lazy val memWidth = 8 override lazy val libWidth = 8 - override lazy val memMaskGran = Some(8) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(8) + override lazy val libMaskGran: Option[Int] = Some(1) compileExecuteAndTest(mem, lib, v, output) } @@ -289,8 +289,8 @@ class SplitWidth1024x16_memGran_8_libGran_1_rw override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 - override lazy val memMaskGran = Some(8) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(8) + override lazy val libMaskGran: Option[Int] = Some(1) compileExecuteAndTest(mem, lib, v, output) } @@ -302,8 +302,8 @@ class SplitWidth1024x16_memGran_8_libGran_8_rw override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 - override lazy val memMaskGran = Some(8) - override lazy val libMaskGran = Some(8) + override lazy val memMaskGran: Option[Int] = Some(8) + override lazy val libMaskGran: Option[Int] = Some(8) compileExecuteAndTest(mem, lib, v, output) } @@ -315,8 +315,8 @@ class SplitWidth1024x128_memGran_8_libGran_1_rw override lazy val depth = BigInt(1024) 
override lazy val memWidth = 128 override lazy val libWidth = 32 - override lazy val memMaskGran = Some(8) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(8) + override lazy val libMaskGran: Option[Int] = Some(1) compileExecuteAndTest(mem, lib, v, output) } @@ -328,8 +328,8 @@ class SplitWidth1024x16_memGran_4_libGran_1_rw override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 - override lazy val memMaskGran = Some(4) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(4) + override lazy val libMaskGran: Option[Int] = Some(1) compileExecuteAndTest(mem, lib, v, output) } @@ -341,8 +341,8 @@ class SplitWidth1024x16_memGran_2_libGran_1_rw override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 - override lazy val memMaskGran = Some(2) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(2) + override lazy val libMaskGran: Option[Int] = Some(1) compileExecuteAndTest(mem, lib, v, output) } @@ -354,8 +354,8 @@ class SplitWidth1024x16_memGran_16_libGran_1_rw override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 - override lazy val memMaskGran = Some(16) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(16) + override lazy val libMaskGran: Option[Int] = Some(1) compileExecuteAndTest(mem, lib, v, output) } @@ -366,7 +366,7 @@ class SplitWidth1024x16_libGran_8_rw extends MacroCompilerSpec with HasSRAMGener override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 - override lazy val libMaskGran = Some(8) + override lazy val libMaskGran: Option[Int] = Some(8) compileExecuteAndTest(mem, lib, v, output) } @@ -375,7 +375,7 @@ class SplitWidth1024x16_libGran_1_rw extends MacroCompilerSpec with HasSRAMGener override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 - override lazy val libMaskGran = Some(1) + override lazy val libMaskGran: Option[Int] = Some(1) compileExecuteAndTest(mem, lib, v, output) } @@ -389,8 +389,8 @@ class SplitWidth1024x16_memGran_8_libGran_2_rw override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 - override lazy val memMaskGran = Some(8) - override lazy val libMaskGran = Some(2) + override lazy val memMaskGran: Option[Int] = Some(8) + override lazy val libMaskGran: Option[Int] = Some(2) compileExecuteAndTest(mem, lib, v, output) } @@ -404,8 +404,8 @@ class SplitWidth1024x16_memGran_9_libGran_1_rw override lazy val depth = BigInt(1024) override lazy val memWidth = 16 override lazy val libWidth = 8 - override lazy val memMaskGran = Some(9) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(9) + override lazy val libMaskGran: Option[Int] = Some(1) (it should "be enabled when non-power of two masks are supported").is(pending) //~ compile(mem, lib, v, false) @@ -424,7 +424,7 @@ class SplitWidth1024x32_readEnable_Lib override lazy val memWidth = 32 override lazy val libWidth = 8 - override def generateLibSRAM() = { + override def generateLibSRAM(): SRAMMacro = { SRAMMacro( name = lib_name, width = libWidth, @@ -492,7 +492,7 @@ class SplitWidth1024x32_readEnable_Mem override lazy val memWidth = 32 override lazy val libWidth = 8 - override def generateMemSRAM() = { + override def generateMemSRAM(): 
SRAMMacro = { SRAMMacro( name = mem_name, width = memWidth, @@ -528,7 +528,7 @@ class SplitWidth1024x32_readEnable_LibMem override lazy val memWidth = 32 override lazy val libWidth = 8 - override def generateLibSRAM() = { + override def generateLibSRAM(): SRAMMacro = { SRAMMacro( name = lib_name, width = libWidth, @@ -549,7 +549,7 @@ class SplitWidth1024x32_readEnable_LibMem ) } - override def generateMemSRAM() = { + override def generateMemSRAM(): SRAMMacro = { SRAMMacro( name = mem_name, width = memWidth, diff --git a/src/test/scala/barstools/macros/SpecificExamples.scala b/src/test/scala/barstools/macros/SpecificExamples.scala index 01d08de72..b0e2467f3 100644 --- a/src/test/scala/barstools/macros/SpecificExamples.scala +++ b/src/test/scala/barstools/macros/SpecificExamples.scala @@ -1,7 +1,8 @@ // See LICENSE for license details. package barstools.macros -import mdf.macrolib._ +import firrtl.FileUtils +import mdf.macrolib.{Constant, MacroExtraPort, SRAMMacro} // Specific one-off tests to run, not created by a generator. @@ -17,7 +18,7 @@ class GenerateSomeVerilog extends MacroCompilerSpec with HasSRAMGenerator with H } it should "generate non-empty verilog" in { - val verilog = scala.io.Source.fromFile(vPrefix + "/" + v).getLines().mkString("\n") + val verilog = FileUtils.getText(vPrefix + "/" + v) verilog.isEmpty shouldBe false } } @@ -29,7 +30,7 @@ class WriteEnableTest extends MacroCompilerSpec with HasSRAMGenerator { override val libPrefix = "src/test/resources" - val memSRAMs = mdf.macrolib.Utils + val memSRAMs: Seq[mdf.macrolib.Macro] = mdf.macrolib.Utils .readMDFFromString(""" [ { "type" : "sram", @@ -53,7 +54,7 @@ class WriteEnableTest extends MacroCompilerSpec with HasSRAMGenerator { } ], "family" : "1rw" } ] -""").getOrElse(List()) +""").getOrElse(Seq()) writeToMem(mem, memSRAMs) @@ -101,7 +102,7 @@ class MaskPortTest extends MacroCompilerSpec with HasSRAMGenerator { override val libPrefix = "src/test/resources" - val memSRAMs = mdf.macrolib.Utils + val memSRAMs: Seq[mdf.macrolib.Macro] = mdf.macrolib.Utils .readMDFFromString(""" [ { "type" : "sram", @@ -175,7 +176,7 @@ circuit cc_dir_ext : defname = fake_mem """ - it should "compile, exectue, and test" in { + it should "compile, execute, and test" in { compileExecuteAndTest(mem, lib, v, output) } } @@ -187,7 +188,7 @@ class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { override val libPrefix = "src/test/resources" - val memSRAMs = mdf.macrolib.Utils + val memSRAMs: Seq[mdf.macrolib.Macro] = mdf.macrolib.Utils .readMDFFromString(""" [ { "type" : "sram", @@ -1461,7 +1462,7 @@ class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { ) ) - val memSRAMs = mdf.macrolib.Utils + val memSRAMs: Seq[mdf.macrolib.Macro] = mdf.macrolib.Utils .readMDFFromString(""" [ { diff --git a/src/test/scala/barstools/macros/SynFlops.scala b/src/test/scala/barstools/macros/SynFlops.scala index 0d39220ec..16a3446c7 100644 --- a/src/test/scala/barstools/macros/SynFlops.scala +++ b/src/test/scala/barstools/macros/SynFlops.scala @@ -4,27 +4,27 @@ package barstools.macros trait HasSynFlopsTestGenerator extends HasSimpleTestGenerator { this: MacroCompilerSpec with HasSRAMGenerator => - def generateFlops: String = { + def generateFlops(): String = { s""" - inst mem_0_0 of split_${lib_name} + inst mem_0_0 of split_$lib_name mem_0_0.${libPortPrefix}_clk <= ${libPortPrefix}_clk mem_0_0.${libPortPrefix}_addr <= ${libPortPrefix}_addr node ${libPortPrefix}_dout_0_0 = bits(mem_0_0.${libPortPrefix}_dout, ${libWidth - 1}, 0) 
mem_0_0.${libPortPrefix}_din <= bits(${libPortPrefix}_din, ${libWidth - 1}, 0) mem_0_0.${libPortPrefix}_write_en <= and(and(and(${libPortPrefix}_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) node ${libPortPrefix}_dout_0 = ${libPortPrefix}_dout_0_0 - ${libPortPrefix}_dout <= mux(UInt<1>("h1"), ${libPortPrefix}_dout_0, UInt<${libWidth}>("h0")) + ${libPortPrefix}_dout <= mux(UInt<1>("h1"), ${libPortPrefix}_dout_0, UInt<$libWidth>("h0")) - module split_${lib_name} : - input ${libPortPrefix}_addr : UInt<${lib_addr_width}> + module split_$lib_name : + input ${libPortPrefix}_addr : UInt<$lib_addr_width> input ${libPortPrefix}_clk : Clock - input ${libPortPrefix}_din : UInt<${libWidth}> - output ${libPortPrefix}_dout : UInt<${libWidth}> + input ${libPortPrefix}_din : UInt<$libWidth> + output ${libPortPrefix}_dout : UInt<$libWidth> input ${libPortPrefix}_write_en : UInt<1> mem ram : - data-type => UInt<${libWidth}> - depth => ${libDepth} + data-type => UInt<$libWidth> + depth => $libDepth read-latency => 1 write-latency => 1 readwriter => RW_0 @@ -40,20 +40,24 @@ trait HasSynFlopsTestGenerator extends HasSimpleTestGenerator { } // If there is no lib, put the flops definition into the body. - abstract override def generateBody = { - if (this.isInstanceOf[HasNoLibTestGenerator]) generateFlops else super.generateBody + abstract override def generateBody(): String = { + if (this.isInstanceOf[HasNoLibTestGenerator]) { + generateFlops() + } else { + super.generateBody() + } } // If there is no lib, don't generate a footer, since the flops definition // will be in the body. - override def generateFooter = { + override def generateFooter(): String = { if (this.isInstanceOf[HasNoLibTestGenerator]) "" else s""" - module ${lib_name} : -${generateFooterPorts} + module $lib_name : +${generateFooterPorts()} -${generateFlops} +${generateFlops()} """ } @@ -67,7 +71,7 @@ class Synflops2048x8_noLib override lazy val memDepth = BigInt(2048) override lazy val memWidth = 8 - compileExecuteAndTest(mem, None, v, output, true) + compileExecuteAndTest(mem, None, v, output, synflops = true) } class Synflops2048x16_noLib @@ -78,7 +82,7 @@ class Synflops2048x16_noLib override lazy val memDepth = BigInt(2048) override lazy val memWidth = 16 - compileExecuteAndTest(mem, None, v, output, true) + compileExecuteAndTest(mem, None, v, output, synflops = true) } class Synflops8192x16_noLib @@ -89,7 +93,7 @@ class Synflops8192x16_noLib override lazy val memDepth = BigInt(8192) override lazy val memWidth = 16 - compileExecuteAndTest(mem, None, v, output, true) + compileExecuteAndTest(mem, None, v, output, synflops = true) } class Synflops2048x16_depth_Lib @@ -101,7 +105,7 @@ class Synflops2048x16_depth_Lib override lazy val libDepth = BigInt(1024) override lazy val width = 16 - compileExecuteAndTest(mem, lib, v, output, true) + compileExecuteAndTest(mem, lib, v, output, synflops = true) } class Synflops2048x64_width_Lib @@ -113,7 +117,7 @@ class Synflops2048x64_width_Lib override lazy val libWidth = 8 override lazy val depth = BigInt(1024) - compileExecuteAndTest(mem, lib, v, output, true) + compileExecuteAndTest(mem, lib, v, output, synflops = true) } class Synflops_SplitPorts_Read_Write @@ -127,7 +131,7 @@ class Synflops_SplitPorts_Read_Write override lazy val libDepth = BigInt(1024) override lazy val width = 8 - override def generateLibSRAM = SRAMMacro( + override def generateLibSRAM(): SRAMMacro = SRAMMacro( name = lib_name, width = width, depth = libDepth, @@ -138,7 +142,7 @@ class Synflops_SplitPorts_Read_Write ) ) - 
override def generateMemSRAM = SRAMMacro( + override def generateMemSRAM(): SRAMMacro = SRAMMacro( name = mem_name, width = width, depth = memDepth, @@ -149,7 +153,7 @@ class Synflops_SplitPorts_Read_Write ) ) - override def generateHeader = + override def generateHeader() = """ circuit target_memory : module target_memory : @@ -162,7 +166,7 @@ circuit target_memory : input outerA_write_en : UInt<1> """ - override def generateBody = + override def generateBody() = """ node outerB_addr_sel = bits(outerB_addr, 10, 10) reg outerB_addr_sel_reg : UInt<1>, outerB_clk with : @@ -190,7 +194,7 @@ circuit target_memory : outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<8>("h0"))) """ - override def generateFooterPorts = + override def generateFooterPorts() = """ input innerA_addr : UInt<10> input innerA_clk : Clock @@ -201,7 +205,7 @@ circuit target_memory : input innerB_write_en : UInt<1> """ - override def generateFlops = + override def generateFlops() = """ inst mem_0_0 of split_awesome_lib_mem mem_0_0.innerB_clk <= innerB_clk @@ -243,7 +247,7 @@ circuit target_memory : """ "Non-masked split lib; split mem" should "syn flops fine" in { - compileExecuteAndTest(mem, lib, v, output, true) + compileExecuteAndTest(mem, lib, v, output, synflops = true) } } @@ -257,10 +261,10 @@ class Synflops_SplitPorts_MaskedMem_Read_MaskedWrite override lazy val memDepth = BigInt(2048) override lazy val libDepth = BigInt(1024) override lazy val width = 8 - override lazy val memMaskGran = Some(8) - override lazy val libMaskGran = Some(1) + override lazy val memMaskGran: Option[Int] = Some(8) + override lazy val libMaskGran: Option[Int] = Some(1) - override def generateLibSRAM = SRAMMacro( + override def generateLibSRAM(): SRAMMacro = SRAMMacro( name = lib_name, width = width, depth = libDepth, @@ -271,7 +275,7 @@ class Synflops_SplitPorts_MaskedMem_Read_MaskedWrite ) ) - override def generateMemSRAM = SRAMMacro( + override def generateMemSRAM(): SRAMMacro = SRAMMacro( name = mem_name, width = width, depth = memDepth, @@ -282,7 +286,7 @@ class Synflops_SplitPorts_MaskedMem_Read_MaskedWrite ) ) - override def generateHeader = + override def generateHeader() = """ circuit target_memory : module target_memory : @@ -296,7 +300,7 @@ circuit target_memory : input outerA_mask : UInt<1> """ - override def generateBody = + override def generateBody() = """ node outerB_addr_sel = bits(outerB_addr, 10, 10) reg outerB_addr_sel_reg : UInt<1>, outerB_clk with : @@ -326,7 +330,7 @@ circuit target_memory : outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<8>("h0"))) """ - override def generateFooterPorts = + override def generateFooterPorts() = """ input innerA_addr : UInt<10> input innerA_clk : Clock @@ -338,7 +342,7 @@ circuit target_memory : input innerB_mask : UInt<8> """ - override def generateFlops = + override def generateFlops() = """ inst mem_0_0 of split_awesome_lib_mem inst mem_0_1 of split_awesome_lib_mem @@ -446,6 +450,6 @@ circuit target_memory : """ "masked split lib; masked split mem" should "syn flops fine" in { - compileExecuteAndTest(mem, lib, v, output, true) + compileExecuteAndTest(mem, lib, v, output, synflops = true) } } diff --git a/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala b/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala index b953d170c..a4dcd7802 100644 --- 
a/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala +++ b/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala @@ -17,7 +17,7 @@ class BlackBoxInverter extends ExtModule { val out = IO(Output(Bool())) } -class GenerateExampleModule extends MultiIOModule { +class GenerateExampleModule extends Module { val in = IO(Input(Bool())) val out = IO(Output(Bool())) @@ -30,7 +30,7 @@ class GenerateExampleModule extends MultiIOModule { out := reg } -class ToBeMadeExternal extends MultiIOModule { +class ToBeMadeExternal extends Module { val in = IO(Input(Bool())) val out = IO(Output(Bool())) @@ -39,7 +39,7 @@ class ToBeMadeExternal extends MultiIOModule { out := reg } -class GenerateExampleTester extends MultiIOModule { +class GenerateExampleTester extends Module { val success = IO(Output(Bool())) val mod = Module(new GenerateExampleModule) From db54d55074c72470b4916867498a786988ba28b2 Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 17 Aug 2021 16:14:32 -0700 Subject: [PATCH 230/273] This file seems to have missed a scalafmt pass --- src/main/scala/barstools/macros/Utils.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/scala/barstools/macros/Utils.scala b/src/main/scala/barstools/macros/Utils.scala index 1b4fa8fe5..2bcd116fb 100644 --- a/src/main/scala/barstools/macros/Utils.scala +++ b/src/main/scala/barstools/macros/Utils.scala @@ -55,8 +55,8 @@ class Macro(srcMacro: SRAMMacro) { val firrtlPorts: Seq[FirrtlMacroPort] = srcMacro.ports.map { new FirrtlMacroPort(_) } - val writers: Seq[FirrtlMacroPort] = firrtlPorts.filter(p => p.isWriter) - val readers: Seq[FirrtlMacroPort] = firrtlPorts.filter(p => p.isReader) + val writers: Seq[FirrtlMacroPort] = firrtlPorts.filter(p => p.isWriter) + val readers: Seq[FirrtlMacroPort] = firrtlPorts.filter(p => p.isReader) val readwriters: Seq[FirrtlMacroPort] = firrtlPorts.filter(p => p.isReadWriter) val sortedPorts: Seq[FirrtlMacroPort] = writers ++ readers ++ readwriters From c519b269d8590fc2f7f922017d7365a17aa79302 Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 17 Aug 2021 16:25:24 -0700 Subject: [PATCH 231/273] Fix scalafmt check --- .github/workflows/run-ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/run-ci.yml b/.github/workflows/run-ci.yml index 78b6b9e68..0d13780e5 100644 --- a/.github/workflows/run-ci.yml +++ b/.github/workflows/run-ci.yml @@ -29,6 +29,10 @@ jobs: name: Documentation and formatting runs-on: ubuntu-latest steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup Scala + uses: olafurpg/setup-scala@v10 - name: Check Formatting run: sbt scalafmtCheckAll From 6f62c5844865c21e301a3178e7ba2abf49040a46 Mon Sep 17 00:00:00 2001 From: chick Date: Tue, 17 Aug 2021 16:32:18 -0700 Subject: [PATCH 232/273] Oops, missed `needs` in all steps passed --- .github/workflows/run-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/run-ci.yml b/.github/workflows/run-ci.yml index 0d13780e5..bee783810 100644 --- a/.github/workflows/run-ci.yml +++ b/.github/workflows/run-ci.yml @@ -39,5 +39,6 @@ jobs: all_test_passed: name: "all tests passed" runs-on: ubuntu-latest + needs: [test, doc] steps: - run: echo Success From 4f1f9fce47a41d7d1a3e1eb1f065dce03d5873ea Mon Sep 17 00:00:00 2001 From: Tynan McAuley Date: Wed, 2 Feb 2022 15:37:28 -0800 Subject: [PATCH 233/273] Remove sbt subproject "tapeout" Now barstools should be easier to compose in multi-project sbt projects, such as Chipyard. Resolves ucb-bar/barstools#117. 
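A minimal sketch of what that composition could look like from a downstream superproject's build.sbt, assuming a hypothetical `tools/barstools` checkout path and a hypothetical `chip` project name (neither is defined by this commit):

```
// Hypothetical downstream build.sbt sketch; the tools/barstools path and the "chip" name are illustrative only.
// RootProject points at barstools' own root build, which after this change is a single sbt project.
lazy val barstools = RootProject(file("tools/barstools"))

lazy val chip = (project in file("."))
  .dependsOn(barstools) // puts barstools classes on the superproject's compile classpath
```

Chipyard may wire this up differently in practice; the point is only that a single root project is simpler to reference from another build than nested subprojects.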
--- build.sbt | 57 +++++++++++++++++++++---------------------------------- 1 file changed, 22 insertions(+), 35 deletions(-) diff --git a/build.sbt b/build.sbt index ca8d06393..67d09427e 100644 --- a/build.sbt +++ b/build.sbt @@ -5,39 +5,26 @@ val defaultVersions = Map( "chisel-iotesters" -> "2.5-SNAPSHOT" ) -lazy val commonSettings = Seq( - organization := "edu.berkeley.cs", - version := "0.4-SNAPSHOT", - scalaVersion := "2.12.13", - crossScalaVersions := Seq("2.12.13", "2.13.6"), - scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls"), - libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { - dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) - }, - libraryDependencies ++= Seq( - "com.typesafe.play" %% "play-json" % "2.9.2", - "org.scalatest" %% "scalatest" % "3.2.9" % "test", - "org.apache.logging.log4j" % "log4j-api" % "2.11.2", - "org.apache.logging.log4j" % "log4j-core" % "2.11.2" - ), - resolvers ++= Seq( - Resolver.sonatypeRepo("snapshots"), - Resolver.sonatypeRepo("releases"), - Resolver.mavenLocal - ) +organization := "edu.berkeley.cs" +version := "0.4-SNAPSHOT" +name := "tapeout" +scalaVersion := "2.12.13" +crossScalaVersions := Seq("2.12.13", "2.13.6") +scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls") +Test / scalacOptions ++= Seq("-language:reflectiveCalls") +fork := true +mainClass := Some("barstools.macros.MacroCompiler") +libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { + dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) +} +libraryDependencies ++= Seq( + "com.typesafe.play" %% "play-json" % "2.9.2", + "org.scalatest" %% "scalatest" % "3.2.9" % "test", + "org.apache.logging.log4j" % "log4j-api" % "2.11.2", + "org.apache.logging.log4j" % "log4j-core" % "2.11.2" +) +resolvers ++= Seq( + Resolver.sonatypeRepo("snapshots"), + Resolver.sonatypeRepo("releases"), + Resolver.mavenLocal ) - -//disablePlugins(sbtassembly.AssemblyPlugin) -// -//enablePlugins(sbtassembly.AssemblyPlugin) - -lazy val tapeout = (project in file(".")) - .settings(commonSettings) - .settings(scalacOptions in Test ++= Seq("-language:reflectiveCalls")) - .settings(fork := true) - .settings( - mainClass := Some("barstools.macros.MacroCompiler") - ) - .enablePlugins(sbtassembly.AssemblyPlugin) - -//lazy val root = (project in file(".")).aggregate(tapeout) From d1de92d28787d4fb395e0e631a5c68fa31b02227 Mon Sep 17 00:00:00 2001 From: Tynan McAuley Date: Wed, 2 Feb 2022 15:45:36 -0800 Subject: [PATCH 234/273] Make readme consistent with new sbt setup --- README.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 01e263c3e..30d55252c 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ Barstools ![Test](https://github.com/freechipsproject/ucb-bar/barstools/Test/badge.svg) -**Barstools** is a coolection of useful utilities for BAR projects +**Barstools** is a collection of useful utilities for BAR projects Passes/Transforms that could be useful if added here: * Check that a module was de-duplicated. Useful for MIM CAD flows and currently done in python. @@ -18,16 +18,13 @@ Example Usage: ``` sbt > compile -> project tapeout > runMain barstools.tapeout.transforms.GenerateTop -i .fir -o .v --syn-top --harness-top ``` Building the macro compiler JAR: ``` $ sbt [...] 
-[info] Set current project to root (in build file:/mnt/data/dev/barstools_pcad/) -> project macros -[info] Set current project to macros (in build file:/mnt/data/dev/barstools_pcad/) +[info] Set current project to tapeout (in build file:/mnt/data/dev/barstools_pcad/) > assembly [...] [info] SHA-1: 77d4c759c825fd0ea93dfec26dbbb649f6cd5c89 From a0d1fdb16c04957d9c7f48415fd6c77479259094 Mon Sep 17 00:00:00 2001 From: Tynan McAuley Date: Wed, 2 Feb 2022 17:31:52 -0800 Subject: [PATCH 235/273] Add Chisel compiler plugin See here for more info: https://github.com/chipsalliance/chisel3#build-your-own-chisel-projects --- build.sbt | 1 + 1 file changed, 1 insertion(+) diff --git a/build.sbt b/build.sbt index 67d09427e..8ce174473 100644 --- a/build.sbt +++ b/build.sbt @@ -23,6 +23,7 @@ libraryDependencies ++= Seq( "org.apache.logging.log4j" % "log4j-api" % "2.11.2", "org.apache.logging.log4j" % "log4j-core" % "2.11.2" ) +addCompilerPlugin("edu.berkeley.cs" % "chisel3-plugin" % defaultVersions("chisel3") cross CrossVersion.full) resolvers ++= Seq( Resolver.sonatypeRepo("snapshots"), Resolver.sonatypeRepo("releases"), From adaca59416294898611f3fa2401888526e8bf95a Mon Sep 17 00:00:00 2001 From: Tynan McAuley Date: Mon, 7 Feb 2022 20:47:33 -0800 Subject: [PATCH 236/273] Bump Chisel versions to x.5.1 --- build.sbt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.sbt b/build.sbt index 8ce174473..1a9727640 100644 --- a/build.sbt +++ b/build.sbt @@ -1,8 +1,8 @@ // See LICENSE for license details. val defaultVersions = Map( - "chisel3" -> "3.5-SNAPSHOT", - "chisel-iotesters" -> "2.5-SNAPSHOT" + "chisel3" -> "3.5.1", + "chisel-iotesters" -> "2.5.1" ) organization := "edu.berkeley.cs" From 2635bb4f80131ea7355abf18e6fcd06574bd49ab Mon Sep 17 00:00:00 2001 From: abejgonzalez Date: Sat, 8 Oct 2022 10:41:09 -0700 Subject: [PATCH 237/273] No-op barstools SFC compiler --- .../transforms/GenerateTopAndHarness.scala | 88 ++--------- .../transforms/stage/TapeoutStage.scala | 149 ++---------------- 2 files changed, 27 insertions(+), 210 deletions(-) diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index ef9c5408d..cede5d38c 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -13,46 +13,28 @@ import logger.LazyLogging // Requires two phases, one to collect modules below synTop in the hierarchy // and a second to remove those modules to generate the test harness private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogging { - val synTop: Option[String] = annotations.collectFirst { case SynTopAnnotation(s) => s } - val topFir: Option[String] = annotations.collectFirst { case TopFirAnnotation(s) => s } - val harnessFir: Option[String] = annotations.collectFirst { case HarnessFirAnnotation(s) => s } - val topAnnoOut: Option[String] = annotations.collectFirst { case TopAnnoOutAnnotation(s) => s } - val harnessAnnoOut: Option[String] = annotations.collectFirst { case HarnessAnnoOutAnnotation(s) => s } - val harnessTop: Option[String] = annotations.collectFirst { case HarnessTopAnnotation(h) => h } - val harnessConf: Option[String] = annotations.collectFirst { case HarnessConfAnnotation(h) => h } - val harnessOutput: Option[String] = annotations.collectFirst { case HarnessOutputAnnotation(h) => h } - val topDotfOut: Option[String] = annotations.collectFirst { 
case TopDotfOutAnnotation(h) => h } - val harnessDotfOut: Option[String] = annotations.collectFirst { case HarnessDotfOutAnnotation(h) => h } - - val annoFiles: List[String] = annotations.flatMap { - case InputAnnotationFileAnnotation(f) => Some(f) - case _ => None - }.toList - - lazy val rootCircuitTarget = CircuitTarget(harnessTop.get) - - val topAnnos = synTop.map(st => ReParentCircuitAnnotation(rootCircuitTarget.module(st))) ++ - topDotfOut.map(BlackBoxResourceFileNameAnno) + val outFir: Option[String] = annotations.collectFirst { case OutFirAnnotation(s) => s } + val outAnno: Option[String] = annotations.collectFirst { case OutAnnoAnnotation(s) => s } // Dump firrtl and annotation files protected def dump( circuit: Circuit, annotations: AnnotationSeq, - firFile: Option[String], - annoFile: Option[String] ): Unit = { - firFile.foreach { firPath => + outFir.foreach { firPath => val outputFile = new java.io.PrintWriter(firPath) outputFile.write(circuit.serialize) outputFile.close() } - annoFile.foreach { annoPath => + outAnno.foreach { annoPath => val outputFile = new java.io.PrintWriter(annoPath) outputFile.write(JsonProtocol.serialize(annotations.filter(_ match { case _: DeletedAnnotation => false case _: EmittedComponent => false case _: EmittedAnnotation[_] => false case _: FirrtlCircuitAnnotation => false + case _: OutAnnoAnnotation => false + case _: OutFirAnnotation => false case _ => true }))) outputFile.close() @@ -60,65 +42,15 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg } // Top Generation - def executeTop(): Seq[ExtModule] = { - val annos = new FirrtlStage().execute( - Array.empty, - annotations ++ Seq( - RunFirrtlTransformAnnotation(Dependency[ReParentCircuit]), - RunFirrtlTransformAnnotation(Dependency[RemoveUnusedModules]) - ) ++ - topAnnos - ) + def executeTop(): Unit = { + val annos = new FirrtlStage().execute(Array.empty, annotations) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => - dump(circuit, annos, topFir, topAnnoOut) - circuit.modules.collect { case e: ExtModule => e } - case _ => - throw new Exception(s"executeTop failed while executing FIRRTL!\n") - } - } - - // Top and harness generation - def executeTopAndHarness(): Unit = { - // Execute top and get list of ExtModules to avoid collisions - val topExtModules = executeTop() - - // order is determined by DependencyAPIMigration - val harnessAnnos = - harnessDotfOut.map(BlackBoxResourceFileNameAnno).toSeq ++ - harnessTop.map(ht => ModuleNameSuffixAnnotation(rootCircuitTarget, s"_in${ht}")) ++ - synTop.map(st => ConvertToExtModAnnotation(rootCircuitTarget.module(st))) ++ - Seq( - LinkExtModulesAnnotation(topExtModules), - RunFirrtlTransformAnnotation(Dependency[ConvertToExtMod]), - RunFirrtlTransformAnnotation(Dependency[RemoveUnusedModules]), - RunFirrtlTransformAnnotation(Dependency[AvoidExtModuleCollisions]), - RunFirrtlTransformAnnotation(Dependency[AddSuffixToModuleNames]) - ) - - // For harness run, change some firrtlOptions (below) for harness phase - // customTransforms: setup harness transforms, add AvoidExtModuleCollisions - // outputFileNameOverride: change to harnessOutput - // conf file must change to harnessConf by mapping annotations - - val generatorAnnotations = annotations - .filterNot(_.isInstanceOf[OutputFileAnnotation]) - .map { - case ReplSeqMemAnnotation(i, _) => ReplSeqMemAnnotation(i, harnessConf.get) - case HarnessOutputAnnotation(s) => OutputFileAnnotation(s) - case anno => anno - } ++ 
harnessAnnos - - val annos = new FirrtlStage().execute(Array.empty, generatorAnnotations) - annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { - case Some(circuit) => - dump(circuit, annos, harnessFir, harnessAnnoOut) + dump(circuit, annos) case _ => throw new Exception(s"executeTop failed while executing FIRRTL!\n") } } } -object GenerateTop extends StageMain(new TapeoutStage(doHarness = false)) - -object GenerateTopAndHarness extends StageMain(new TapeoutStage(doHarness = true)) +object GenerateTop extends StageMain(new TapeoutStage) diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index 7bbb046a5..67d2715f2 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -14,132 +14,28 @@ sealed trait TapeoutOption extends Unserializable { this: Annotation => } -case class HarnessOutputAnnotation(harnessOutput: String) extends NoTargetAnnotation with TapeoutOption +case class OutFirAnnotation(outFir: String) extends NoTargetAnnotation with TapeoutOption -object HarnessOutputAnnotation extends HasShellOptions { +object OutFirAnnotation extends HasShellOptions { val options: Seq[ShellOption[_]] = Seq( new ShellOption[String]( - longOption = "harness-o", - shortOption = Some("tho"), - toAnnotationSeq = (s: String) => Seq(HarnessOutputAnnotation(s)), - helpText = "use this to generate a harness at " + longOption = "out-fir-file", + shortOption = Some("off"), + toAnnotationSeq = (s: String) => Seq(OutFirAnnotation(s)), + helpText = "out-fir-file" ) ) } -case class SynTopAnnotation(synTop: String) extends NoTargetAnnotation with TapeoutOption +case class OutAnnoAnnotation(outAnno: String) extends NoTargetAnnotation with TapeoutOption -object SynTopAnnotation extends HasShellOptions { +object OutAnnoAnnotation extends HasShellOptions { val options: Seq[ShellOption[_]] = Seq( new ShellOption[String]( - longOption = "syn-top", - shortOption = Some("tst"), - toAnnotationSeq = (s: String) => Seq(SynTopAnnotation(s)), - helpText = "use this to set synTop" - ) - ) -} - -case class TopFirAnnotation(topFir: String) extends NoTargetAnnotation with TapeoutOption - -object TopFirAnnotation extends HasShellOptions { - val options: Seq[ShellOption[_]] = Seq( - new ShellOption[String]( - longOption = "top-fir", - shortOption = Some("tsf"), - toAnnotationSeq = (s: String) => Seq(TopFirAnnotation(s)), - helpText = "use this to set topFir" - ) - ) -} - -case class TopAnnoOutAnnotation(topAnnoOut: String) extends NoTargetAnnotation with TapeoutOption - -object TopAnnoOutAnnotation extends HasShellOptions { - val options: Seq[ShellOption[_]] = Seq( - new ShellOption[String]( - longOption = "top-anno-out", - shortOption = Some("tsaof"), - toAnnotationSeq = (s: String) => Seq(TopAnnoOutAnnotation(s)), - helpText = "use this to set topAnnoOut" - ) - ) -} - -case class TopDotfOutAnnotation(topDotfOut: String) extends NoTargetAnnotation with TapeoutOption - -object TopDotfOutAnnotation extends HasShellOptions { - val options: Seq[ShellOption[_]] = Seq( - new ShellOption[String]( - longOption = "top-dotf-out", - shortOption = Some("tdf"), - toAnnotationSeq = (s: String) => Seq(TopDotfOutAnnotation(s)), - helpText = "use this to set the filename for the top resource .f file" - ) - ) -} - -case class HarnessTopAnnotation(harnessTop: String) extends NoTargetAnnotation with TapeoutOption - -object 
HarnessTopAnnotation extends HasShellOptions { - val options: Seq[ShellOption[_]] = Seq( - new ShellOption[String]( - longOption = "harness-top", - shortOption = Some("tht"), - toAnnotationSeq = (s: String) => Seq(HarnessTopAnnotation(s)), - helpText = "use this to set harnessTop" - ) - ) -} - -case class HarnessFirAnnotation(harnessFir: String) extends NoTargetAnnotation with TapeoutOption - -object HarnessFirAnnotation extends HasShellOptions { - val options: Seq[ShellOption[_]] = Seq( - new ShellOption[String]( - longOption = "harness-fir", - shortOption = Some("thf"), - toAnnotationSeq = (s: String) => Seq(HarnessFirAnnotation(s)), - helpText = "use this to set harnessFir" - ) - ) -} - -case class HarnessAnnoOutAnnotation(harnessAnnoOut: String) extends NoTargetAnnotation with TapeoutOption - -object HarnessAnnoOutAnnotation extends HasShellOptions { - val options: Seq[ShellOption[_]] = Seq( - new ShellOption[String]( - longOption = "harness-anno-out", - shortOption = Some("thaof"), - toAnnotationSeq = (s: String) => Seq(HarnessAnnoOutAnnotation(s)), - helpText = "use this to set harnessAnnoOut" - ) - ) -} - -case class HarnessDotfOutAnnotation(harnessDotfOut: String) extends NoTargetAnnotation with TapeoutOption - -object HarnessDotfOutAnnotation extends HasShellOptions { - val options: Seq[ShellOption[_]] = Seq( - new ShellOption[String]( - longOption = "harness-dotf-out", - shortOption = Some("hdf"), - toAnnotationSeq = (s: String) => Seq(HarnessDotfOutAnnotation(s)), - helpText = "use this to set the filename for the harness resource .f file" - ) - ) -} - -case class HarnessConfAnnotation(harnessConf: String) extends NoTargetAnnotation with TapeoutOption - -object HarnessConfAnnotation extends HasShellOptions { - val options: Seq[ShellOption[_]] = Seq( - new ShellOption[String]( - longOption = "harness-conf", - shortOption = Some("thconf"), - toAnnotationSeq = (s: String) => Seq(HarnessConfAnnotation(s)), - helpText = "use this to set the harness conf file location" + longOption = "out-anno-file", + shortOption = Some("oaf"), + toAnnotationSeq = (s: String) => Seq(OutAnnoAnnotation(s)), + helpText = "out-anno-file" ) ) } @@ -149,32 +45,21 @@ trait TapeoutCli { parser.note("Tapeout specific options") Seq( - HarnessOutputAnnotation, - SynTopAnnotation, - TopFirAnnotation, - TopAnnoOutAnnotation, - TopDotfOutAnnotation, - HarnessTopAnnotation, - HarnessFirAnnotation, - HarnessAnnoOutAnnotation, - HarnessDotfOutAnnotation, - HarnessConfAnnotation + OutAnnoAnnotation, + OutFirAnnotation, ).foreach(_.addOptions(parser)) } -class TapeoutStage(doHarness: Boolean) extends Stage { +class TapeoutStage extends Stage { override val shell: Shell = new Shell(applicationName = "tapeout") with TapeoutCli with ChiselCli with FirrtlCli override def run(annotations: AnnotationSeq): AnnotationSeq = { Logger.makeScope(annotations) { val generator = new GenerateTopAndHarness(annotations) - if (doHarness) { - generator.executeTopAndHarness() - } else { - generator.executeTop() - } + generator.executeTop() } annotations } } + From cf75889804f640eb5f053396974f5a3f6b8372b6 Mon Sep 17 00:00:00 2001 From: abejgonzalez Date: Sun, 9 Oct 2022 17:01:06 -0700 Subject: [PATCH 238/273] Attempt at checking for Fixed types --- .../CheckForUnsupportedFirtoolTypes.scala | 72 +++++++++++++++++++ .../transforms/GenerateTopAndHarness.scala | 14 ++-- .../transforms/stage/TapeoutStage.scala | 14 ---- 3 files changed, 78 insertions(+), 22 deletions(-) create mode 100644 
src/main/scala/barstools/tapeout/transforms/CheckForUnsupportedFirtoolTypes.scala diff --git a/src/main/scala/barstools/tapeout/transforms/CheckForUnsupportedFirtoolTypes.scala b/src/main/scala/barstools/tapeout/transforms/CheckForUnsupportedFirtoolTypes.scala new file mode 100644 index 000000000..d584b147e --- /dev/null +++ b/src/main/scala/barstools/tapeout/transforms/CheckForUnsupportedFirtoolTypes.scala @@ -0,0 +1,72 @@ +// See LICENSE for license details. + +package barstools.tapeout.transforms + +import firrtl._ +import firrtl.annotations.{ModuleTarget, ReferenceTarget, SingleTargetAnnotation} +import firrtl.ir._ +import firrtl.options.Dependency +import firrtl.passes.memlib.ReplSeqMem +import firrtl.stage.Forms +import firrtl.stage.{RunFirrtlTransformAnnotation} +import firrtl.stage.TransformManager.TransformDependency + +class CheckForUnsupportedFirtoolTypes extends Transform with DependencyAPIMigration { + override def prerequisites: Seq[TransformDependency] = Forms.ChirrtlForm + override def optionalPrerequisites: Seq[TransformDependency] = Seq.empty + override def optionalPrerequisiteOf: Seq[TransformDependency] = Seq.empty + override def invalidates(a: Transform): Boolean = false + + def run(state: CircuitState): Boolean = { + val c = state.circuit + + //def checkFixed(t: Type): Unit = { + // println(s"checkFixed -> $t") + // t match { + // case FixedType(_, _) => { + // runLowering = true + // println(s"runLowering is $runLowering") + // } + // case _ => Unit + // } + //} + + def onStmtType(s: Statement): Boolean = { + var runLowering = false + println(s"Entering onStmtType") + + def recursive(s: Statement): Unit = { + s match { + case x: DefRegister => x.foreachType(_ => println(s"It works!")) + case x: DefWire => x.foreachType(_ => println(s"1 It works!")) + case x: DefNode => x.foreachType(_ => println(s"2 It works!")) + case x: DefMemory => x.foreachType(_ => println(s"3 It works!")) + case x: WDefInstance => x.foreachType(_ => println(s"4 It works!")) + case x: Connect => x.foreachType(_ => println(s"5 It works!")) + case x: PartialConnect => x.foreachType(_ => println(s"6 It works!")) + case x: Block => x.foreachStmt(recursive) + case x => x.foreachType(_ => println(s"Uh oh")) + } + } + + //s.foreachType(checkFixed) + s.foreachType(_ => println("Reached")) + + runLowering + } + + val runLoweringOverall = c.modules.map { + case m: ExtModule => false + case m: Module => onStmtType(m.body) + } + + runLoweringOverall.reduce(_ || _) + } + + def execute(state: CircuitState): CircuitState = { + val runLoweringAnnos = Seq(RunFirrtlTransformAnnotation(new MiddleFirrtlEmitter)) + val doLowering = run(state) + println(s"DEBUG: Final doLowering -> $doLowering") + state.copy(annotations = state.annotations) + } +} diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index cede5d38c..44aa37cf3 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -7,13 +7,13 @@ import firrtl.ir._ import firrtl.options.{Dependency, InputAnnotationFileAnnotation, StageMain} import firrtl.passes.memlib.ReplSeqMemAnnotation import firrtl.stage.{FirrtlCircuitAnnotation, FirrtlStage, OutputFileAnnotation, RunFirrtlTransformAnnotation} +import firrtl.passes.{ConvertFixedToSInt} import firrtl.transforms.BlackBoxResourceFileNameAnno import logger.LazyLogging // Requires two phases, one to collect 
modules below synTop in the hierarchy // and a second to remove those modules to generate the test harness private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogging { - val outFir: Option[String] = annotations.collectFirst { case OutFirAnnotation(s) => s } val outAnno: Option[String] = annotations.collectFirst { case OutAnnoAnnotation(s) => s } // Dump firrtl and annotation files @@ -21,11 +21,6 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg circuit: Circuit, annotations: AnnotationSeq, ): Unit = { - outFir.foreach { firPath => - val outputFile = new java.io.PrintWriter(firPath) - outputFile.write(circuit.serialize) - outputFile.close() - } outAnno.foreach { annoPath => val outputFile = new java.io.PrintWriter(annoPath) outputFile.write(JsonProtocol.serialize(annotations.filter(_ match { @@ -34,16 +29,19 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg case _: EmittedAnnotation[_] => false case _: FirrtlCircuitAnnotation => false case _: OutAnnoAnnotation => false - case _: OutFirAnnotation => false case _ => true }))) outputFile.close() } } + // TODO: Filter out blackbox dumping from this FIRRTL step, let CIRCT do it + // Top Generation def executeTop(): Unit = { - val annos = new FirrtlStage().execute(Array.empty, annotations) + val annos = new FirrtlStage().execute(Array.empty, annotations) //++ Seq( + // RunFirrtlTransformAnnotation(Dependency[CheckForUnsupportedFirtoolTypes] + //))) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => dump(circuit, annos) diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index 67d2715f2..2ba0bdb89 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -14,19 +14,6 @@ sealed trait TapeoutOption extends Unserializable { this: Annotation => } -case class OutFirAnnotation(outFir: String) extends NoTargetAnnotation with TapeoutOption - -object OutFirAnnotation extends HasShellOptions { - val options: Seq[ShellOption[_]] = Seq( - new ShellOption[String]( - longOption = "out-fir-file", - shortOption = Some("off"), - toAnnotationSeq = (s: String) => Seq(OutFirAnnotation(s)), - helpText = "out-fir-file" - ) - ) -} - case class OutAnnoAnnotation(outAnno: String) extends NoTargetAnnotation with TapeoutOption object OutAnnoAnnotation extends HasShellOptions { @@ -46,7 +33,6 @@ trait TapeoutCli { Seq( OutAnnoAnnotation, - OutFirAnnotation, ).foreach(_.addOptions(parser)) } From d1295e68f87e2adb377a97b75967fc20cdca5950 Mon Sep 17 00:00:00 2001 From: joey0320 Date: Fri, 23 Dec 2022 11:21:08 -0800 Subject: [PATCH 239/273] Add back HarnessConf --- .../transforms/GenerateTopAndHarness.scala | 33 ++++++++++++++++++- .../transforms/stage/TapeoutStage.scala | 22 +++++++++++-- 2 files changed, 52 insertions(+), 3 deletions(-) diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index 44aa37cf3..448d6b1a0 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -15,6 +15,12 @@ import logger.LazyLogging // and a second to remove those modules to generate the test harness private class 
GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogging { val outAnno: Option[String] = annotations.collectFirst { case OutAnnoAnnotation(s) => s } + val harnessConf: Option[String] = annotations.collectFirst { case HarnessConfAnnotation(h) => h } + + val annoFiles: List[String] = annotations.flatMap { + case InputAnnotationFileAnnotation(f) => Some(f) + case _ => None + }.toList // Dump firrtl and annotation files protected def dump( @@ -49,6 +55,31 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg throw new Exception(s"executeTop failed while executing FIRRTL!\n") } } + + // Top and harness generation + def executeTopAndHarness(): Unit = { + executeTop() + + // For harness run, change some firrtlOptions (below) for harness phase + // customTransforms: setup harness transforms, add AvoidExtModuleCollisions + // outputFileNameOverride: change to harnessOutput + // conf file must change to harnessConf by mapping annotations + val generatorAnnotations = annotations + .filterNot(_.isInstanceOf[OutputFileAnnotation]) + .map { + case ReplSeqMemAnnotation(i, _) => ReplSeqMemAnnotation(i, harnessConf.get) + case anno => anno + } + + val annos = new FirrtlStage().execute(Array.empty, generatorAnnotations) + annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { + case Some(circuit) => + dump(circuit, annos) + case _ => + throw new Exception(s"executeTop failed while executing FIRRTL!\n") + } + } } -object GenerateTop extends StageMain(new TapeoutStage) +object GenerateTop extends StageMain(new TapeoutStage(doHarness = false)) +object GenerateTopAndHarness extends StageMain(new TapeoutStage(doHarness = true)) diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index 2ba0bdb89..9de5e04f5 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -27,23 +27,41 @@ object OutAnnoAnnotation extends HasShellOptions { ) } +case class HarnessConfAnnotation(harnessConf: String) extends NoTargetAnnotation with TapeoutOption + +object HarnessConfAnnotation extends HasShellOptions { + val options: Seq[ShellOption[_]] = Seq( + new ShellOption[String]( + longOption = "harness-conf", + shortOption = Some("thconf"), + toAnnotationSeq = (s: String) => Seq(HarnessConfAnnotation(s)), + helpText = "use this to set the harness conf file location" + ) + ) +} + trait TapeoutCli { this: Shell => parser.note("Tapeout specific options") Seq( OutAnnoAnnotation, + HarnessConfAnnotation, ).foreach(_.addOptions(parser)) } -class TapeoutStage extends Stage { +class TapeoutStage(doHarness: Boolean) extends Stage { override val shell: Shell = new Shell(applicationName = "tapeout") with TapeoutCli with ChiselCli with FirrtlCli override def run(annotations: AnnotationSeq): AnnotationSeq = { Logger.makeScope(annotations) { val generator = new GenerateTopAndHarness(annotations) - generator.executeTop() + if (doHarness) { + generator.executeTopAndHarness() + } else { + generator.executeTop() + } } annotations } From 850f613a14662c143ccf22b88c2078aa72898ece Mon Sep 17 00:00:00 2001 From: joey0320 Date: Fri, 23 Dec 2022 20:11:07 -0800 Subject: [PATCH 240/273] Remove CheckForUnsupportedFirtoolTypes --- .../CheckForUnsupportedFirtoolTypes.scala | 72 ------------------- .../transforms/GenerateTopAndHarness.scala | 5 +- 2 files changed, 2 insertions(+), 75 deletions(-) 
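The CheckForUnsupportedFirtoolTypes pass introduced in PATCH 238 and deleted here in PATCH 240 never got past debug printlns; its commented-out checkFixed logic is what was meant to decide whether extra lowering is needed. For reference, a minimal standalone sketch of that check follows. It is not part of the patch series: the ContainsFixedTypes name is mine, and it assumes a FIRRTL version that still defines firrtl.ir.FixedType (as the commented-out code does). It walks each module with the same foreachStmt/foreachType hooks the attempt used and reports whether any port or declaration still carries a fixed-point type.

import firrtl.ir._

object ContainsFixedTypes {
  // Recursively look for FixedType inside a type and its element types
  private def typeHasFixed(t: Type): Boolean = t match {
    case _: FixedType => true
    case other =>
      var found = false
      other.foreachType(sub => found = found || typeHasFixed(sub))
      found
  }

  // Check the types referenced directly by a statement, then recurse into sub-statements
  private def stmtHasFixed(s: Statement): Boolean = {
    var found = false
    s.foreachType(t => found = found || typeHasFixed(t))
    s.foreachStmt(sub => found = found || stmtHasFixed(sub))
    found
  }

  // True if any port or declaration in the circuit still uses a fixed-point type
  def apply(c: Circuit): Boolean = c.modules.exists {
    case m: Module => m.ports.exists(p => typeHasFixed(p.tpe)) || stmtHasFixed(m.body)
    case _         => false // ExtModules are skipped, as in the original attempt
  }
}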
delete mode 100644 src/main/scala/barstools/tapeout/transforms/CheckForUnsupportedFirtoolTypes.scala diff --git a/src/main/scala/barstools/tapeout/transforms/CheckForUnsupportedFirtoolTypes.scala b/src/main/scala/barstools/tapeout/transforms/CheckForUnsupportedFirtoolTypes.scala deleted file mode 100644 index d584b147e..000000000 --- a/src/main/scala/barstools/tapeout/transforms/CheckForUnsupportedFirtoolTypes.scala +++ /dev/null @@ -1,72 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms - -import firrtl._ -import firrtl.annotations.{ModuleTarget, ReferenceTarget, SingleTargetAnnotation} -import firrtl.ir._ -import firrtl.options.Dependency -import firrtl.passes.memlib.ReplSeqMem -import firrtl.stage.Forms -import firrtl.stage.{RunFirrtlTransformAnnotation} -import firrtl.stage.TransformManager.TransformDependency - -class CheckForUnsupportedFirtoolTypes extends Transform with DependencyAPIMigration { - override def prerequisites: Seq[TransformDependency] = Forms.ChirrtlForm - override def optionalPrerequisites: Seq[TransformDependency] = Seq.empty - override def optionalPrerequisiteOf: Seq[TransformDependency] = Seq.empty - override def invalidates(a: Transform): Boolean = false - - def run(state: CircuitState): Boolean = { - val c = state.circuit - - //def checkFixed(t: Type): Unit = { - // println(s"checkFixed -> $t") - // t match { - // case FixedType(_, _) => { - // runLowering = true - // println(s"runLowering is $runLowering") - // } - // case _ => Unit - // } - //} - - def onStmtType(s: Statement): Boolean = { - var runLowering = false - println(s"Entering onStmtType") - - def recursive(s: Statement): Unit = { - s match { - case x: DefRegister => x.foreachType(_ => println(s"It works!")) - case x: DefWire => x.foreachType(_ => println(s"1 It works!")) - case x: DefNode => x.foreachType(_ => println(s"2 It works!")) - case x: DefMemory => x.foreachType(_ => println(s"3 It works!")) - case x: WDefInstance => x.foreachType(_ => println(s"4 It works!")) - case x: Connect => x.foreachType(_ => println(s"5 It works!")) - case x: PartialConnect => x.foreachType(_ => println(s"6 It works!")) - case x: Block => x.foreachStmt(recursive) - case x => x.foreachType(_ => println(s"Uh oh")) - } - } - - //s.foreachType(checkFixed) - s.foreachType(_ => println("Reached")) - - runLowering - } - - val runLoweringOverall = c.modules.map { - case m: ExtModule => false - case m: Module => onStmtType(m.body) - } - - runLoweringOverall.reduce(_ || _) - } - - def execute(state: CircuitState): CircuitState = { - val runLoweringAnnos = Seq(RunFirrtlTransformAnnotation(new MiddleFirrtlEmitter)) - val doLowering = run(state) - println(s"DEBUG: Final doLowering -> $doLowering") - state.copy(annotations = state.annotations) - } -} diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index 448d6b1a0..b1d6b79ad 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -45,9 +45,8 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg // Top Generation def executeTop(): Unit = { - val annos = new FirrtlStage().execute(Array.empty, annotations) //++ Seq( - // RunFirrtlTransformAnnotation(Dependency[CheckForUnsupportedFirtoolTypes] - //))) + val annos = new FirrtlStage().execute(Array.empty, annotations) + annos.collectFirst { case 
FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => dump(circuit, annos) From 2dfa1847e7199a15bf64f4cf9283a7d1c9957a58 Mon Sep 17 00:00:00 2001 From: joey0320 Date: Fri, 23 Dec 2022 20:38:40 -0800 Subject: [PATCH 241/273] Fix formatting --- .../tapeout/transforms/GenerateTopAndHarness.scala | 8 ++++---- .../barstools/tapeout/transforms/stage/TapeoutStage.scala | 3 +-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index b1d6b79ad..4df6e7999 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -14,8 +14,8 @@ import logger.LazyLogging // Requires two phases, one to collect modules below synTop in the hierarchy // and a second to remove those modules to generate the test harness private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogging { - val outAnno: Option[String] = annotations.collectFirst { case OutAnnoAnnotation(s) => s } - val harnessConf: Option[String] = annotations.collectFirst { case HarnessConfAnnotation(h) => h } + val outAnno: Option[String] = annotations.collectFirst { case OutAnnoAnnotation(s) => s } + val harnessConf: Option[String] = annotations.collectFirst { case HarnessConfAnnotation(h) => h } val annoFiles: List[String] = annotations.flatMap { case InputAnnotationFileAnnotation(f) => Some(f) @@ -25,7 +25,7 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg // Dump firrtl and annotation files protected def dump( circuit: Circuit, - annotations: AnnotationSeq, + annotations: AnnotationSeq ): Unit = { outAnno.foreach { annoPath => val outputFile = new java.io.PrintWriter(annoPath) @@ -34,7 +34,7 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg case _: EmittedComponent => false case _: EmittedAnnotation[_] => false case _: FirrtlCircuitAnnotation => false - case _: OutAnnoAnnotation => false + case _: OutAnnoAnnotation => false case _ => true }))) outputFile.close() diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index 9de5e04f5..338cbc869 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -46,7 +46,7 @@ trait TapeoutCli { Seq( OutAnnoAnnotation, - HarnessConfAnnotation, + HarnessConfAnnotation ).foreach(_.addOptions(parser)) } @@ -66,4 +66,3 @@ class TapeoutStage(doHarness: Boolean) extends Stage { annotations } } - From 13e2bb92ab7360f0ce3b2ff4d27b3024736111fb Mon Sep 17 00:00:00 2001 From: joey0320 Date: Fri, 23 Dec 2022 20:50:42 -0800 Subject: [PATCH 242/273] Remove GenerateTopSpec.scala test as the FIRRTL passes are removed --- .../tapeout/transforms/GenerateTopSpec.scala | 98 ------------------- 1 file changed, 98 deletions(-) delete mode 100644 src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala diff --git a/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala b/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala deleted file mode 100644 index d967c8df1..000000000 --- a/src/test/scala/barstools/tapeout/transforms/GenerateTopSpec.scala +++ /dev/null @@ -1,98 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package 
barstools.tapeout.transforms - -import chisel3.stage.ChiselStage -import firrtl.FileUtils -import org.scalatest.freespec.AnyFreeSpec -import org.scalatest.matchers.should.Matchers - -import java.io.{File, PrintWriter} - -class GenerateTopSpec extends AnyFreeSpec with Matchers { - "Generate top and harness" - { - "should include the following transforms" in { - val targetDir = "test_run_dir/generate_top_and_harness" - val transformListName = s"$targetDir/ExampleModuleNeesResetInvertTransforms.log" - FileUtils.makeDirectory(targetDir) - (new ChiselStage).emitChirrtl(new ExampleModuleNeedsResetInverted, Array("--target-dir", targetDir)) - - GenerateTopAndHarness.main( - Array( - "-i", - s"$targetDir/ExampleModuleNeedsResetInverted.fir", - "-ll", - "info", - "--log-file", - transformListName - ) - ) - - val output = FileUtils.getText(transformListName) - output should include("barstools.tapeout.transforms.AddSuffixToModuleNames") - output should include("barstools.tapeout.transforms.ConvertToExtMod") - output should include("barstools.tapeout.transforms.RemoveUnusedModules") - output should include("barstools.tapeout.transforms.AvoidExtModuleCollisions") - } - } - - "generate harness should be generated" ignore { - val targetDir = "test_run_dir/generate_top_spec" - val logOutputName = s"$targetDir/top_spec_output.log" - FileUtils.makeDirectory(targetDir) - - val input = FileUtils.getLinesResource("/BlackBoxFloatTester.fir") - val printWriter = new PrintWriter(new File(s"$targetDir/BlackBoxFloatTester.fir")) - printWriter.write(input.mkString("\n")) - printWriter.close() - - println(s"""Resource: ${input.mkString("\n")}""") - - GenerateTopAndHarness.main( - Array( - "--target-dir", - "test_run_dir/generate_top_spec", - "-i", - s"$targetDir/BlackBoxFloatTester.fir", - "-o", - "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.v", - "-tho", - "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.v", - "-i", - "chipyard.unittest.TestHarness.IceNetUnitTestConfig.fir", - "--syn-top", - "UnitTestSuite", - "--harness-top", - "TestHarness", - "-faf", - "chipyard.unittest.TestHarness.IceNetUnitTestConfig.anno.json", - "-tsaof", - "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.anno.json", - "-tdf", - "firrtl_black_box_resource_files.top.f", - "-tsf", - "chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.fir", - "-thaof", - "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.anno.json", - "-hdf", - "firrtl_black_box_resource_files.harness.f", - "-thf", - "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.fir", - "--infer-rw", - "--repl-seq-mem", - "-c:TestHarness:-o:chipyard.unittest.TestHarness.IceNetUnitTestConfig.top.mems.conf", - "-thconf", - "chipyard.unittest.TestHarness.IceNetUnitTestConfig.harness.mems.conf", - "-td", - "test_run_dir/from-ci", - "-ll", - "info", - "--log-file", - logOutputName - ) - ) - - val output = FileUtils.getText(logOutputName) - println(output) - } -} From 5af7f216487df81a5ad8cda6d4fb4b79cee0e4d2 Mon Sep 17 00:00:00 2001 From: joey0320 Date: Wed, 28 Dec 2022 11:12:18 -0800 Subject: [PATCH 243/273] Remove executeTop & make everything to execute --- .../transforms/GenerateTopAndHarness.scala | 30 ++++--------------- .../transforms/stage/TapeoutStage.scala | 9 ++---- 2 files changed, 7 insertions(+), 32 deletions(-) diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index 4df6e7999..de3b8a650 100644 --- 
a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -7,7 +7,6 @@ import firrtl.ir._ import firrtl.options.{Dependency, InputAnnotationFileAnnotation, StageMain} import firrtl.passes.memlib.ReplSeqMemAnnotation import firrtl.stage.{FirrtlCircuitAnnotation, FirrtlStage, OutputFileAnnotation, RunFirrtlTransformAnnotation} -import firrtl.passes.{ConvertFixedToSInt} import firrtl.transforms.BlackBoxResourceFileNameAnno import logger.LazyLogging @@ -23,8 +22,8 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg }.toList // Dump firrtl and annotation files - protected def dump( - circuit: Circuit, + // Reads global params "outAnno" + protected def dumpAnnos( annotations: AnnotationSeq ): Unit = { outAnno.foreach { annoPath => @@ -43,26 +42,8 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg // TODO: Filter out blackbox dumping from this FIRRTL step, let CIRCT do it - // Top Generation - def executeTop(): Unit = { - val annos = new FirrtlStage().execute(Array.empty, annotations) - - annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { - case Some(circuit) => - dump(circuit, annos) - case _ => - throw new Exception(s"executeTop failed while executing FIRRTL!\n") - } - } - // Top and harness generation - def executeTopAndHarness(): Unit = { - executeTop() - - // For harness run, change some firrtlOptions (below) for harness phase - // customTransforms: setup harness transforms, add AvoidExtModuleCollisions - // outputFileNameOverride: change to harnessOutput - // conf file must change to harnessConf by mapping annotations + def execute(): Unit = { val generatorAnnotations = annotations .filterNot(_.isInstanceOf[OutputFileAnnotation]) .map { @@ -73,12 +54,11 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg val annos = new FirrtlStage().execute(Array.empty, generatorAnnotations) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => - dump(circuit, annos) + dumpAnnos(annos) case _ => throw new Exception(s"executeTop failed while executing FIRRTL!\n") } } } -object GenerateTop extends StageMain(new TapeoutStage(doHarness = false)) -object GenerateTopAndHarness extends StageMain(new TapeoutStage(doHarness = true)) +object GenerateTopAndHarness extends StageMain(new TapeoutStage()) diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index 338cbc869..730bbface 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -50,18 +50,13 @@ trait TapeoutCli { ).foreach(_.addOptions(parser)) } -class TapeoutStage(doHarness: Boolean) extends Stage { +class TapeoutStage() extends Stage { override val shell: Shell = new Shell(applicationName = "tapeout") with TapeoutCli with ChiselCli with FirrtlCli override def run(annotations: AnnotationSeq): AnnotationSeq = { Logger.makeScope(annotations) { val generator = new GenerateTopAndHarness(annotations) - - if (doHarness) { - generator.executeTopAndHarness() - } else { - generator.executeTop() - } + generator.execute() } annotations } From 723bab78b948c2da0e56dde4fe087443d3f31d60 Mon Sep 17 00:00:00 2001 From: joey0320 Date: Wed, 28 Dec 2022 13:09:27 -0800 Subject: [PATCH 244/273] Revert "Remove 
executeTop & make everything to execute" This reverts commit 5af7f216487df81a5ad8cda6d4fb4b79cee0e4d2. --- .../transforms/GenerateTopAndHarness.scala | 30 +++++++++++++++---- .../transforms/stage/TapeoutStage.scala | 9 ++++-- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index de3b8a650..4df6e7999 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -7,6 +7,7 @@ import firrtl.ir._ import firrtl.options.{Dependency, InputAnnotationFileAnnotation, StageMain} import firrtl.passes.memlib.ReplSeqMemAnnotation import firrtl.stage.{FirrtlCircuitAnnotation, FirrtlStage, OutputFileAnnotation, RunFirrtlTransformAnnotation} +import firrtl.passes.{ConvertFixedToSInt} import firrtl.transforms.BlackBoxResourceFileNameAnno import logger.LazyLogging @@ -22,8 +23,8 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg }.toList // Dump firrtl and annotation files - // Reads global params "outAnno" - protected def dumpAnnos( + protected def dump( + circuit: Circuit, annotations: AnnotationSeq ): Unit = { outAnno.foreach { annoPath => @@ -42,8 +43,26 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg // TODO: Filter out blackbox dumping from this FIRRTL step, let CIRCT do it + // Top Generation + def executeTop(): Unit = { + val annos = new FirrtlStage().execute(Array.empty, annotations) + + annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { + case Some(circuit) => + dump(circuit, annos) + case _ => + throw new Exception(s"executeTop failed while executing FIRRTL!\n") + } + } + // Top and harness generation - def execute(): Unit = { + def executeTopAndHarness(): Unit = { + executeTop() + + // For harness run, change some firrtlOptions (below) for harness phase + // customTransforms: setup harness transforms, add AvoidExtModuleCollisions + // outputFileNameOverride: change to harnessOutput + // conf file must change to harnessConf by mapping annotations val generatorAnnotations = annotations .filterNot(_.isInstanceOf[OutputFileAnnotation]) .map { @@ -54,11 +73,12 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg val annos = new FirrtlStage().execute(Array.empty, generatorAnnotations) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => - dumpAnnos(annos) + dump(circuit, annos) case _ => throw new Exception(s"executeTop failed while executing FIRRTL!\n") } } } -object GenerateTopAndHarness extends StageMain(new TapeoutStage()) +object GenerateTop extends StageMain(new TapeoutStage(doHarness = false)) +object GenerateTopAndHarness extends StageMain(new TapeoutStage(doHarness = true)) diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index 730bbface..338cbc869 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -50,13 +50,18 @@ trait TapeoutCli { ).foreach(_.addOptions(parser)) } -class TapeoutStage() extends Stage { +class TapeoutStage(doHarness: Boolean) extends Stage { override val shell: Shell = new Shell(applicationName = "tapeout") with TapeoutCli with 
ChiselCli with FirrtlCli override def run(annotations: AnnotationSeq): AnnotationSeq = { Logger.makeScope(annotations) { val generator = new GenerateTopAndHarness(annotations) - generator.execute() + + if (doHarness) { + generator.executeTopAndHarness() + } else { + generator.executeTop() + } } annotations } From 899387f4fb54279a8d5c0a16e543c2cc6e60f6b8 Mon Sep 17 00:00:00 2001 From: joey0320 Date: Wed, 28 Dec 2022 13:11:18 -0800 Subject: [PATCH 245/273] Fix dump to dumpAnnos --- .../tapeout/transforms/GenerateTopAndHarness.scala | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala index 4df6e7999..12c416cff 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala @@ -7,7 +7,6 @@ import firrtl.ir._ import firrtl.options.{Dependency, InputAnnotationFileAnnotation, StageMain} import firrtl.passes.memlib.ReplSeqMemAnnotation import firrtl.stage.{FirrtlCircuitAnnotation, FirrtlStage, OutputFileAnnotation, RunFirrtlTransformAnnotation} -import firrtl.passes.{ConvertFixedToSInt} import firrtl.transforms.BlackBoxResourceFileNameAnno import logger.LazyLogging @@ -23,8 +22,8 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg }.toList // Dump firrtl and annotation files - protected def dump( - circuit: Circuit, + // Use global param outAnno + protected def dumpAnnos( annotations: AnnotationSeq ): Unit = { outAnno.foreach { annoPath => @@ -49,7 +48,7 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => - dump(circuit, annos) + dumpAnnos(annos) case _ => throw new Exception(s"executeTop failed while executing FIRRTL!\n") } @@ -73,7 +72,7 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg val annos = new FirrtlStage().execute(Array.empty, generatorAnnotations) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => - dump(circuit, annos) + dumpAnnos(annos) case _ => throw new Exception(s"executeTop failed while executing FIRRTL!\n") } From e340f1240a2c5feeb85160a7f81a32496916a7c4 Mon Sep 17 00:00:00 2001 From: joey0320 Date: Wed, 28 Dec 2022 23:39:49 -0800 Subject: [PATCH 246/273] Remove all passes again | rename GenerateTopAndHarness to GenerateModelStageMain --- ...ess.scala => GenerateModelStageMain.scala} | 37 ++----------------- .../transforms/stage/TapeoutStage.scala | 27 ++------------ 2 files changed, 7 insertions(+), 57 deletions(-) rename src/main/scala/barstools/tapeout/transforms/{GenerateTopAndHarness.scala => GenerateModelStageMain.scala} (51%) diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala b/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala similarity index 51% rename from src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala rename to src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala index 12c416cff..2de98408f 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateTopAndHarness.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala @@ -10,11 +10,8 @@ import firrtl.stage.{FirrtlCircuitAnnotation, FirrtlStage, OutputFileAnnotation, import 
firrtl.transforms.BlackBoxResourceFileNameAnno import logger.LazyLogging -// Requires two phases, one to collect modules below synTop in the hierarchy -// and a second to remove those modules to generate the test harness -private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogging { +private class GenerateModelStageMain(annotations: AnnotationSeq) extends LazyLogging { val outAnno: Option[String] = annotations.collectFirst { case OutAnnoAnnotation(s) => s } - val harnessConf: Option[String] = annotations.collectFirst { case HarnessConfAnnotation(h) => h } val annoFiles: List[String] = annotations.flatMap { case InputAnnotationFileAnnotation(f) => Some(f) @@ -40,10 +37,7 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg } } - // TODO: Filter out blackbox dumping from this FIRRTL step, let CIRCT do it - - // Top Generation - def executeTop(): Unit = { + def executeStageMain(): Unit = { val annos = new FirrtlStage().execute(Array.empty, annotations) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { @@ -53,31 +47,6 @@ private class GenerateTopAndHarness(annotations: AnnotationSeq) extends LazyLogg throw new Exception(s"executeTop failed while executing FIRRTL!\n") } } - - // Top and harness generation - def executeTopAndHarness(): Unit = { - executeTop() - - // For harness run, change some firrtlOptions (below) for harness phase - // customTransforms: setup harness transforms, add AvoidExtModuleCollisions - // outputFileNameOverride: change to harnessOutput - // conf file must change to harnessConf by mapping annotations - val generatorAnnotations = annotations - .filterNot(_.isInstanceOf[OutputFileAnnotation]) - .map { - case ReplSeqMemAnnotation(i, _) => ReplSeqMemAnnotation(i, harnessConf.get) - case anno => anno - } - - val annos = new FirrtlStage().execute(Array.empty, generatorAnnotations) - annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { - case Some(circuit) => - dumpAnnos(annos) - case _ => - throw new Exception(s"executeTop failed while executing FIRRTL!\n") - } - } } -object GenerateTop extends StageMain(new TapeoutStage(doHarness = false)) -object GenerateTopAndHarness extends StageMain(new TapeoutStage(doHarness = true)) +object GenerateModelStageMain extends StageMain(new TapeoutStage()) diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index 338cbc869..bff297b1b 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -2,7 +2,7 @@ package barstools.tapeout.transforms.stage -import barstools.tapeout.transforms.GenerateTopAndHarness +import barstools.tapeout.transforms.GenerateModelStageMain import chisel3.stage.ChiselCli import firrtl.AnnotationSeq import firrtl.annotations.{Annotation, NoTargetAnnotation} @@ -27,41 +27,22 @@ object OutAnnoAnnotation extends HasShellOptions { ) } -case class HarnessConfAnnotation(harnessConf: String) extends NoTargetAnnotation with TapeoutOption - -object HarnessConfAnnotation extends HasShellOptions { - val options: Seq[ShellOption[_]] = Seq( - new ShellOption[String]( - longOption = "harness-conf", - shortOption = Some("thconf"), - toAnnotationSeq = (s: String) => Seq(HarnessConfAnnotation(s)), - helpText = "use this to set the harness conf file location" - ) - ) -} - trait TapeoutCli { this: Shell => parser.note("Tapeout 
specific options") Seq( OutAnnoAnnotation, - HarnessConfAnnotation ).foreach(_.addOptions(parser)) } -class TapeoutStage(doHarness: Boolean) extends Stage { +class TapeoutStage() extends Stage { override val shell: Shell = new Shell(applicationName = "tapeout") with TapeoutCli with ChiselCli with FirrtlCli override def run(annotations: AnnotationSeq): AnnotationSeq = { Logger.makeScope(annotations) { - val generator = new GenerateTopAndHarness(annotations) - - if (doHarness) { - generator.executeTopAndHarness() - } else { - generator.executeTop() - } + val stageMain = new GenerateModelStageMain(annotations) + stageMain.executeStageMain() } annotations } From 06db6059022c55df8e6943702653798c43ead3d8 Mon Sep 17 00:00:00 2001 From: joey0320 Date: Wed, 28 Dec 2022 23:55:10 -0800 Subject: [PATCH 247/273] Fixes test for CI --- .../barstools/tapeout/transforms/GenerateModelStageMain.scala | 2 +- .../scala/barstools/tapeout/transforms/stage/TapeoutStage.scala | 2 +- src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala b/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala index 2de98408f..c48d4c359 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala @@ -11,7 +11,7 @@ import firrtl.transforms.BlackBoxResourceFileNameAnno import logger.LazyLogging private class GenerateModelStageMain(annotations: AnnotationSeq) extends LazyLogging { - val outAnno: Option[String] = annotations.collectFirst { case OutAnnoAnnotation(s) => s } + val outAnno: Option[String] = annotations.collectFirst { case OutAnnoAnnotation(s) => s } val annoFiles: List[String] = annotations.flatMap { case InputAnnotationFileAnnotation(f) => Some(f) diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index bff297b1b..7ab597174 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -32,7 +32,7 @@ trait TapeoutCli { parser.note("Tapeout specific options") Seq( - OutAnnoAnnotation, + OutAnnoAnnotation ).foreach(_.addOptions(parser)) } diff --git a/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala b/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala index a4dcd7802..bb089de9a 100644 --- a/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala +++ b/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala @@ -91,7 +91,7 @@ class GenerateSpec extends AnyFreeSpec { val targetDir = "test_run_dir/generate_spec" generateTestData(targetDir) - GenerateTop.main( + GenerateModelStageMain.main( Array( "-i", s"$targetDir/GenerateExampleTester.fir", From df3232f7d967d22570dd3c90706ce9422c25df07 Mon Sep 17 00:00:00 2001 From: abejgonzalez Date: Sun, 5 Feb 2023 21:44:51 -0800 Subject: [PATCH 248/273] Run RemoveValidIf pass for updated CIRCT --- .../transforms/AddSuffixToModuleNames.scala | 71 ----------------- .../transforms/AvoidExtModuleCollisions.scala | 32 -------- .../transforms/ConvertToExtModPass.scala | 62 --------------- .../tapeout/transforms/EnumerateModules.scala | 40 ---------- .../tapeout/transforms/ExtraTransforms.scala | 26 +++++++ .../transforms/GenerateModelStageMain.scala | 16 ++-- 
.../tapeout/transforms/ReParentCircuit.scala | 76 ------------------- .../transforms/RemoveUnusedModules.scala | 67 ---------------- .../tapeout/transforms/ResetInverter.scala | 69 ----------------- .../transforms/stage/TapeoutStage.scala | 23 +++++- 10 files changed, 59 insertions(+), 423 deletions(-) delete mode 100644 src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala delete mode 100644 src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala delete mode 100644 src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala delete mode 100644 src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala create mode 100644 src/main/scala/barstools/tapeout/transforms/ExtraTransforms.scala delete mode 100644 src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala delete mode 100644 src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala delete mode 100644 src/main/scala/barstools/tapeout/transforms/ResetInverter.scala diff --git a/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala b/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala deleted file mode 100644 index e2c5620dc..000000000 --- a/src/main/scala/barstools/tapeout/transforms/AddSuffixToModuleNames.scala +++ /dev/null @@ -1,71 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms - -import firrtl.Mappers._ -import firrtl._ -import firrtl.annotations.{CircuitTarget, ModuleTarget, SingleTargetAnnotation} -import firrtl.ir._ -import firrtl.stage.Forms -import firrtl.stage.TransformManager.TransformDependency - -case class KeepNameAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { - def duplicate(n: ModuleTarget) = this.copy(n) -} - -case class ModuleNameSuffixAnnotation(target: CircuitTarget, suffix: String) - extends SingleTargetAnnotation[CircuitTarget] { - def duplicate(n: CircuitTarget) = this.copy(target = n) -} - -class AddSuffixToModuleNames extends Transform with DependencyAPIMigration { - - override def prerequisites: Seq[TransformDependency] = Forms.LowForm - override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized - override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters - override def invalidates(a: Transform): Boolean = false - - def determineRenamerandAnnos(state: CircuitState): (AnnotationSeq, (String) => String) = { - // remove determine suffix annotation - val newAnnos = state.annotations.filterNot(_.isInstanceOf[ModuleNameSuffixAnnotation]) - val suffixes = state.annotations.collect({ case ModuleNameSuffixAnnotation(_, suffix) => suffix }) - require(suffixes.length <= 1) - val suffix = suffixes.headOption.getOrElse("") - - // skip renaming ExtModules and top-level module - val excludeSet = state.circuit.modules.flatMap { - case e: ExtModule => Some(e.name) - case m if (m.name == state.circuit.main) => Some(m.name) - case _ => None - }.toSet - - val renamer = { (name: String) => if (excludeSet(name)) name else name + suffix } - - (newAnnos, renamer) - } - - def renameInstanceModules(renamer: (String) => String)(stmt: Statement): Statement = { - stmt match { - case m: DefInstance => new DefInstance(m.info, m.name, renamer(m.module)) - case s => s.map(renameInstanceModules(renamer)) // if is statement, recurse - } - } - - def run(state: CircuitState, renamer: (String) => String): (Circuit, RenameMap) = { - val myRenames = RenameMap() - val c = state.circuit - val modulesx = c.modules.map { - 
case m if (renamer(m.name) != m.name) => - myRenames.record(ModuleTarget(c.main, m.name), ModuleTarget(c.main, renamer(m.name))) - m.map(renamer).map(renameInstanceModules(renamer)) - case m => m.map(renameInstanceModules(renamer)) - } - (Circuit(c.info, modulesx, c.main), myRenames) - } - - def execute(state: CircuitState): CircuitState = { - val (newAnnos, renamer) = determineRenamerandAnnos(state) - val (ret, renames) = run(state, renamer) - state.copy(circuit = ret, annotations = newAnnos, renames = Some(renames)) - } -} diff --git a/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala b/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala deleted file mode 100644 index 127a37fa7..000000000 --- a/src/main/scala/barstools/tapeout/transforms/AvoidExtModuleCollisions.scala +++ /dev/null @@ -1,32 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms - -import firrtl._ -import firrtl.annotations.NoTargetAnnotation -import firrtl.ir._ -import firrtl.options.Dependency -import firrtl.passes.memlib.ReplSeqMem -import firrtl.stage.Forms -import firrtl.stage.TransformManager.TransformDependency - -case class LinkExtModulesAnnotation(mustLink: Seq[ExtModule]) extends NoTargetAnnotation - -class AvoidExtModuleCollisions extends Transform with DependencyAPIMigration { - - override def prerequisites: Seq[TransformDependency] = Forms.HighForm - override def optionalPrerequisites: Seq[TransformDependency] = Seq(Dependency[RemoveUnusedModules]) - override def optionalPrerequisiteOf: Seq[TransformDependency] = { - Forms.HighEmitters :+ Dependency[ReplSeqMem] - } - override def invalidates(a: Transform): Boolean = false - - def execute(state: CircuitState): CircuitState = { - val mustLink = state.annotations.flatMap { - case LinkExtModulesAnnotation(mustLink) => mustLink - case _ => Nil - } - val newAnnos = state.annotations.filterNot(_.isInstanceOf[LinkExtModulesAnnotation]) - state.copy(circuit = state.circuit.copy(modules = state.circuit.modules ++ mustLink), annotations = newAnnos) - } -} diff --git a/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala b/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala deleted file mode 100644 index a81937a3a..000000000 --- a/src/main/scala/barstools/tapeout/transforms/ConvertToExtModPass.scala +++ /dev/null @@ -1,62 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms - -import firrtl._ -import firrtl.annotations.{ModuleTarget, ReferenceTarget, SingleTargetAnnotation} -import firrtl.ir._ -import firrtl.options.Dependency -import firrtl.passes.memlib.ReplSeqMem -import firrtl.stage.Forms -import firrtl.stage.TransformManager.TransformDependency - -case class ConvertToExtModAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { - def duplicate(n: ModuleTarget) = this.copy(n) -} - -// Converts some modules to external modules, based on a given function. If -// that function returns "true" then the module is converted into an ExtModule, -// otherwise it's left alone. 
-class ConvertToExtMod extends Transform with DependencyAPIMigration { - - override def prerequisites: Seq[TransformDependency] = Forms.HighForm - override def optionalPrerequisites: Seq[TransformDependency] = Seq.empty - override def optionalPrerequisiteOf: Seq[TransformDependency] = { - Forms.HighEmitters ++ Seq(Dependency[RemoveUnusedModules], Dependency[ReplSeqMem]) - } - override def invalidates(a: Transform): Boolean = false - - def run(state: CircuitState, makeExt: Set[String]): (Circuit, RenameMap) = { - val renames = RenameMap() - val c = state.circuit - renames.setCircuit(c.main) - val modulesx = c.modules.map { - case m: ExtModule => m - case m: Module => - val removing = collection.mutable.HashSet[String]() - def findDeadNames(statement: Statement): Unit = { - statement match { - case hn: IsDeclaration => removing += hn.name - case x => x.foreachStmt(findDeadNames) - } - } - if (makeExt(m.name)) { - m.foreachStmt(findDeadNames) - removing.foreach { name => - renames.record(ReferenceTarget(c.main, m.name, Nil, name, Nil), Nil) - } - new ExtModule(m.info, m.name, m.ports, m.name, Seq.empty) - } else { - m - } - } - (Circuit(c.info, modulesx, c.main), renames) - } - - def execute(state: CircuitState): CircuitState = { - val makeExt = state.annotations.collect({ case ConvertToExtModAnnotation(tgt) => tgt.module }).toSet - val newAnnos = state.annotations.filterNot(_.isInstanceOf[ConvertToExtModAnnotation]) - val (ret, renames) = run(state, makeExt) - state.copy(circuit = ret, annotations = newAnnos, renames = Some(renames)) - } -} diff --git a/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala b/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala deleted file mode 100644 index 47dae82c5..000000000 --- a/src/main/scala/barstools/tapeout/transforms/EnumerateModules.scala +++ /dev/null @@ -1,40 +0,0 @@ -// See LICENSE for license details. - -package barstools.tapeout.transforms - -import firrtl._ -import firrtl.ir._ -import firrtl.passes.Pass -import firrtl.stage.Forms -import firrtl.stage.TransformManager.TransformDependency - -class EnumerateModulesPass(enumerate: (Module) => Unit) extends Pass { - - def run(c: Circuit): Circuit = { - val modulesx = c.modules.map { - case m: ExtModule => m - case m: Module => { - enumerate(m) - m - } - } - Circuit(c.info, modulesx, c.main) - } -} - -class EnumerateModules(enumerate: (Module) => Unit) - extends Transform - with SeqTransformBased - with DependencyAPIMigration { - - override def prerequisites: Seq[TransformDependency] = Forms.LowForm - override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized - override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters - override def invalidates(a: Transform): Boolean = false - - def transforms: Seq[Transform] = Seq(new EnumerateModulesPass(enumerate)) - - def execute(state: CircuitState): CircuitState = { - runTransforms(state) - } -} diff --git a/src/main/scala/barstools/tapeout/transforms/ExtraTransforms.scala b/src/main/scala/barstools/tapeout/transforms/ExtraTransforms.scala new file mode 100644 index 000000000..8cb075518 --- /dev/null +++ b/src/main/scala/barstools/tapeout/transforms/ExtraTransforms.scala @@ -0,0 +1,26 @@ +// See LICENSE for license details. 
+ +package barstools.tapeout.transforms + +import firrtl.Mappers._ +import firrtl._ +import firrtl.annotations.{CircuitTarget, ModuleTarget, SingleTargetAnnotation} +import firrtl.ir._ +import firrtl.stage.Forms +import firrtl.stage.TransformManager.TransformDependency +import firrtl.options.{Dependency} + +class ExtraLowTransforms extends Transform with DependencyAPIMigration { + // this PropagatePresetAnnotations is needed to run the RemoveValidIf pass (that is removed from CIRCT). + // additionally, since that pass isn't explicitly a prereq of the LowFormEmitter it + // needs to wrapped in this xform + override def prerequisites: Seq[TransformDependency] = Forms.LowForm :+ + Dependency[firrtl.transforms.PropagatePresetAnnotations] + override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized + override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters + override def invalidates(a: Transform): Boolean = false + + def execute(state: CircuitState): CircuitState = { + state + } +} diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala b/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala index c48d4c359..706e8606e 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala @@ -5,9 +5,7 @@ import firrtl._ import firrtl.annotations._ import firrtl.ir._ import firrtl.options.{Dependency, InputAnnotationFileAnnotation, StageMain} -import firrtl.passes.memlib.ReplSeqMemAnnotation -import firrtl.stage.{FirrtlCircuitAnnotation, FirrtlStage, OutputFileAnnotation, RunFirrtlTransformAnnotation} -import firrtl.transforms.BlackBoxResourceFileNameAnno +import firrtl.stage.{FirrtlCircuitAnnotation, FirrtlStage, RunFirrtlTransformAnnotation} import logger.LazyLogging private class GenerateModelStageMain(annotations: AnnotationSeq) extends LazyLogging { @@ -38,15 +36,23 @@ private class GenerateModelStageMain(annotations: AnnotationSeq) extends LazyLog } def executeStageMain(): Unit = { - val annos = new FirrtlStage().execute(Array.empty, annotations) + val appendedAnnotations = annotations.filter(_ match { + case CompilerNameAnnotation(_) => true + case _ => false + }).map(_ match { + case CompilerNameAnnotation("low") => Some(RunFirrtlTransformAnnotation(Dependency[ExtraLowTransforms])) + case _ => None + }).flatten + val annos = new FirrtlStage().execute(Array.empty, annotations ++ appendedAnnotations) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => dumpAnnos(annos) case _ => - throw new Exception(s"executeTop failed while executing FIRRTL!\n") + throw new Exception(s"executeStageMain failed while executing FIRRTL!\n") } } } +// main run class object GenerateModelStageMain extends StageMain(new TapeoutStage()) diff --git a/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala b/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala deleted file mode 100644 index b027a7827..000000000 --- a/src/main/scala/barstools/tapeout/transforms/ReParentCircuit.scala +++ /dev/null @@ -1,76 +0,0 @@ -// See LICENSE for license details. 
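In the GenerateModelStageMain change above, the stage decides whether to append the ExtraLowTransforms wrapper by filtering for CompilerNameAnnotation("low") with a filter/map/flatten chain. A collect-based spelling of the same logic, shown here only as an equivalent sketch (using the same imports GenerateModelStageMain.scala already has), is the more idiomatic Scala form of that pattern:

// Equivalent to the filter(...).map(...).flatten chain in executeStageMain:
// append the low-form wrapper transform only when a "low" compiler was requested.
val appendedAnnotations: Seq[Annotation] = annotations.collect {
  case CompilerNameAnnotation("low") => RunFirrtlTransformAnnotation(Dependency[ExtraLowTransforms])
}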
- -package barstools.tapeout.transforms - -import firrtl._ -import firrtl.annotations._ -import firrtl.options.Dependency -import firrtl.stage.Forms -import firrtl.stage.TransformManager.TransformDependency -import firrtl.annotations.TargetToken.{Instance, OfModule} - -case class ReParentCircuitAnnotation(target: ModuleTarget) extends SingleTargetAnnotation[ModuleTarget] { - def duplicate(n: ModuleTarget) = this.copy(n) -} - -class ReParentCircuit extends Transform with DependencyAPIMigration { - - override def prerequisites: Seq[TransformDependency] = Forms.HighForm - override def optionalPrerequisites: Seq[TransformDependency] = Seq.empty - override def optionalPrerequisiteOf: Seq[TransformDependency] = { - Forms.HighEmitters :+ Dependency[RemoveUnusedModules] - } - override def invalidates(a: Transform): Boolean = false - - def execute(state: CircuitState): CircuitState = { - val c = state.circuit - val newTopName = state.annotations.collectFirst { case ReParentCircuitAnnotation(tgt) => - tgt.module - } - val newCircuit = c.copy(main = newTopName.getOrElse(c.main)) - val mainRename = newTopName.map { s => - val rmap = RenameMap() - rmap.record(CircuitTarget(c.main), CircuitTarget(s)) - rmap - } - - val newAnnotations = newTopName - .map({ topName => - // Update InstanceTargets and ReferenceTargets - // Yes, these are identical functions, but the copy methods force separate implementations - def updateInstance(t: InstanceTarget): Option[InstanceTarget] = { - val idx = t.path.lastIndexWhere(_._2.value == topName) - if (idx == -1) Some(t.copy(circuit = topName)) - else Some(t.copy(circuit = topName, module = topName, path = t.path.drop(idx + 1))) - } - def updateReference(t: ReferenceTarget): Option[ReferenceTarget] = { - val idx = t.path.lastIndexWhere(_._2.value == topName) - if (idx == -1) Some(t.copy(circuit = topName)) - else Some(t.copy(circuit = topName, module = topName, path = t.path.drop(idx + 1))) - } - - AnnotationSeq( - state.annotations.toSeq - .map({ - case x: SingleTargetAnnotation[InstanceTarget] if x.target.isInstanceOf[InstanceTarget] => - updateInstance(x.target).map(y => x.duplicate(y)) - case x: SingleTargetAnnotation[ReferenceTarget] if x.target.isInstanceOf[ReferenceTarget] => - updateReference(x.target).map(y => x.duplicate(y)) - case x: MultiTargetAnnotation => - val newTargets: Seq[Seq[Option[Target]]] = x.targets.map(_.map({ - case y: InstanceTarget => updateInstance(y) - case y: ReferenceTarget => updateReference(y) - case y => Some(y) - })) - if (newTargets.flatten.forall(_.isDefined)) Some(x.duplicate(newTargets.map(_.map(_.get)))) else None - case x => Some(x) - }) - .filter(_.isDefined) - .map(_.get) - ) - }) - .getOrElse(state.annotations) - - state.copy(circuit = newCircuit, renames = mainRename, annotations = newAnnotations) - } -} diff --git a/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala b/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala deleted file mode 100644 index 5d1cbc6cd..000000000 --- a/src/main/scala/barstools/tapeout/transforms/RemoveUnusedModules.scala +++ /dev/null @@ -1,67 +0,0 @@ -// See LICENSE for license details. 
- -package barstools.tapeout.transforms - -import firrtl._ -import firrtl.annotations.ModuleTarget -import firrtl.ir._ -import firrtl.options.Dependency -import firrtl.passes.memlib.ReplSeqMem -import firrtl.stage.Forms -import firrtl.stage.TransformManager.TransformDependency - -// Removes all the unused modules in a circuit by recursing through every -// instance (starting at the main module) -class RemoveUnusedModules extends Transform with DependencyAPIMigration { - - override def prerequisites: Seq[TransformDependency] = Forms.HighForm - override def optionalPrerequisites: Seq[TransformDependency] = Seq.empty - override def optionalPrerequisiteOf: Seq[TransformDependency] = { - Forms.HighEmitters :+ Dependency[ReplSeqMem] - } - override def invalidates(a: Transform): Boolean = false - - def execute(state: CircuitState): CircuitState = { - val modulesByName = state.circuit.modules.map { - case m: Module => (m.name, Some(m)) - case m: ExtModule => (m.name, None) - }.toMap - - def getUsedModules(om: Option[Module]): Set[String] = { - om match { - case Some(m) => { - def someStatements(statement: Statement): Seq[Statement] = - statement match { - case b: Block => - b.stmts.map { someStatements(_) } - .foldLeft(Seq[Statement]())(_ ++ _) - case when: Conditionally => - someStatements(when.conseq) ++ someStatements(when.alt) - case i: DefInstance => Seq(i) - case _ => Seq() - } - - someStatements(m.body).map { - case s: DefInstance => Set(s.module) | getUsedModules(modulesByName(s.module)) - case _ => Set[String]() - }.foldLeft(Set(m.name))(_ | _) - } - - case None => Set.empty[String] - } - } - val usedModuleSet = getUsedModules(modulesByName(state.circuit.main)) - - val usedModuleSeq = state.circuit.modules.filter { usedModuleSet contains _.name } - val usedModuleNames = usedModuleSeq.map(_.name) - - val renames = state.renames.getOrElse(RenameMap()) - - state.circuit.modules.filterNot { usedModuleSet contains _.name }.foreach { x => - renames.record(ModuleTarget(state.circuit.main, x.name), Nil) - } - - val newCircuit = Circuit(state.circuit.info, usedModuleSeq, state.circuit.main) - state.copy(circuit = newCircuit, renames = Some(renames)) - } -} diff --git a/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala b/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala deleted file mode 100644 index 29c9f0da7..000000000 --- a/src/main/scala/barstools/tapeout/transforms/ResetInverter.scala +++ /dev/null @@ -1,69 +0,0 @@ -// See LICENSE for license details. 
- -package barstools.tapeout.transforms - -import chisel3.experimental.RunFirrtlTransform -import firrtl.PrimOps.Not -import firrtl.annotations.{Annotation, CircuitName, ModuleName, SingleTargetAnnotation} -import firrtl.ir._ -import firrtl.passes.Pass -import firrtl.stage.Forms -import firrtl.stage.TransformManager.TransformDependency -import firrtl.{CircuitState, DependencyAPIMigration, Transform} - -case class ResetInverterAnnotation(target: ModuleName) extends SingleTargetAnnotation[ModuleName] { - override def duplicate(n: ModuleName): Annotation = ResetInverterAnnotation(n) -} - -object ResetN extends Pass { - private val Bool = UIntType(IntWidth(1)) - // Only works on Modules with a Bool port named reset - def invertReset(mod: Module): Module = { - // Check that it actually has reset - require(mod.ports.exists(p => p.name == "reset" && p.tpe == Bool), "Can only invert reset on a module with reset!") - // Rename "reset" to "reset_n" - val portsx = mod.ports.map { - case Port(info, "reset", Input, Bool) => - Port(info, "reset_n", Input, Bool) - case other => other - } - val newReset = DefNode(NoInfo, "reset", DoPrim(Not, Seq(Reference("reset_n", Bool)), Seq.empty, Bool)) - val bodyx = Block(Seq(newReset, mod.body)) - mod.copy(ports = portsx, body = bodyx) - } - - def run(c: Circuit): Circuit = { - c.copy(modules = c.modules.map { - case mod: Module if mod.name == c.main => invertReset(mod) - case other => other - }) - } -} - -class ResetInverterTransform extends Transform with DependencyAPIMigration { - - override def prerequisites: Seq[TransformDependency] = Forms.LowForm - override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized - override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters - override def invalidates(a: Transform): Boolean = false - - override def execute(state: CircuitState): CircuitState = { - state.annotations.filter(_.isInstanceOf[ResetInverterAnnotation]) match { - case Nil => state - case Seq(ResetInverterAnnotation(ModuleName(state.circuit.main, CircuitName(_)))) => - state.copy(circuit = ResetN.run(state.circuit)) - case annotations => - throw new Exception(s"There should be only one InvertReset annotation: got ${annotations.mkString(" -- ")}") - } - } -} - -trait ResetInverter { - self: chisel3.Module => - def invert[T <: chisel3.Module](module: T): Unit = { - chisel3.experimental.annotate(new chisel3.experimental.ChiselAnnotation with RunFirrtlTransform { - def transformClass: Class[_ <: Transform] = classOf[ResetInverterTransform] - def toFirrtl: Annotation = ResetInverterAnnotation(module.toNamed) - }) - } -} diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index 7ab597174..8f3af9e3b 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -4,6 +4,7 @@ package barstools.tapeout.transforms.stage import barstools.tapeout.transforms.GenerateModelStageMain import chisel3.stage.ChiselCli +import firrtl.stage.{RunFirrtlTransformAnnotation} import firrtl.AnnotationSeq import firrtl.annotations.{Annotation, NoTargetAnnotation} import firrtl.options.{HasShellOptions, Shell, ShellOption, Stage, Unserializable} @@ -27,12 +28,32 @@ object OutAnnoAnnotation extends HasShellOptions { ) } +case class CompilerNameAnnotation(name: String) extends NoTargetAnnotation with TapeoutOption + +// duplicate of 
firrtl.stage.CompilerAnnotation but needed so that you can have a +// CompilerAnnotation to match on when adding new transforms +object DuplicateCompilerAnnotation extends HasShellOptions { + val options: Seq[ShellOption[_]] = Seq( + new ShellOption[String]( + longOption = "duplicate-compiler", + shortOption = Some("DX"), + toAnnotationSeq = (s: String) => { + Seq( + CompilerNameAnnotation(s)) + }, + helpText = "duplicate-compiler", + helpValueName = Some("same as --compiler FIRRTL flag") + ) + ) +} + trait TapeoutCli { this: Shell => parser.note("Tapeout specific options") Seq( - OutAnnoAnnotation + OutAnnoAnnotation, + DuplicateCompilerAnnotation ).foreach(_.addOptions(parser)) } From a9f9068baf5ecf3aa3c37980738971036e411731 Mon Sep 17 00:00:00 2001 From: joey0320 Date: Mon, 20 Feb 2023 23:58:46 -0800 Subject: [PATCH 249/273] remove duplicate compiler annotation --- .../transforms/GenerateModelStageMain.scala | 9 +-------- .../transforms/stage/TapeoutStage.scala | 20 ------------------- 2 files changed, 1 insertion(+), 28 deletions(-) diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala b/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala index 706e8606e..08cd8d04a 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala @@ -36,14 +36,7 @@ private class GenerateModelStageMain(annotations: AnnotationSeq) extends LazyLog } def executeStageMain(): Unit = { - val appendedAnnotations = annotations.filter(_ match { - case CompilerNameAnnotation(_) => true - case _ => false - }).map(_ match { - case CompilerNameAnnotation("low") => Some(RunFirrtlTransformAnnotation(Dependency[ExtraLowTransforms])) - case _ => None - }).flatten - val annos = new FirrtlStage().execute(Array.empty, annotations ++ appendedAnnotations) + val annos = new FirrtlStage().execute(Array.empty, annotations) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { case Some(circuit) => diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index 8f3af9e3b..14b57e3ec 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -28,32 +28,12 @@ object OutAnnoAnnotation extends HasShellOptions { ) } -case class CompilerNameAnnotation(name: String) extends NoTargetAnnotation with TapeoutOption - -// duplicate of firrtl.stage.CompilerAnnotation but needed so that you can have a -// CompilerAnnotation to match on when adding new transforms -object DuplicateCompilerAnnotation extends HasShellOptions { - val options: Seq[ShellOption[_]] = Seq( - new ShellOption[String]( - longOption = "duplicate-compiler", - shortOption = Some("DX"), - toAnnotationSeq = (s: String) => { - Seq( - CompilerNameAnnotation(s)) - }, - helpText = "duplicate-compiler", - helpValueName = Some("same as --compiler FIRRTL flag") - ) - ) -} - trait TapeoutCli { this: Shell => parser.note("Tapeout specific options") Seq( OutAnnoAnnotation, - DuplicateCompilerAnnotation ).foreach(_.addOptions(parser)) } From 4e398da790b05906e9ce22b7959a454e329e4ccd Mon Sep 17 00:00:00 2001 From: Tynan McAuley Date: Wed, 22 Feb 2023 21:54:11 -0800 Subject: [PATCH 250/273] Update scala/sbt/chisel versions The previous setup used Scala 2.12 even though Chipyard was trying to override it to use 2.13.10. 
Also sync Chisel minor versions with Chipyard, and update to a recent sbt version. --- build.sbt | 7 +++---- project/build.properties | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/build.sbt b/build.sbt index 1a9727640..d27a84289 100644 --- a/build.sbt +++ b/build.sbt @@ -1,15 +1,14 @@ // See LICENSE for license details. val defaultVersions = Map( - "chisel3" -> "3.5.1", - "chisel-iotesters" -> "2.5.1" + "chisel3" -> "3.5.5", + "chisel-iotesters" -> "2.5.5" ) organization := "edu.berkeley.cs" version := "0.4-SNAPSHOT" name := "tapeout" -scalaVersion := "2.12.13" -crossScalaVersions := Seq("2.12.13", "2.13.6") +scalaVersion := "2.13.10" scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls") Test / scalacOptions ++= Seq("-language:reflectiveCalls") fork := true diff --git a/project/build.properties b/project/build.properties index 0837f7a13..46e43a97e 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=1.3.13 +sbt.version=1.8.2 From 20587cfd40fbbb4ab57af851c3cef85f12fe2a9f Mon Sep 17 00:00:00 2001 From: Tynan McAuley Date: Wed, 22 Feb 2023 22:05:31 -0800 Subject: [PATCH 251/273] Run scalafmt after scala version update --- .../tapeout/transforms/ExtraTransforms.scala | 2 +- .../transforms/GenerateModelStageMain.scala | 17 ++++++++++------- .../tapeout/transforms/stage/TapeoutStage.scala | 3 +-- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/src/main/scala/barstools/tapeout/transforms/ExtraTransforms.scala b/src/main/scala/barstools/tapeout/transforms/ExtraTransforms.scala index 8cb075518..f7ef25c6c 100644 --- a/src/main/scala/barstools/tapeout/transforms/ExtraTransforms.scala +++ b/src/main/scala/barstools/tapeout/transforms/ExtraTransforms.scala @@ -14,7 +14,7 @@ class ExtraLowTransforms extends Transform with DependencyAPIMigration { // this PropagatePresetAnnotations is needed to run the RemoveValidIf pass (that is removed from CIRCT). 
// additionally, since that pass isn't explicitly a prereq of the LowFormEmitter it // needs to wrapped in this xform - override def prerequisites: Seq[TransformDependency] = Forms.LowForm :+ + override def prerequisites: Seq[TransformDependency] = Forms.LowForm :+ Dependency[firrtl.transforms.PropagatePresetAnnotations] override def optionalPrerequisites: Seq[TransformDependency] = Forms.LowFormOptimized override def optionalPrerequisiteOf: Seq[TransformDependency] = Forms.LowEmitters diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala b/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala index 706e8606e..b40b2f92a 100644 --- a/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala +++ b/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala @@ -36,13 +36,16 @@ private class GenerateModelStageMain(annotations: AnnotationSeq) extends LazyLog } def executeStageMain(): Unit = { - val appendedAnnotations = annotations.filter(_ match { - case CompilerNameAnnotation(_) => true - case _ => false - }).map(_ match { - case CompilerNameAnnotation("low") => Some(RunFirrtlTransformAnnotation(Dependency[ExtraLowTransforms])) - case _ => None - }).flatten + val appendedAnnotations = annotations + .filter(_ match { + case CompilerNameAnnotation(_) => true + case _ => false + }) + .map(_ match { + case CompilerNameAnnotation("low") => Some(RunFirrtlTransformAnnotation(Dependency[ExtraLowTransforms])) + case _ => None + }) + .flatten val annos = new FirrtlStage().execute(Array.empty, annotations ++ appendedAnnotations) annos.collectFirst { case FirrtlCircuitAnnotation(circuit) => circuit } match { diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index 8f3af9e3b..606f19232 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -38,8 +38,7 @@ object DuplicateCompilerAnnotation extends HasShellOptions { longOption = "duplicate-compiler", shortOption = Some("DX"), toAnnotationSeq = (s: String) => { - Seq( - CompilerNameAnnotation(s)) + Seq(CompilerNameAnnotation(s)) }, helpText = "duplicate-compiler", helpValueName = Some("same as --compiler FIRRTL flag") From d9317d6019331ec2a3a9285494c0a808b6ad2160 Mon Sep 17 00:00:00 2001 From: Tynan McAuley Date: Wed, 22 Feb 2023 22:02:53 -0800 Subject: [PATCH 252/273] Remove unused test file This file is breaking the test build, the class it is testing was removed in df3232f7d967d22570dd3c90706ce9422c25df07. --- .../transforms/ResetInverterSpec.scala | 50 ------------------- 1 file changed, 50 deletions(-) delete mode 100644 src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala diff --git a/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala b/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala deleted file mode 100644 index 5d4c4ab37..000000000 --- a/src/test/scala/barstools/tapeout/transforms/ResetInverterSpec.scala +++ /dev/null @@ -1,50 +0,0 @@ -// See LICENSE for license details. 
- -package barstools.tapeout.transforms - -import chisel3._ -import chisel3.stage.{ChiselGeneratorAnnotation, ChiselStage} -import firrtl.{EmittedFirrtlCircuitAnnotation, EmittedFirrtlModuleAnnotation} -import org.scalatest.freespec.AnyFreeSpec -import org.scalatest.matchers.should.Matchers - -class ExampleModuleNeedsResetInverted extends Module with ResetInverter { - val io = IO(new Bundle { - val out = Output(UInt(32.W)) - }) - - val r = RegInit(0.U) - - io.out := r - - invert(this) -} - -class ResetNSpec extends AnyFreeSpec with Matchers { - "Inverting reset needs to be done throughout module in Chirrtl" in { - val chirrtl = (new ChiselStage) - .emitChirrtl(new ExampleModuleNeedsResetInverted, Array("--target-dir", "test_run_dir/reset_n_spec")) - chirrtl should include("input reset :") - (chirrtl should not).include("input reset_n :") - (chirrtl should not).include("node reset = not(reset_n)") - } - - "Inverting reset needs to be done throughout module when generating firrtl" in { - // generate low-firrtl - val firrtl = (new ChiselStage) - .execute( - Array("-X", "low", "--target-dir", "test_run_dir/reset_inverting_spec"), - Seq(ChiselGeneratorAnnotation(() => new ExampleModuleNeedsResetInverted)) - ) - .collect { - case EmittedFirrtlCircuitAnnotation(a) => a - case EmittedFirrtlModuleAnnotation(a) => a - } - .map(_.value) - .mkString("") - - firrtl should include("input reset_n :") - firrtl should include("node reset = not(reset_n)") - (firrtl should not).include("input reset :") - } -} From fe81afec14634316606a9dd10628c220d53bd256 Mon Sep 17 00:00:00 2001 From: abejgonzalez Date: Fri, 3 Mar 2023 14:58:29 -0800 Subject: [PATCH 253/273] Update build.sbt for sbt-assembly --- build.sbt | 2 -- 1 file changed, 2 deletions(-) diff --git a/build.sbt b/build.sbt index d27a84289..8a1a8b60e 100644 --- a/build.sbt +++ b/build.sbt @@ -19,8 +19,6 @@ libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { libraryDependencies ++= Seq( "com.typesafe.play" %% "play-json" % "2.9.2", "org.scalatest" %% "scalatest" % "3.2.9" % "test", - "org.apache.logging.log4j" % "log4j-api" % "2.11.2", - "org.apache.logging.log4j" % "log4j-core" % "2.11.2" ) addCompilerPlugin("edu.berkeley.cs" % "chisel3-plugin" % defaultVersions("chisel3") cross CrossVersion.full) resolvers ++= Seq( From 0a4466da1ed291684859ba4765e45d498b469859 Mon Sep 17 00:00:00 2001 From: Kevin Anderson Date: Fri, 17 Mar 2023 20:37:04 -0700 Subject: [PATCH 254/273] Add name to IOCell definition --- .../barstools/iocell/chisel/IOCell.scala | 35 +++++++++++++++---- 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/src/main/scala/barstools/iocell/chisel/IOCell.scala b/src/main/scala/barstools/iocell/chisel/IOCell.scala index 6d4449366..993f8b395 100644 --- a/src/main/scala/barstools/iocell/chisel/IOCell.scala +++ b/src/main/scala/barstools/iocell/chisel/IOCell.scala @@ -59,7 +59,9 @@ class DigitalInIOCellBundle extends Bundle { val ie = Input(Bool()) } -trait IOCell extends BaseModule +trait IOCell extends BaseModule { + var i_name : String +} trait AnalogIOCell extends IOCell { val io: AnalogIOCellBundle @@ -87,15 +89,19 @@ abstract class GenericIOCell extends BlackBox with HasBlackBoxResource { class GenericAnalogIOCell extends GenericIOCell with AnalogIOCell { val io = IO(new AnalogIOCellBundle) + var i_name = "NoNameAssigned" } class GenericDigitalGPIOCell extends GenericIOCell with DigitalGPIOCell { val io = IO(new DigitalGPIOCellBundle) + var i_name = "NoNameAssigned" } class GenericDigitalInIOCell extends GenericIOCell with 
DigitalInIOCell { val io = IO(new DigitalInIOCellBundle) + var i_name = "NoNameAssigned" } class GenericDigitalOutIOCell extends GenericIOCell with DigitalOutIOCell { val io = IO(new DigitalOutIOCellBundle) + var i_name = "NoNameAssigned" } trait IOCellTypeParams { @@ -112,8 +118,12 @@ case class GenericIOCellParams() extends IOCellTypeParams { def output() = Module(new GenericDigitalOutIOCell) } -object IOCell { +trait IOCellName { + var i_name : String +} +object IOCell extends IOCellName{ + var i_name = "NoNameAssigned" /** From within a RawModule or MultiIOModule context, generate new module IOs from a given * signal and return the new IO and a Seq containing all generated IO cells. * @param coreSignal The signal onto which to add IO cells @@ -156,10 +166,14 @@ object IOCell { )(coreSignal: T, padSignal: T ): Seq[IOCell] = { + print("Suggested names: " + name + " ") DataMirror.directionOf(coreSignal) match { case ActualDirection.Input => { val iocell = typeParams.input() - name.foreach(n => iocell.suggestName(n)) + name.foreach(n => { + iocell.suggestName(n) + iocell.i_name = n + }) coreSignal := castFromBool(iocell.io.i) iocell.io.ie := true.B iocell.io.pad := castToBool(padSignal) @@ -167,7 +181,10 @@ object IOCell { } case ActualDirection.Output => { val iocell = typeParams.output() - name.foreach(n => iocell.suggestName(n)) + name.foreach(n => { + iocell.suggestName(n) + iocell.i_name = n + }) iocell.io.o := castToBool(coreSignal) iocell.io.oe := true.B padSignal := castFromBool(iocell.io.pad) @@ -215,7 +232,10 @@ object IOCell { // Note that we are relying on chisel deterministically naming this in the index order (which it does) // This has the side-effect of naming index 0 with no _0 suffix, which is how chisel names other signals // An alternative solution would be to suggestName(n + "_" + i) - name.foreach(n => iocell.suggestName(n)) + name.foreach(n => { + iocell.suggestName(n) + iocell.i_name = n + }) iocell.io.pad := sig iocell.io.ie := true.B iocell @@ -230,7 +250,10 @@ object IOCell { // Note that we are relying on chisel deterministically naming this in the index order (which it does) // This has the side-effect of naming index 0 with no _0 suffix, which is how chisel names other signals // An alternative solution would be to suggestName(n + "_" + i) - name.foreach(n => iocell.suggestName(n)) + name.foreach(n => { + iocell.suggestName(n) + iocell.i_name = n + }) iocell.io.o := sig iocell.io.oe := true.B iocell From 0df6e34813cbf511a9fdff68a0daf895ad50b6a2 Mon Sep 17 00:00:00 2001 From: Kevin Anderson Date: Fri, 17 Mar 2023 20:48:35 -0700 Subject: [PATCH 255/273] formatting fix --- src/main/scala/barstools/iocell/chisel/IOCell.scala | 12 +++++++----- .../tapeout/transforms/stage/TapeoutStage.scala | 2 +- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/main/scala/barstools/iocell/chisel/IOCell.scala b/src/main/scala/barstools/iocell/chisel/IOCell.scala index 993f8b395..65e1c73d7 100644 --- a/src/main/scala/barstools/iocell/chisel/IOCell.scala +++ b/src/main/scala/barstools/iocell/chisel/IOCell.scala @@ -60,7 +60,7 @@ class DigitalInIOCellBundle extends Bundle { } trait IOCell extends BaseModule { - var i_name : String + var i_name: String } trait AnalogIOCell extends IOCell { @@ -119,11 +119,11 @@ case class GenericIOCellParams() extends IOCellTypeParams { } trait IOCellName { - var i_name : String + var i_name: String } -object IOCell extends IOCellName{ - var i_name = "NoNameAssigned" +object IOCell extends IOCellName { + /** From within a RawModule or 
MultiIOModule context, generate new module IOs from a given * signal and return the new IO and a Seq containing all generated IO cells. * @param coreSignal The signal onto which to add IO cells @@ -144,6 +144,8 @@ object IOCell extends IOCellName{ (padSignal, iocells) } + var i_name = "NoNameAssigned" + /** Connect two identical signals together by adding IO cells between them and return a Seq * containing all generated IO cells. * @param coreSignal The core-side (internal) signal onto which to connect/add IO cells @@ -253,7 +255,7 @@ object IOCell extends IOCellName{ name.foreach(n => { iocell.suggestName(n) iocell.i_name = n - }) + }) iocell.io.o := sig iocell.io.oe := true.B iocell diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala index 14b57e3ec..cdae1bfd7 100644 --- a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala +++ b/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala @@ -33,7 +33,7 @@ trait TapeoutCli { parser.note("Tapeout specific options") Seq( - OutAnnoAnnotation, + OutAnnoAnnotation ).foreach(_.addOptions(parser)) } From cc4f8419378272d2c309aa29da197d9cadfce629 Mon Sep 17 00:00:00 2001 From: Kevin Anderson Date: Sat, 18 Mar 2023 13:04:39 -0700 Subject: [PATCH 256/273] Code improvement; define IOCell name as Option and place in trait to reduce code modifications --- .../barstools/iocell/chisel/IOCell.scala | 35 +++++++++---------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/src/main/scala/barstools/iocell/chisel/IOCell.scala b/src/main/scala/barstools/iocell/chisel/IOCell.scala index 65e1c73d7..5bbfb1191 100644 --- a/src/main/scala/barstools/iocell/chisel/IOCell.scala +++ b/src/main/scala/barstools/iocell/chisel/IOCell.scala @@ -60,7 +60,17 @@ class DigitalInIOCellBundle extends Bundle { } trait IOCell extends BaseModule { - var i_name: String + var iocell_name : Option[String] = None + + /** Set IOCell name + * @param s Proposed name for the IOCell + * + * @return An inherited IOCell with given the proposed name + */ + def suggestName(s: String) : this.type = { + iocell_name = Some(s) + super.suggestName(s) + } } trait AnalogIOCell extends IOCell { @@ -89,19 +99,15 @@ abstract class GenericIOCell extends BlackBox with HasBlackBoxResource { class GenericAnalogIOCell extends GenericIOCell with AnalogIOCell { val io = IO(new AnalogIOCellBundle) - var i_name = "NoNameAssigned" } class GenericDigitalGPIOCell extends GenericIOCell with DigitalGPIOCell { val io = IO(new DigitalGPIOCellBundle) - var i_name = "NoNameAssigned" } class GenericDigitalInIOCell extends GenericIOCell with DigitalInIOCell { val io = IO(new DigitalInIOCellBundle) - var i_name = "NoNameAssigned" } class GenericDigitalOutIOCell extends GenericIOCell with DigitalOutIOCell { val io = IO(new DigitalOutIOCellBundle) - var i_name = "NoNameAssigned" } trait IOCellTypeParams { @@ -118,11 +124,9 @@ case class GenericIOCellParams() extends IOCellTypeParams { def output() = Module(new GenericDigitalOutIOCell) } -trait IOCellName { - var i_name: String -} -object IOCell extends IOCellName { + +object IOCell { /** From within a RawModule or MultiIOModule context, generate new module IOs from a given * signal and return the new IO and a Seq containing all generated IO cells. 
@@ -144,8 +148,6 @@ object IOCell extends IOCellName { (padSignal, iocells) } - var i_name = "NoNameAssigned" - /** Connect two identical signals together by adding IO cells between them and return a Seq * containing all generated IO cells. * @param coreSignal The core-side (internal) signal onto which to connect/add IO cells @@ -168,13 +170,11 @@ object IOCell extends IOCellName { )(coreSignal: T, padSignal: T ): Seq[IOCell] = { - print("Suggested names: " + name + " ") DataMirror.directionOf(coreSignal) match { case ActualDirection.Input => { val iocell = typeParams.input() name.foreach(n => { - iocell.suggestName(n) - iocell.i_name = n + iocell.suggestName(n) }) coreSignal := castFromBool(iocell.io.i) iocell.io.ie := true.B @@ -184,8 +184,7 @@ object IOCell extends IOCellName { case ActualDirection.Output => { val iocell = typeParams.output() name.foreach(n => { - iocell.suggestName(n) - iocell.i_name = n + iocell.suggestName(n) }) iocell.io.o := castToBool(coreSignal) iocell.io.oe := true.B @@ -236,7 +235,6 @@ object IOCell extends IOCellName { // An alternative solution would be to suggestName(n + "_" + i) name.foreach(n => { iocell.suggestName(n) - iocell.i_name = n }) iocell.io.pad := sig iocell.io.ie := true.B @@ -254,8 +252,7 @@ object IOCell extends IOCellName { // An alternative solution would be to suggestName(n + "_" + i) name.foreach(n => { iocell.suggestName(n) - iocell.i_name = n - }) + }) iocell.io.o := sig iocell.io.oe := true.B iocell From 96155c845c2a545bf1aa9d7dfd3b463f8b7efb44 Mon Sep 17 00:00:00 2001 From: Kevin Anderson Date: Sat, 18 Mar 2023 13:09:38 -0700 Subject: [PATCH 257/273] format IOCell.scala --- .../barstools/iocell/chisel/IOCell.scala | 22 +++++++++---------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/main/scala/barstools/iocell/chisel/IOCell.scala b/src/main/scala/barstools/iocell/chisel/IOCell.scala index 5bbfb1191..b90e43ac9 100644 --- a/src/main/scala/barstools/iocell/chisel/IOCell.scala +++ b/src/main/scala/barstools/iocell/chisel/IOCell.scala @@ -60,14 +60,14 @@ class DigitalInIOCellBundle extends Bundle { } trait IOCell extends BaseModule { - var iocell_name : Option[String] = None + var iocell_name: Option[String] = None - /** Set IOCell name - * @param s Proposed name for the IOCell - * - * @return An inherited IOCell with given the proposed name - */ - def suggestName(s: String) : this.type = { + /** Set IOCell name + * @param s Proposed name for the IOCell + * + * @return An inherited IOCell with given the proposed name + */ + def suggestName(s: String): this.type = { iocell_name = Some(s) super.suggestName(s) } @@ -124,8 +124,6 @@ case class GenericIOCellParams() extends IOCellTypeParams { def output() = Module(new GenericDigitalOutIOCell) } - - object IOCell { /** From within a RawModule or MultiIOModule context, generate new module IOs from a given @@ -174,7 +172,7 @@ object IOCell { case ActualDirection.Input => { val iocell = typeParams.input() name.foreach(n => { - iocell.suggestName(n) + iocell.suggestName(n) }) coreSignal := castFromBool(iocell.io.i) iocell.io.ie := true.B @@ -184,7 +182,7 @@ object IOCell { case ActualDirection.Output => { val iocell = typeParams.output() name.foreach(n => { - iocell.suggestName(n) + iocell.suggestName(n) }) iocell.io.o := castToBool(coreSignal) iocell.io.oe := true.B @@ -252,7 +250,7 @@ object IOCell { // An alternative solution would be to suggestName(n + "_" + i) name.foreach(n => { iocell.suggestName(n) - }) + }) iocell.io.o := sig iocell.io.oe := true.B iocell From 
400ce780a9b59a4a617e3ae9dbd0e97ca8e802ab Mon Sep 17 00:00:00 2001 From: Ethan Wu Date: Tue, 30 May 2023 18:39:57 -0700 Subject: [PATCH 258/273] move iocells to separate "project" root --- {src => iocell/src}/main/resources/barstools/iocell/vsrc/Analog.v | 0 {src => iocell/src}/main/resources/barstools/iocell/vsrc/IOCell.v | 0 .../src}/main/scala/barstools/iocell/chisel/Analog.scala | 0 .../src}/main/scala/barstools/iocell/chisel/IOCell.scala | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename {src => iocell/src}/main/resources/barstools/iocell/vsrc/Analog.v (100%) rename {src => iocell/src}/main/resources/barstools/iocell/vsrc/IOCell.v (100%) rename {src => iocell/src}/main/scala/barstools/iocell/chisel/Analog.scala (100%) rename {src => iocell/src}/main/scala/barstools/iocell/chisel/IOCell.scala (100%) diff --git a/src/main/resources/barstools/iocell/vsrc/Analog.v b/iocell/src/main/resources/barstools/iocell/vsrc/Analog.v similarity index 100% rename from src/main/resources/barstools/iocell/vsrc/Analog.v rename to iocell/src/main/resources/barstools/iocell/vsrc/Analog.v diff --git a/src/main/resources/barstools/iocell/vsrc/IOCell.v b/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v similarity index 100% rename from src/main/resources/barstools/iocell/vsrc/IOCell.v rename to iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v diff --git a/src/main/scala/barstools/iocell/chisel/Analog.scala b/iocell/src/main/scala/barstools/iocell/chisel/Analog.scala similarity index 100% rename from src/main/scala/barstools/iocell/chisel/Analog.scala rename to iocell/src/main/scala/barstools/iocell/chisel/Analog.scala diff --git a/src/main/scala/barstools/iocell/chisel/IOCell.scala b/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala similarity index 100% rename from src/main/scala/barstools/iocell/chisel/IOCell.scala rename to iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala From 2d45407aad78876a5586e157eb9d69e80328e78f Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Tue, 20 Jun 2023 17:09:18 -0700 Subject: [PATCH 259/273] asBool() to asBool --- build.sbt | 7 ++----- src/main/scala/barstools/iocell/chisel/IOCell.scala | 2 +- .../tapeout/transforms/utils/ProgrammaticBundle.scala | 2 -- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/build.sbt b/build.sbt index 8a1a8b60e..9ce7d00cc 100644 --- a/build.sbt +++ b/build.sbt @@ -1,9 +1,6 @@ // See LICENSE for license details. 
-val defaultVersions = Map( - "chisel3" -> "3.5.5", - "chisel-iotesters" -> "2.5.5" -) +val defaultVersions = Map("chisel3" -> "3.6.0") organization := "edu.berkeley.cs" version := "0.4-SNAPSHOT" @@ -13,7 +10,7 @@ scalacOptions := Seq("-deprecation", "-feature", "-language:reflectiveCalls") Test / scalacOptions ++= Seq("-language:reflectiveCalls") fork := true mainClass := Some("barstools.macros.MacroCompiler") -libraryDependencies ++= Seq("chisel3","chisel-iotesters").map { +libraryDependencies ++= Seq("chisel3").map { dep: String => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", defaultVersions(dep)) } libraryDependencies ++= Seq( diff --git a/src/main/scala/barstools/iocell/chisel/IOCell.scala b/src/main/scala/barstools/iocell/chisel/IOCell.scala index b90e43ac9..457fa12b5 100644 --- a/src/main/scala/barstools/iocell/chisel/IOCell.scala +++ b/src/main/scala/barstools/iocell/chisel/IOCell.scala @@ -153,7 +153,7 @@ object IOCell { * @param name An optional name or name prefix to use for naming IO cells * @return A Seq of all generated IO cell instances */ - val toSyncReset: (Reset) => Bool = _.asBool() + val toSyncReset: (Reset) => Bool = _.asBool val toAsyncReset: (Reset) => AsyncReset = _.asAsyncReset def generateFromSignal[T <: Data, R <: Reset]( coreSignal: T, diff --git a/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala b/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala index 66200e617..ef98b294e 100644 --- a/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala +++ b/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala @@ -8,7 +8,6 @@ class CustomBundle[T <: Data](elts: (String, T)*) extends Record { val elements = ListMap(elts.map { case (field, elt) => field -> chiselTypeOf(elt) }: _*) def apply(elt: String): T = elements(elt) def apply(elt: Int): T = elements(elt.toString) - override def cloneType = (new CustomBundle(elements.toList: _*)).asInstanceOf[this.type] } class CustomIndexedBundle[T <: Data](elts: (Int, T)*) extends Record { @@ -17,7 +16,6 @@ class CustomIndexedBundle[T <: Data](elts: (Int, T)*) extends Record { // TODO: Make an equivalent to the below work publicly (or only on subclasses?) def indexedElements = ListMap(elts.map { case (field, elt) => field -> chiselTypeOf(elt) }: _*) def apply(elt: Int): T = elements(elt.toString) - override def cloneType = (new CustomIndexedBundle(indexedElements.toList: _*)).asInstanceOf[this.type] } object CustomIndexedBundle { From 27f4b83033e7ea87336dd47e3bb3a6298e0dfdec Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Wed, 12 Jul 2023 13:57:31 -0700 Subject: [PATCH 260/273] Remove firrtl_interpreter tests --- .../scala/barstools/macros/Functional.scala | 236 +++++++++--------- 1 file changed, 118 insertions(+), 118 deletions(-) diff --git a/src/test/scala/barstools/macros/Functional.scala b/src/test/scala/barstools/macros/Functional.scala index 9366f6f98..ddc33477a 100644 --- a/src/test/scala/barstools/macros/Functional.scala +++ b/src/test/scala/barstools/macros/Functional.scala @@ -1,120 +1,120 @@ package barstools.macros -import firrtl.ir.Circuit -import firrtl_interpreter.InterpretiveTester - -// Functional tests on memory compiler outputs. - -// Synchronous write and read back. 
-class SynchronousReadAndWrite extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 12 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - - compile(mem, lib, v, synflops = true) - val result: Circuit = execute(mem, lib, synflops = true) - - it should "run with InterpretedTester" in { - pending // Enable this when https://github.com/freechipsproject/firrtl-interpreter/pull/88 is snapshot-published - - val addr1 = 0 - val addr1val = 0xff - val addr2 = 42 - val addr2val = 0xf0 - val addr3 = 1 << 10 - val addr3val = 1 << 10 - - val tester = new InterpretiveTester(result.serialize) - //~ tester.setVerbose() - - tester.poke("outer_write_en", 0) - tester.step() - - // Write addresses and read them. - tester.poke("outer_addr", addr1) - tester.poke("outer_din", addr1val) - tester.poke("outer_write_en", 1) - tester.step() - tester.poke("outer_write_en", 0) - tester.step() - tester.poke("outer_addr", addr2) - tester.poke("outer_din", addr2val) - tester.poke("outer_write_en", 1) - tester.step() - tester.poke("outer_write_en", 0) - tester.step() - tester.poke("outer_addr", addr3) - tester.poke("outer_din", addr3val) - tester.poke("outer_write_en", 1) - tester.step() - tester.poke("outer_write_en", 0) - tester.step() - - tester.poke("outer_addr", addr1) - tester.step() - tester.expect("outer_dout", addr1val) - - tester.poke("outer_addr", addr2) - tester.step() - tester.expect("outer_dout", addr2val) - - tester.poke("outer_addr", addr3) - tester.step() - tester.expect("outer_dout", addr3val) - } -} - -// Test to verify that the circuit doesn't read combinationally based on addr -// between two submemories. -class DontReadCombinationally extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 8 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - - compile(mem, lib, v, synflops = true) - val result: Circuit = execute(mem, lib, synflops = true) - - it should "run with InterpretedTester" in { - pending // Enable this when https://github.com/freechipsproject/firrtl-interpreter/pull/88 is snapshot-published - - val addr1 = 0 - val addr1a = 1 - val addr2 = 1 << 10 - - val tester = new InterpretiveTester(result.serialize) - //~ tester.setVerbose() - - tester.poke("outer_write_en", 0) - tester.step() - - // Write two addresses, one in the lower submemory and the other in the - // higher submemory. - tester.poke("outer_addr", addr1) - tester.poke("outer_din", 0x11) - tester.poke("outer_write_en", 1) - tester.step() - tester.poke("outer_addr", addr1a) - tester.poke("outer_din", 0x1a) - tester.poke("outer_write_en", 1) - tester.step() - tester.poke("outer_addr", addr2) - tester.poke("outer_din", 0xaa) - tester.poke("outer_write_en", 1) - tester.step() - tester.poke("outer_write_en", 0) - tester.poke("outer_addr", addr1) - tester.step() - - // Test that there is no combinational read. - tester.poke("outer_addr", addr1) - tester.expect("outer_dout", 0x11) - tester.poke("outer_addr", addr1a) - tester.expect("outer_dout", 0x11) - tester.poke("outer_addr", addr2) - tester.expect("outer_dout", 0x11) - - // And upon step it should work again. - tester.step() - tester.expect("outer_addr", 0xaa) - } -} +// import firrtl.ir.Circuit +// import firrtl_interpreter.InterpretiveTester + +// // Functional tests on memory compiler outputs. + +// // Synchronous write and read back. 
+// class SynchronousReadAndWrite extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { +// override lazy val width = 12 +// override lazy val memDepth = BigInt(2048) +// override lazy val libDepth = BigInt(1024) + +// compile(mem, lib, v, synflops = true) +// val result: Circuit = execute(mem, lib, synflops = true) + +// it should "run with InterpretedTester" in { +// pending // Enable this when https://github.com/freechipsproject/firrtl-interpreter/pull/88 is snapshot-published + +// val addr1 = 0 +// val addr1val = 0xff +// val addr2 = 42 +// val addr2val = 0xf0 +// val addr3 = 1 << 10 +// val addr3val = 1 << 10 + +// val tester = new InterpretiveTester(result.serialize) +// //~ tester.setVerbose() + +// tester.poke("outer_write_en", 0) +// tester.step() + +// // Write addresses and read them. +// tester.poke("outer_addr", addr1) +// tester.poke("outer_din", addr1val) +// tester.poke("outer_write_en", 1) +// tester.step() +// tester.poke("outer_write_en", 0) +// tester.step() +// tester.poke("outer_addr", addr2) +// tester.poke("outer_din", addr2val) +// tester.poke("outer_write_en", 1) +// tester.step() +// tester.poke("outer_write_en", 0) +// tester.step() +// tester.poke("outer_addr", addr3) +// tester.poke("outer_din", addr3val) +// tester.poke("outer_write_en", 1) +// tester.step() +// tester.poke("outer_write_en", 0) +// tester.step() + +// tester.poke("outer_addr", addr1) +// tester.step() +// tester.expect("outer_dout", addr1val) + +// tester.poke("outer_addr", addr2) +// tester.step() +// tester.expect("outer_dout", addr2val) + +// tester.poke("outer_addr", addr3) +// tester.step() +// tester.expect("outer_dout", addr3val) +// } +// } + +// // Test to verify that the circuit doesn't read combinationally based on addr +// // between two submemories. +// class DontReadCombinationally extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { +// override lazy val width = 8 +// override lazy val memDepth = BigInt(2048) +// override lazy val libDepth = BigInt(1024) + +// compile(mem, lib, v, synflops = true) +// val result: Circuit = execute(mem, lib, synflops = true) + +// it should "run with InterpretedTester" in { +// pending // Enable this when https://github.com/freechipsproject/firrtl-interpreter/pull/88 is snapshot-published + +// val addr1 = 0 +// val addr1a = 1 +// val addr2 = 1 << 10 + +// val tester = new InterpretiveTester(result.serialize) +// //~ tester.setVerbose() + +// tester.poke("outer_write_en", 0) +// tester.step() + +// // Write two addresses, one in the lower submemory and the other in the +// // higher submemory. +// tester.poke("outer_addr", addr1) +// tester.poke("outer_din", 0x11) +// tester.poke("outer_write_en", 1) +// tester.step() +// tester.poke("outer_addr", addr1a) +// tester.poke("outer_din", 0x1a) +// tester.poke("outer_write_en", 1) +// tester.step() +// tester.poke("outer_addr", addr2) +// tester.poke("outer_din", 0xaa) +// tester.poke("outer_write_en", 1) +// tester.step() +// tester.poke("outer_write_en", 0) +// tester.poke("outer_addr", addr1) +// tester.step() + +// // Test that there is no combinational read. +// tester.poke("outer_addr", addr1) +// tester.expect("outer_dout", 0x11) +// tester.poke("outer_addr", addr1a) +// tester.expect("outer_dout", 0x11) +// tester.poke("outer_addr", addr2) +// tester.expect("outer_dout", 0x11) + +// // And upon step it should work again. 
+// tester.step() +// tester.expect("outer_addr", 0xaa) +// } +// } From 368dde4a359b8e4b9d889b46273d40097f60ffcd Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Sun, 30 Jul 2023 13:12:55 -0700 Subject: [PATCH 261/273] Generate 1 file per generic IOCell --- .../barstools/iocell/chisel/IOCell.scala | 58 ++++++++++++++++++- 1 file changed, 55 insertions(+), 3 deletions(-) diff --git a/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala b/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala index 457fa12b5..bdde178ef 100644 --- a/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala +++ b/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala @@ -3,7 +3,7 @@ package barstools.iocell.chisel import chisel3._ -import chisel3.util.{Cat, HasBlackBoxResource} +import chisel3.util.{Cat, HasBlackBoxResource, HasBlackBoxInline} import chisel3.experimental.{Analog, BaseModule, DataMirror, IO} // The following four IO cell bundle types are bare-minimum functional connections @@ -93,21 +93,73 @@ trait DigitalOutIOCell extends IOCell { // implementation of an IO cell. For building a real chip, it is important to implement // and use similar classes which wrap the foundry-specific IO cells. -abstract class GenericIOCell extends BlackBox with HasBlackBoxResource { - addResource("/barstools/iocell/vsrc/IOCell.v") +abstract class GenericIOCell extends BlackBox with HasBlackBoxInline { + val impl: String + val moduleName = this.getClass.getSimpleName + setInline(s"$moduleName.v", impl); } class GenericAnalogIOCell extends GenericIOCell with AnalogIOCell { val io = IO(new AnalogIOCellBundle) + lazy val impl = s""" +`timescale 1ns/1ps +module GenericAnalogIOCell( + inout pad, + inout core +); + + assign core = 1'bz; + assign pad = core; + +endmodule""" } + class GenericDigitalGPIOCell extends GenericIOCell with DigitalGPIOCell { val io = IO(new DigitalGPIOCellBundle) + lazy val impl = s""" +`timescale 1ns/1ps +module GenericDigitalGPIOCell( + inout pad, + output i, + input ie, + input o, + input oe +); + + assign pad = oe ? o : 1'bz; + assign i = ie ? pad : 1'b0; + +endmodule""" } + class GenericDigitalInIOCell extends GenericIOCell with DigitalInIOCell { val io = IO(new DigitalInIOCellBundle) + lazy val impl = s""" +`timescale 1ns/1ps +module GenericDigitalInIOCell( + input pad, + output i, + input ie +); + + assign i = ie ? pad : 1'b0; + +endmodule""" } + class GenericDigitalOutIOCell extends GenericIOCell with DigitalOutIOCell { val io = IO(new DigitalOutIOCellBundle) + lazy val impl = s""" +`timescale 1ns/1ps +module GenericDigitalOutIOCell( + output pad, + input o, + input oe +); + + assign pad = oe ? o : 1'bz; + +endmodule""" } trait IOCellTypeParams { From c8723f40b15218025216db90fd4cc16d6e9eacfd Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Sun, 30 Jul 2023 13:17:26 -0700 Subject: [PATCH 262/273] Macrocompiler: FIRRTL-elab macros 1-at-a-time Elaborating all macros in a single Circuit with an arbitrary (last) macro selected as the circuit main main cause some macros to be dropped, even with the DCEAnnotation. Work around this for now by elaborating each module in the macrocompiled circuit independently, then concatenating the verilog. 
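
A minimal sketch of the workaround (illustrative only; `macroCompiled` and the
FirrtlStage/VerilogEmitter plumbing follow the diff below, and `emitVerilog`
here is just shorthand for that invocation):

    // For each generated module, build a circuit with that module as top and
    // every other module demoted to an ExtModule, emit Verilog, then concatenate.
    val verilog = macroCompiled.circuit.modules.map(_.name).map { top =>
      val (mainMod, others) = macroCompiled.circuit.modules.partition(_.name == top)
      val extMods = others.map(m => ExtModule(NoInfo, m.name, m.ports, m.name, Nil))
      emitVerilog(Circuit(NoInfo, mainMod ++ extMods, top)) // FirrtlStage + VerilogEmitter
    }.mkString("\n")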
--- .../barstools/macros/MacroCompiler.scala | 39 +++++++++++++------ 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/src/main/scala/barstools/macros/MacroCompiler.scala b/src/main/scala/barstools/macros/MacroCompiler.scala index 968e6b30d..baed84112 100644 --- a/src/main/scala/barstools/macros/MacroCompiler.scala +++ b/src/main/scala/barstools/macros/MacroCompiler.scala @@ -14,7 +14,6 @@ import firrtl.ir._ import firrtl.options.Dependency import firrtl.stage.TransformManager.TransformDependency import firrtl.stage.{FirrtlSourceAnnotation, FirrtlStage, Forms, OutputFileAnnotation, RunFirrtlTransformAnnotation} -import firrtl.transforms.NoDCEAnnotation import firrtl.{PrimOps, _} import mdf.macrolib.{PolarizedPort, PortPolarity, SRAMCompiler, SRAMGroup, SRAMMacro} @@ -898,16 +897,34 @@ object MacroCompiler extends App { val macroCompiled = (new MacroCompilerTransform).execute(macroCompilerInput) // Run FIRRTL compiler - (new FirrtlStage).execute( - Array.empty, - Seq( - OutputFileAnnotation(params.getOrElse(Verilog, "")), - RunFirrtlTransformAnnotation(new VerilogEmitter), - EmitCircuitAnnotation(classOf[VerilogEmitter]), - NoDCEAnnotation, - FirrtlSourceAnnotation(macroCompiled.circuit.serialize) - ) - ) + // For each generated module, have to create a new circuit with that module + // as top, and all other modules as ExtModules. This guarantees all modules + // are elaborated + val verilog = macroCompiled.circuit.modules + .map(_.name) + .map { macroName => + val (mainMod, otherMods) = macroCompiled.circuit.modules.partition(_.name == macroName) + val extMods = otherMods.map(m => ExtModule(NoInfo, m.name, m.ports, m.name, Nil)) + + val circuit = Circuit(NoInfo, mainMod ++ extMods, macroName) + (new FirrtlStage) + .execute( + Array.empty, + Seq( + RunFirrtlTransformAnnotation(new VerilogEmitter), + EmitCircuitAnnotation(classOf[VerilogEmitter]), + FirrtlSourceAnnotation(circuit.serialize) + ) + ) + .collect { case c: EmittedVerilogCircuitAnnotation => c } + .head + .value + .value + } + .mkString("\n") + val verilogWriter = new FileWriter(new File(params.get(Verilog).get)) + verilogWriter.write(verilog) + verilogWriter.close() params.get(HammerIR) match { case Some(hammerIRFile: String) => From f5fe37c4bf0a37eb5033eeaf930c337895470607 Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Mon, 31 Jul 2023 09:52:15 -0700 Subject: [PATCH 263/273] Delete IOCell.v --- .../resources/barstools/iocell/vsrc/IOCell.v | 46 ------------------- 1 file changed, 46 deletions(-) delete mode 100644 iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v diff --git a/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v b/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v deleted file mode 100644 index 8023fb70d..000000000 --- a/iocell/src/main/resources/barstools/iocell/vsrc/IOCell.v +++ /dev/null @@ -1,46 +0,0 @@ -// See LICENSE for license details - -`timescale 1ns/1ps - -module GenericAnalogIOCell( - inout pad, - inout core -); - - assign core = 1'bz; - assign pad = core; - -endmodule - -module GenericDigitalGPIOCell( - inout pad, - output i, - input ie, - input o, - input oe -); - - assign pad = oe ? o : 1'bz; - assign i = ie ? pad : 1'b0; - -endmodule - -module GenericDigitalInIOCell( - input pad, - output i, - input ie -); - - assign i = ie ? pad : 1'b0; - -endmodule - -module GenericDigitalOutIOCell( - output pad, - input o, - input oe -); - - assign pad = oe ? 
o : 1'bz; - -endmodule From eef5efb93ee3df6296b8960605ba9ffa64a6d69e Mon Sep 17 00:00:00 2001 From: abejgonzalez Date: Mon, 16 Oct 2023 16:08:53 -0700 Subject: [PATCH 264/273] Dump per macro verilog (overridden by final verilog output) --- src/main/scala/barstools/macros/MacroCompiler.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/main/scala/barstools/macros/MacroCompiler.scala b/src/main/scala/barstools/macros/MacroCompiler.scala index baed84112..6bcd06c5f 100644 --- a/src/main/scala/barstools/macros/MacroCompiler.scala +++ b/src/main/scala/barstools/macros/MacroCompiler.scala @@ -911,6 +911,7 @@ object MacroCompiler extends App { .execute( Array.empty, Seq( + OutputFileAnnotation(params.get(Verilog).get), RunFirrtlTransformAnnotation(new VerilogEmitter), EmitCircuitAnnotation(classOf[VerilogEmitter]), FirrtlSourceAnnotation(circuit.serialize) @@ -922,6 +923,7 @@ object MacroCompiler extends App { .value } .mkString("\n") + val verilogWriter = new FileWriter(new File(params.get(Verilog).get)) verilogWriter.write(verilog) verilogWriter.close() From 7819dc69a4dff8a4eb293548c88a626e613cf096 Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Thu, 19 Oct 2023 10:33:50 -0700 Subject: [PATCH 265/273] Emit a empty HammerIR JSON when no macros to avoid downstream tool problems --- src/main/scala/barstools/macros/MacroCompiler.scala | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/main/scala/barstools/macros/MacroCompiler.scala b/src/main/scala/barstools/macros/MacroCompiler.scala index 6bcd06c5f..459992f2d 100644 --- a/src/main/scala/barstools/macros/MacroCompiler.scala +++ b/src/main/scala/barstools/macros/MacroCompiler.scala @@ -950,6 +950,14 @@ object MacroCompiler extends App { verilogWriter.close() case None => } + params.get(HammerIR) match { + case Some(hammerIRFile: String) => + // Create an empty HammerIR file + val hammerIRWriter = new FileWriter(new File(hammerIRFile)) + hammerIRWriter.write("[]\n") + hammerIRWriter.close() + case None => + } } } catch { case e: java.util.NoSuchElementException => From 16b56379aa1662c8481268ed47b719810c85af44 Mon Sep 17 00:00:00 2001 From: Tynan McAuley Date: Wed, 3 Jan 2024 11:00:46 -0800 Subject: [PATCH 266/273] Update deprecated APIs to prepare for Chisel 5 - `IO` was moved from `chisel3.experimental` to `chisel3` - `DataMirror` was moved from `chisel3.experimental` to `chisel3.reflect` --- iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala b/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala index bdde178ef..d38f8406e 100644 --- a/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala +++ b/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala @@ -3,8 +3,9 @@ package barstools.iocell.chisel import chisel3._ -import chisel3.util.{Cat, HasBlackBoxResource, HasBlackBoxInline} -import chisel3.experimental.{Analog, BaseModule, DataMirror, IO} +import chisel3.util.{Cat, HasBlackBoxInline} +import chisel3.reflect.DataMirror +import chisel3.experimental.{Analog, BaseModule} // The following four IO cell bundle types are bare-minimum functional connections // for modeling 4 different IO cell scenarios. 
The intention is that the user From c97627c172e9e3c0e1758a4c1f404f080ac130db Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Fri, 19 Apr 2024 11:03:10 -0700 Subject: [PATCH 267/273] Move IOCell files --- .../chipyard/src/main/resources}/vsrc/Analog.v | 0 .../chipyard/src/main/scala/iocell}/Analog.scala | 0 .../chipyard/src/main/scala/iocell}/IOCell.scala | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename {iocell/src/main/resources/barstools/iocell => generators/chipyard/src/main/resources}/vsrc/Analog.v (100%) rename {iocell/src/main/scala/barstools/iocell/chisel => generators/chipyard/src/main/scala/iocell}/Analog.scala (100%) rename {iocell/src/main/scala/barstools/iocell/chisel => generators/chipyard/src/main/scala/iocell}/IOCell.scala (100%) diff --git a/iocell/src/main/resources/barstools/iocell/vsrc/Analog.v b/generators/chipyard/src/main/resources/vsrc/Analog.v similarity index 100% rename from iocell/src/main/resources/barstools/iocell/vsrc/Analog.v rename to generators/chipyard/src/main/resources/vsrc/Analog.v diff --git a/iocell/src/main/scala/barstools/iocell/chisel/Analog.scala b/generators/chipyard/src/main/scala/iocell/Analog.scala similarity index 100% rename from iocell/src/main/scala/barstools/iocell/chisel/Analog.scala rename to generators/chipyard/src/main/scala/iocell/Analog.scala diff --git a/iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala b/generators/chipyard/src/main/scala/iocell/IOCell.scala similarity index 100% rename from iocell/src/main/scala/barstools/iocell/chisel/IOCell.scala rename to generators/chipyard/src/main/scala/iocell/IOCell.scala From 4830ebf239d7e1e1370a41325a458b3e7b75e428 Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Fri, 19 Apr 2024 11:03:27 -0700 Subject: [PATCH 268/273] Delete useless files from barstools merge --- .github/workflows/run-ci.yml | 44 ------------------------------------ .scalafmt.conf | 27 ---------------------- 2 files changed, 71 deletions(-) delete mode 100644 .github/workflows/run-ci.yml delete mode 100644 .scalafmt.conf diff --git a/.github/workflows/run-ci.yml b/.github/workflows/run-ci.yml deleted file mode 100644 index bee783810..000000000 --- a/.github/workflows/run-ci.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Test - -on: - pull_request: - push: - branches: - - master - -jobs: - test: - name: Unit Tests - runs-on: ubuntu-latest - strategy: - matrix: - scala: [ 2.12.14 ] - steps: - - name: Checkout - uses: actions/checkout@v2 - - name: Setup Scala - uses: olafurpg/setup-scala@v10 - - name: Cache - uses: coursier/cache-action@v5 - - name: Get submodules - run: git submodule update --init - - name: Test - run: sbt test - - doc: - name: Documentation and formatting - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v2 - - name: Setup Scala - uses: olafurpg/setup-scala@v10 - - name: Check Formatting - run: sbt scalafmtCheckAll - - all_test_passed: - name: "all tests passed" - runs-on: ubuntu-latest - needs: [test, doc] - steps: - - run: echo Success diff --git a/.scalafmt.conf b/.scalafmt.conf deleted file mode 100644 index 5be685f32..000000000 --- a/.scalafmt.conf +++ /dev/null @@ -1,27 +0,0 @@ -version = 2.7.5 - -maxColumn = 120 -align = most -continuationIndent.defnSite = 2 -assumeStandardLibraryStripMargin = true -docstrings = ScalaDoc -lineEndings = preserve -includeCurlyBraceInSelectChains = false -danglingParentheses.defnSite = true -danglingParentheses.callSite = true - -align.tokens.add = [ - { - code = ":" - } -] - -newlines.alwaysBeforeCurlyBraceLambdaParams = 
false -newlines.alwaysBeforeMultilineDef = false -newlines.implicitParamListModifierForce = [before] - -verticalMultiline.atDefnSite = true - -optIn.annotationNewlines = true - -rewrite.rules = [SortImports, PreferCurlyFors, AvoidInfix] \ No newline at end of file From 33a1fe3f7b747c5d114cbf95deff506532c37753 Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Fri, 19 Apr 2024 11:06:07 -0700 Subject: [PATCH 269/273] Move barstools tapeout src to tools/tapeout --- .../tapeout/src/main/scala}/macrolib/ConfReader.scala | 0 .../tapeout/src/main/scala}/macrolib/FillerMacroBase.scala | 0 .../tapeout/src/main/scala}/macrolib/FlipChipMacro.scala | 0 .../mdf => tools/tapeout/src/main/scala}/macrolib/IOMacro.scala | 0 .../mdf => tools/tapeout/src/main/scala}/macrolib/MacroLib.scala | 0 .../mdf => tools/tapeout/src/main/scala}/macrolib/SRAM.scala | 0 .../mdf => tools/tapeout/src/main/scala}/macrolib/Utils.scala | 0 .../tapeout/src/main/scala}/macros/CostMetric.scala | 0 .../tapeout/src/main/scala}/macros/MacroCompiler.scala | 0 .../tapeout/src/main/scala}/macros/SynFlopsPass.scala | 0 .../barstools => tools/tapeout/src/main/scala}/macros/Utils.scala | 0 .../tapeout/src/main/scala}/transforms/ExtraTransforms.scala | 0 .../src/main/scala}/transforms/GenerateModelStageMain.scala | 0 .../tapeout/src/main/scala}/transforms/retime/Retime.scala | 0 .../tapeout/src/main/scala}/transforms/stage/TapeoutStage.scala | 0 .../tapeout/src/main/scala}/transforms/utils/FileUtils.scala | 0 .../src/main/scala}/transforms/utils/LowerAnnotations.scala | 0 .../src/main/scala}/transforms/utils/ProgrammaticBundle.scala | 0 .../tapeout/src/main/scala}/transforms/utils/YamlHelpers.scala | 0 19 files changed, 0 insertions(+), 0 deletions(-) rename {src/main/scala/mdf => tools/tapeout/src/main/scala}/macrolib/ConfReader.scala (100%) rename {src/main/scala/mdf => tools/tapeout/src/main/scala}/macrolib/FillerMacroBase.scala (100%) rename {src/main/scala/mdf => tools/tapeout/src/main/scala}/macrolib/FlipChipMacro.scala (100%) rename {src/main/scala/mdf => tools/tapeout/src/main/scala}/macrolib/IOMacro.scala (100%) rename {src/main/scala/mdf => tools/tapeout/src/main/scala}/macrolib/MacroLib.scala (100%) rename {src/main/scala/mdf => tools/tapeout/src/main/scala}/macrolib/SRAM.scala (100%) rename {src/main/scala/mdf => tools/tapeout/src/main/scala}/macrolib/Utils.scala (100%) rename {src/main/scala/barstools => tools/tapeout/src/main/scala}/macros/CostMetric.scala (100%) rename {src/main/scala/barstools => tools/tapeout/src/main/scala}/macros/MacroCompiler.scala (100%) rename {src/main/scala/barstools => tools/tapeout/src/main/scala}/macros/SynFlopsPass.scala (100%) rename {src/main/scala/barstools => tools/tapeout/src/main/scala}/macros/Utils.scala (100%) rename {src/main/scala/barstools/tapeout => tools/tapeout/src/main/scala}/transforms/ExtraTransforms.scala (100%) rename {src/main/scala/barstools/tapeout => tools/tapeout/src/main/scala}/transforms/GenerateModelStageMain.scala (100%) rename {src/main/scala/barstools/tapeout => tools/tapeout/src/main/scala}/transforms/retime/Retime.scala (100%) rename {src/main/scala/barstools/tapeout => tools/tapeout/src/main/scala}/transforms/stage/TapeoutStage.scala (100%) rename {src/main/scala/barstools/tapeout => tools/tapeout/src/main/scala}/transforms/utils/FileUtils.scala (100%) rename {src/main/scala/barstools/tapeout => tools/tapeout/src/main/scala}/transforms/utils/LowerAnnotations.scala (100%) rename {src/main/scala/barstools/tapeout => 
tools/tapeout/src/main/scala}/transforms/utils/ProgrammaticBundle.scala (100%) rename {src/main/scala/barstools/tapeout => tools/tapeout/src/main/scala}/transforms/utils/YamlHelpers.scala (100%) diff --git a/src/main/scala/mdf/macrolib/ConfReader.scala b/tools/tapeout/src/main/scala/macrolib/ConfReader.scala similarity index 100% rename from src/main/scala/mdf/macrolib/ConfReader.scala rename to tools/tapeout/src/main/scala/macrolib/ConfReader.scala diff --git a/src/main/scala/mdf/macrolib/FillerMacroBase.scala b/tools/tapeout/src/main/scala/macrolib/FillerMacroBase.scala similarity index 100% rename from src/main/scala/mdf/macrolib/FillerMacroBase.scala rename to tools/tapeout/src/main/scala/macrolib/FillerMacroBase.scala diff --git a/src/main/scala/mdf/macrolib/FlipChipMacro.scala b/tools/tapeout/src/main/scala/macrolib/FlipChipMacro.scala similarity index 100% rename from src/main/scala/mdf/macrolib/FlipChipMacro.scala rename to tools/tapeout/src/main/scala/macrolib/FlipChipMacro.scala diff --git a/src/main/scala/mdf/macrolib/IOMacro.scala b/tools/tapeout/src/main/scala/macrolib/IOMacro.scala similarity index 100% rename from src/main/scala/mdf/macrolib/IOMacro.scala rename to tools/tapeout/src/main/scala/macrolib/IOMacro.scala diff --git a/src/main/scala/mdf/macrolib/MacroLib.scala b/tools/tapeout/src/main/scala/macrolib/MacroLib.scala similarity index 100% rename from src/main/scala/mdf/macrolib/MacroLib.scala rename to tools/tapeout/src/main/scala/macrolib/MacroLib.scala diff --git a/src/main/scala/mdf/macrolib/SRAM.scala b/tools/tapeout/src/main/scala/macrolib/SRAM.scala similarity index 100% rename from src/main/scala/mdf/macrolib/SRAM.scala rename to tools/tapeout/src/main/scala/macrolib/SRAM.scala diff --git a/src/main/scala/mdf/macrolib/Utils.scala b/tools/tapeout/src/main/scala/macrolib/Utils.scala similarity index 100% rename from src/main/scala/mdf/macrolib/Utils.scala rename to tools/tapeout/src/main/scala/macrolib/Utils.scala diff --git a/src/main/scala/barstools/macros/CostMetric.scala b/tools/tapeout/src/main/scala/macros/CostMetric.scala similarity index 100% rename from src/main/scala/barstools/macros/CostMetric.scala rename to tools/tapeout/src/main/scala/macros/CostMetric.scala diff --git a/src/main/scala/barstools/macros/MacroCompiler.scala b/tools/tapeout/src/main/scala/macros/MacroCompiler.scala similarity index 100% rename from src/main/scala/barstools/macros/MacroCompiler.scala rename to tools/tapeout/src/main/scala/macros/MacroCompiler.scala diff --git a/src/main/scala/barstools/macros/SynFlopsPass.scala b/tools/tapeout/src/main/scala/macros/SynFlopsPass.scala similarity index 100% rename from src/main/scala/barstools/macros/SynFlopsPass.scala rename to tools/tapeout/src/main/scala/macros/SynFlopsPass.scala diff --git a/src/main/scala/barstools/macros/Utils.scala b/tools/tapeout/src/main/scala/macros/Utils.scala similarity index 100% rename from src/main/scala/barstools/macros/Utils.scala rename to tools/tapeout/src/main/scala/macros/Utils.scala diff --git a/src/main/scala/barstools/tapeout/transforms/ExtraTransforms.scala b/tools/tapeout/src/main/scala/transforms/ExtraTransforms.scala similarity index 100% rename from src/main/scala/barstools/tapeout/transforms/ExtraTransforms.scala rename to tools/tapeout/src/main/scala/transforms/ExtraTransforms.scala diff --git a/src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala b/tools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala similarity index 100% rename from 
src/main/scala/barstools/tapeout/transforms/GenerateModelStageMain.scala rename to tools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala diff --git a/src/main/scala/barstools/tapeout/transforms/retime/Retime.scala b/tools/tapeout/src/main/scala/transforms/retime/Retime.scala similarity index 100% rename from src/main/scala/barstools/tapeout/transforms/retime/Retime.scala rename to tools/tapeout/src/main/scala/transforms/retime/Retime.scala diff --git a/src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala b/tools/tapeout/src/main/scala/transforms/stage/TapeoutStage.scala similarity index 100% rename from src/main/scala/barstools/tapeout/transforms/stage/TapeoutStage.scala rename to tools/tapeout/src/main/scala/transforms/stage/TapeoutStage.scala diff --git a/src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala b/tools/tapeout/src/main/scala/transforms/utils/FileUtils.scala similarity index 100% rename from src/main/scala/barstools/tapeout/transforms/utils/FileUtils.scala rename to tools/tapeout/src/main/scala/transforms/utils/FileUtils.scala diff --git a/src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala b/tools/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala similarity index 100% rename from src/main/scala/barstools/tapeout/transforms/utils/LowerAnnotations.scala rename to tools/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala diff --git a/src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala b/tools/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala similarity index 100% rename from src/main/scala/barstools/tapeout/transforms/utils/ProgrammaticBundle.scala rename to tools/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala diff --git a/src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala b/tools/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala similarity index 100% rename from src/main/scala/barstools/tapeout/transforms/utils/YamlHelpers.scala rename to tools/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala From ac11f6dbd1f6f3439d6543342624731b7a7e7f8d Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Fri, 19 Apr 2024 11:07:01 -0700 Subject: [PATCH 270/273] Remove barstools tests --- src/test/resources/PadAnnotationVerilogPart.v | 231 --- src/test/resources/bumps.json | 41 - src/test/resources/io_properties.json | 663 ------- src/test/resources/lib-BOOMTest.json | 1165 ----------- src/test/resources/lib-MaskPortTest.json | 29 - src/test/resources/lib-WriteEnableTest.json | 26 - .../scala/barstools/macros/CostFunction.scala | 114 -- .../scala/barstools/macros/Functional.scala | 120 -- .../barstools/macros/MacroCompilerSpec.scala | 546 ----- src/test/scala/barstools/macros/Masks.scala | 383 ---- .../scala/barstools/macros/MultiPort.scala | 500 ----- .../scala/barstools/macros/SRAMCompiler.scala | 21 - .../barstools/macros/SimpleSplitDepth.scala | 638 ------ .../barstools/macros/SimpleSplitWidth.scala | 608 ------ .../barstools/macros/SpecificExamples.scala | 1762 ----------------- .../scala/barstools/macros/SynFlops.scala | 455 ----- .../tapeout/transforms/GenerateSpec.scala | 104 - .../transforms/retime/RetimeSpec.scala | 119 -- .../scala/mdf/macrolib/ConfReaderSpec.scala | 101 - .../mdf/macrolib/FlipChipMacroSpec.scala | 15 - src/test/scala/mdf/macrolib/IOMacroSpec.scala | 67 - .../scala/mdf/macrolib/IOPropertiesSpec.scala | 15 - .../scala/mdf/macrolib/MacroLibOutput.scala | 270 --- 
.../scala/mdf/macrolib/MacroLibSpec.scala | 406 ---- 24 files changed, 8399 deletions(-) delete mode 100644 src/test/resources/PadAnnotationVerilogPart.v delete mode 100644 src/test/resources/bumps.json delete mode 100644 src/test/resources/io_properties.json delete mode 100644 src/test/resources/lib-BOOMTest.json delete mode 100644 src/test/resources/lib-MaskPortTest.json delete mode 100644 src/test/resources/lib-WriteEnableTest.json delete mode 100644 src/test/scala/barstools/macros/CostFunction.scala delete mode 100644 src/test/scala/barstools/macros/Functional.scala delete mode 100644 src/test/scala/barstools/macros/MacroCompilerSpec.scala delete mode 100644 src/test/scala/barstools/macros/Masks.scala delete mode 100644 src/test/scala/barstools/macros/MultiPort.scala delete mode 100644 src/test/scala/barstools/macros/SRAMCompiler.scala delete mode 100644 src/test/scala/barstools/macros/SimpleSplitDepth.scala delete mode 100644 src/test/scala/barstools/macros/SimpleSplitWidth.scala delete mode 100644 src/test/scala/barstools/macros/SpecificExamples.scala delete mode 100644 src/test/scala/barstools/macros/SynFlops.scala delete mode 100644 src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala delete mode 100644 src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala delete mode 100644 src/test/scala/mdf/macrolib/ConfReaderSpec.scala delete mode 100644 src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala delete mode 100644 src/test/scala/mdf/macrolib/IOMacroSpec.scala delete mode 100644 src/test/scala/mdf/macrolib/IOPropertiesSpec.scala delete mode 100644 src/test/scala/mdf/macrolib/MacroLibOutput.scala delete mode 100644 src/test/scala/mdf/macrolib/MacroLibSpec.scala diff --git a/src/test/resources/PadAnnotationVerilogPart.v b/src/test/resources/PadAnnotationVerilogPart.v deleted file mode 100644 index 9e4b257f7..000000000 --- a/src/test/resources/PadAnnotationVerilogPart.v +++ /dev/null @@ -1,231 +0,0 @@ -module ExampleTopModuleWithBB_PadFrame( - output clock_Int, - output reset_Int, - output [14:0] io_a_Int, - output [14:0] io_b_Int, - output [13:0] io_c_Int, - input [15:0] io_x_Int, - input [15:0] io_y_Int, - input [15:0] io_z_Int, - input [4:0] io_v_0_Int, - input [4:0] io_v_1_Int, - input [4:0] io_v_2_Int, - input clock_Ext, - input reset_Ext, - input [14:0] io_a_Ext, - input [14:0] io_b_Ext, - input [13:0] io_c_Ext, - output [15:0] io_x_Ext, - output [15:0] io_y_Ext, - output [15:0] io_z_Ext, - inout [2:0] io_analog1_Ext, - inout [2:0] io_analog2_Ext, - output [4:0] io_v_0_Ext, - output [4:0] io_v_1_Ext, - output [4:0] io_v_2_Ext -); - wire pad_digital_from_tristate_foundry_vertical_input_array_reset_in; - wire pad_digital_from_tristate_foundry_vertical_input_array_reset_out; - wire [14:0] pad_digital_from_tristate_foundry_horizontal_input_array_io_a_in; - wire [14:0] pad_digital_from_tristate_foundry_horizontal_input_array_io_a_out; - wire [14:0] pad_digital_from_tristate_foundry_horizontal_input_array_io_b_in; - wire [14:0] pad_digital_from_tristate_foundry_horizontal_input_array_io_b_out; - wire [13:0] pad_digital_from_tristate_foundry_horizontal_input_array_io_c_in; - wire [13:0] pad_digital_from_tristate_foundry_horizontal_input_array_io_c_out; - wire [15:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_x_in; - wire [15:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_x_out; - wire [15:0] pad_digital_from_tristate_foundry_vertical_output_array_io_z_in; - wire [15:0] 
pad_digital_from_tristate_foundry_vertical_output_array_io_z_out; - wire [4:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0_in; - wire [4:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0_out; - wire [4:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1_in; - wire [4:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1_out; - wire [4:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2_in; - wire [4:0] pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2_out; - pad_digital_from_tristate_foundry_vertical_input_array #(.WIDTH(1)) pad_digital_from_tristate_foundry_vertical_input_array_reset ( - .in(pad_digital_from_tristate_foundry_vertical_input_array_reset_in), - .out(pad_digital_from_tristate_foundry_vertical_input_array_reset_out) - ); - pad_digital_from_tristate_foundry_horizontal_input_array #(.WIDTH(15)) pad_digital_from_tristate_foundry_horizontal_input_array_io_a ( - .in(pad_digital_from_tristate_foundry_horizontal_input_array_io_a_in), - .out(pad_digital_from_tristate_foundry_horizontal_input_array_io_a_out) - ); - pad_digital_from_tristate_foundry_horizontal_input_array #(.WIDTH(15)) pad_digital_from_tristate_foundry_horizontal_input_array_io_b ( - .in(pad_digital_from_tristate_foundry_horizontal_input_array_io_b_in), - .out(pad_digital_from_tristate_foundry_horizontal_input_array_io_b_out) - ); - pad_digital_from_tristate_foundry_horizontal_input_array #(.WIDTH(14)) pad_digital_from_tristate_foundry_horizontal_input_array_io_c ( - .in(pad_digital_from_tristate_foundry_horizontal_input_array_io_c_in), - .out(pad_digital_from_tristate_foundry_horizontal_input_array_io_c_out) - ); - pad_digital_from_tristate_foundry_horizontal_output_array #(.WIDTH(16)) pad_digital_from_tristate_foundry_horizontal_output_array_io_x ( - .in(pad_digital_from_tristate_foundry_horizontal_output_array_io_x_in), - .out(pad_digital_from_tristate_foundry_horizontal_output_array_io_x_out) - ); - pad_digital_from_tristate_foundry_vertical_output_array #(.WIDTH(16)) pad_digital_from_tristate_foundry_vertical_output_array_io_z ( - .in(pad_digital_from_tristate_foundry_vertical_output_array_io_z_in), - .out(pad_digital_from_tristate_foundry_vertical_output_array_io_z_out) - ); - pad_analog_fast_custom_horizontal_array #(.WIDTH(3)) pad_analog_fast_custom_horizontal_array_io_analog1 ( - .io(io_analog1_Ext) - ); - pad_analog_slow_foundry_vertical_array #(.WIDTH(3)) pad_analog_slow_foundry_vertical_array_io_analog2 ( - .io(io_analog2_Ext) - ); - pad_digital_from_tristate_foundry_horizontal_output_array #(.WIDTH(5)) pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0 ( - .in(pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0_in), - .out(pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0_out) - ); - pad_digital_from_tristate_foundry_horizontal_output_array #(.WIDTH(5)) pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1 ( - .in(pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1_in), - .out(pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1_out) - ); - pad_digital_from_tristate_foundry_horizontal_output_array #(.WIDTH(5)) pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2 ( - .in(pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2_in), - .out(pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2_out) - ); - pad_supply_vdd_horizontal pad_supply_vdd_horizontal_left_0 ( - ); - pad_supply_vdd_horizontal 
pad_supply_vdd_horizontal_left_1 ( - ); - pad_supply_vdd_horizontal pad_supply_vdd_horizontal_left_2 ( - ); - pad_supply_vdd_vertical pad_supply_vdd_vertical_bottom_0 ( - ); - pad_supply_vdd_vertical pad_supply_vdd_vertical_bottom_1 ( - ); - pad_supply_vss_horizontal pad_supply_vss_horizontal_right_0 ( - ); - assign clock_Int = clock_Ext; - assign reset_Int = pad_digital_from_tristate_foundry_vertical_input_array_reset_out; - assign io_a_Int = pad_digital_from_tristate_foundry_horizontal_input_array_io_a_out; - assign io_b_Int = pad_digital_from_tristate_foundry_horizontal_input_array_io_b_out; - assign io_c_Int = $signed(pad_digital_from_tristate_foundry_horizontal_input_array_io_c_out); - assign io_x_Ext = pad_digital_from_tristate_foundry_horizontal_output_array_io_x_out; - assign io_y_Ext = io_y_Int; - assign io_z_Ext = $signed(pad_digital_from_tristate_foundry_vertical_output_array_io_z_out); - assign io_v_0_Ext = pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0_out; - assign io_v_1_Ext = pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1_out; - assign io_v_2_Ext = pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2_out; - assign pad_digital_from_tristate_foundry_vertical_input_array_reset_in = reset_Ext; - assign pad_digital_from_tristate_foundry_horizontal_input_array_io_a_in = io_a_Ext; - assign pad_digital_from_tristate_foundry_horizontal_input_array_io_b_in = io_b_Ext; - assign pad_digital_from_tristate_foundry_horizontal_input_array_io_c_in = $unsigned(io_c_Ext); - assign pad_digital_from_tristate_foundry_horizontal_output_array_io_x_in = io_x_Int; - assign pad_digital_from_tristate_foundry_vertical_output_array_io_z_in = $unsigned(io_z_Int); - assign pad_digital_from_tristate_foundry_horizontal_output_array_io_v_0_in = io_v_0_Int; - assign pad_digital_from_tristate_foundry_horizontal_output_array_io_v_1_in = io_v_1_Int; - assign pad_digital_from_tristate_foundry_horizontal_output_array_io_v_2_in = io_v_2_Int; -endmodule -module ExampleTopModuleWithBB( - input clock, - input reset, - input [14:0] io_a, - input [14:0] io_b, - input [13:0] io_c, - output [15:0] io_x, - output [15:0] io_y, - output [15:0] io_z, - inout [2:0] io_analog1, - inout [2:0] io_analog2, - output [4:0] io_v_0, - output [4:0] io_v_1, - output [4:0] io_v_2 -); - wire ExampleTopModuleWithBB_PadFrame_clock_Int; - wire ExampleTopModuleWithBB_PadFrame_reset_Int; - wire [14:0] ExampleTopModuleWithBB_PadFrame_io_a_Int; - wire [14:0] ExampleTopModuleWithBB_PadFrame_io_b_Int; - wire [13:0] ExampleTopModuleWithBB_PadFrame_io_c_Int; - wire [15:0] ExampleTopModuleWithBB_PadFrame_io_x_Int; - wire [15:0] ExampleTopModuleWithBB_PadFrame_io_y_Int; - wire [15:0] ExampleTopModuleWithBB_PadFrame_io_z_Int; - wire [4:0] ExampleTopModuleWithBB_PadFrame_io_v_0_Int; - wire [4:0] ExampleTopModuleWithBB_PadFrame_io_v_1_Int; - wire [4:0] ExampleTopModuleWithBB_PadFrame_io_v_2_Int; - wire ExampleTopModuleWithBB_PadFrame_clock_Ext; - wire ExampleTopModuleWithBB_PadFrame_reset_Ext; - wire [14:0] ExampleTopModuleWithBB_PadFrame_io_a_Ext; - wire [14:0] ExampleTopModuleWithBB_PadFrame_io_b_Ext; - wire [13:0] ExampleTopModuleWithBB_PadFrame_io_c_Ext; - wire [15:0] ExampleTopModuleWithBB_PadFrame_io_x_Ext; - wire [15:0] ExampleTopModuleWithBB_PadFrame_io_y_Ext; - wire [15:0] ExampleTopModuleWithBB_PadFrame_io_z_Ext; - wire [4:0] ExampleTopModuleWithBB_PadFrame_io_v_0_Ext; - wire [4:0] ExampleTopModuleWithBB_PadFrame_io_v_1_Ext; - wire [4:0] ExampleTopModuleWithBB_PadFrame_io_v_2_Ext; - wire 
ExampleTopModuleWithBB_Internal_clock; - wire ExampleTopModuleWithBB_Internal_reset; - wire [14:0] ExampleTopModuleWithBB_Internal_io_a; - wire [14:0] ExampleTopModuleWithBB_Internal_io_b; - wire [13:0] ExampleTopModuleWithBB_Internal_io_c; - wire [15:0] ExampleTopModuleWithBB_Internal_io_x; - wire [15:0] ExampleTopModuleWithBB_Internal_io_y; - wire [15:0] ExampleTopModuleWithBB_Internal_io_z; - wire [4:0] ExampleTopModuleWithBB_Internal_io_v_0; - wire [4:0] ExampleTopModuleWithBB_Internal_io_v_1; - wire [4:0] ExampleTopModuleWithBB_Internal_io_v_2; - ExampleTopModuleWithBB_PadFrame ExampleTopModuleWithBB_PadFrame ( - .clock_Int(ExampleTopModuleWithBB_PadFrame_clock_Int), - .reset_Int(ExampleTopModuleWithBB_PadFrame_reset_Int), - .io_a_Int(ExampleTopModuleWithBB_PadFrame_io_a_Int), - .io_b_Int(ExampleTopModuleWithBB_PadFrame_io_b_Int), - .io_c_Int(ExampleTopModuleWithBB_PadFrame_io_c_Int), - .io_x_Int(ExampleTopModuleWithBB_PadFrame_io_x_Int), - .io_y_Int(ExampleTopModuleWithBB_PadFrame_io_y_Int), - .io_z_Int(ExampleTopModuleWithBB_PadFrame_io_z_Int), - .io_v_0_Int(ExampleTopModuleWithBB_PadFrame_io_v_0_Int), - .io_v_1_Int(ExampleTopModuleWithBB_PadFrame_io_v_1_Int), - .io_v_2_Int(ExampleTopModuleWithBB_PadFrame_io_v_2_Int), - .clock_Ext(ExampleTopModuleWithBB_PadFrame_clock_Ext), - .reset_Ext(ExampleTopModuleWithBB_PadFrame_reset_Ext), - .io_a_Ext(ExampleTopModuleWithBB_PadFrame_io_a_Ext), - .io_b_Ext(ExampleTopModuleWithBB_PadFrame_io_b_Ext), - .io_c_Ext(ExampleTopModuleWithBB_PadFrame_io_c_Ext), - .io_x_Ext(ExampleTopModuleWithBB_PadFrame_io_x_Ext), - .io_y_Ext(ExampleTopModuleWithBB_PadFrame_io_y_Ext), - .io_z_Ext(ExampleTopModuleWithBB_PadFrame_io_z_Ext), - .io_analog1_Ext(io_analog1), - .io_analog2_Ext(io_analog2), - .io_v_0_Ext(ExampleTopModuleWithBB_PadFrame_io_v_0_Ext), - .io_v_1_Ext(ExampleTopModuleWithBB_PadFrame_io_v_1_Ext), - .io_v_2_Ext(ExampleTopModuleWithBB_PadFrame_io_v_2_Ext) - ); - ExampleTopModuleWithBB_Internal ExampleTopModuleWithBB_Internal ( - .clock(ExampleTopModuleWithBB_Internal_clock), - .reset(ExampleTopModuleWithBB_Internal_reset), - .io_a(ExampleTopModuleWithBB_Internal_io_a), - .io_b(ExampleTopModuleWithBB_Internal_io_b), - .io_c(ExampleTopModuleWithBB_Internal_io_c), - .io_x(ExampleTopModuleWithBB_Internal_io_x), - .io_y(ExampleTopModuleWithBB_Internal_io_y), - .io_z(ExampleTopModuleWithBB_Internal_io_z), - .io_analog1(io_analog1), - .io_analog2(io_analog2), - .io_v_0(ExampleTopModuleWithBB_Internal_io_v_0), - .io_v_1(ExampleTopModuleWithBB_Internal_io_v_1), - .io_v_2(ExampleTopModuleWithBB_Internal_io_v_2) - ); - assign io_x = ExampleTopModuleWithBB_PadFrame_io_x_Ext; - assign io_y = ExampleTopModuleWithBB_PadFrame_io_y_Ext; - assign io_z = ExampleTopModuleWithBB_PadFrame_io_z_Ext; - assign io_v_0 = ExampleTopModuleWithBB_PadFrame_io_v_0_Ext; - assign io_v_1 = ExampleTopModuleWithBB_PadFrame_io_v_1_Ext; - assign io_v_2 = ExampleTopModuleWithBB_PadFrame_io_v_2_Ext; - assign ExampleTopModuleWithBB_PadFrame_io_x_Int = ExampleTopModuleWithBB_Internal_io_x; - assign ExampleTopModuleWithBB_PadFrame_io_y_Int = ExampleTopModuleWithBB_Internal_io_y; - assign ExampleTopModuleWithBB_PadFrame_io_z_Int = ExampleTopModuleWithBB_Internal_io_z; - assign ExampleTopModuleWithBB_PadFrame_io_v_0_Int = ExampleTopModuleWithBB_Internal_io_v_0; - assign ExampleTopModuleWithBB_PadFrame_io_v_1_Int = ExampleTopModuleWithBB_Internal_io_v_1; - assign ExampleTopModuleWithBB_PadFrame_io_v_2_Int = ExampleTopModuleWithBB_Internal_io_v_2; - assign ExampleTopModuleWithBB_PadFrame_clock_Ext 
= clock; - assign ExampleTopModuleWithBB_PadFrame_reset_Ext = reset; - assign ExampleTopModuleWithBB_PadFrame_io_a_Ext = io_a; - assign ExampleTopModuleWithBB_PadFrame_io_b_Ext = io_b; - assign ExampleTopModuleWithBB_PadFrame_io_c_Ext = io_c; - assign ExampleTopModuleWithBB_Internal_clock = ExampleTopModuleWithBB_PadFrame_clock_Int; - assign ExampleTopModuleWithBB_Internal_reset = ExampleTopModuleWithBB_PadFrame_reset_Int; - assign ExampleTopModuleWithBB_Internal_io_a = ExampleTopModuleWithBB_PadFrame_io_a_Int; - assign ExampleTopModuleWithBB_Internal_io_b = ExampleTopModuleWithBB_PadFrame_io_b_Int; - assign ExampleTopModuleWithBB_Internal_io_c = ExampleTopModuleWithBB_PadFrame_io_c_Int; -endmodule \ No newline at end of file diff --git a/src/test/resources/bumps.json b/src/test/resources/bumps.json deleted file mode 100644 index 21b93381c..000000000 --- a/src/test/resources/bumps.json +++ /dev/null @@ -1,41 +0,0 @@ -[ - { - "name" : "example", - "type" : "flipchip", - "bump_dimensions" : [27,27], - "bump_locations" : [ - ["-", "GND", "VDDC0_SEL[0]", "VDDC0_SEL[1]", "VDDC1_SEL[0]", "VDDC1_SEL[1]", "VDDC2_SEL[0]", "VDDC2_SEL[1]", "VDDC3_SEL[0]", "VDDC3_SEL[1]", "VDDC0_EN", "VDDC1_EN", "VDDC2_EN", "VDDC3_EN", "CCLK0", "CCLK1", "CCLK2", "RESET", "BOOT", "I2C_SDA", "I2C_SCL", "SPI_SCLK", "SPI_MOSI", "SPI_MISO", "SPI_SS_L", "GND", "-"], - [ "GND", "", "", "", "GND", "GND","GPIO[1]", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8","UART_RX","UART_TX", "GND", "GND", "GND", "GND", "", "", "", "GND"], - - ["TXP0", "VDDA", "VDDA", "GND", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "GND", "VDDA", "VDDA", "TXP4"], - ["TXN0", "VDDA", "VDDA", "GND", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "GND", "VDDA", "VDDA", "TXN4"], - [ "GND", "", "", "", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "", "", "", "GND"], - ["RXP0", "VDDA", "VDDA", "GND", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "GND", "VDDA", "VDDA", "RXP4"], - ["RXN0", "VDDA", "VDDA", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "VDDA", "VDDA", "RXN4"], - [ "GND", "", "", "", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "", "", "", "GND"], - - ["TXP1", "VDDA", "VDDA", "GND", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "GND", "VDDA", "VDDA", "TXP5"], - ["TXN1", "VDDA", "VDDA", "GND", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "GND", "VDDA", "VDDA", "TXN5"], - [ "GND", "", "", "", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "", "", "", "GND"], - ["RXP1", "VDDA", "VDDA", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "VDDA", 
"VDDA", "RXP5"], - ["RXN1", "VDDA", "VDDA", "GND", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "GND", "GND", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "GND", "VDDA", "VDDA", "RXN5"], - [ "GND", "", "", "", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "VDDC1", "GND", "GND", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC0", "VDDC0", "VDDC0", "VDDC0", "", "", "", "GND"], - - ["TXP2", "VDDA", "VDDA", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "VDDA", "VDDA", "TXP6"], - ["TXN2", "VDDA", "VDDA", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "VDDA", "VDDA", "TXN6"], - [ "GND", "", "", "", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "", "", "", "GND"], - ["RXP2", "VDDA", "VDDA", "GND", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC2", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "VDDC3", "GND", "VDDA", "VDDA", "RXP6"], - ["RXN2", "VDDA", "VDDA", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "VDDA", "VDDA", "RXN6"], - [ "GND", "", "", "", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "GND", "", "", "", "GND"], - - ["TXP3", "VDDA", "VDDA", "GND", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "GND", "VDDA", "VDDA", "TXP7"], - ["TXN3", "VDDA", "VDDA", "GND", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "VDD0V8", "GND", "VDDA", "VDDA", "TXN7"], - [ "GND", "", "", "", "-", "SERIAL_IN_READY", "-", "-", "SERIAL_IN_VALID", "-", "-", "-", "-", "", "-", "SERIAL_OUT_VALID", "-", "-", "SERIAL_OUT_READY", "-", "-", "GPIO[0]", "-", "", "", "", "GND"], - ["RXP3", "VDDA", "VDDA", "GND", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "GND", "VDDA", "VDDA", "RXP7"], - ["RXN3", "VDDA", "VDDA", "GND", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "GND", "VDDA", "VDDA", "RXN7"], - - [ "GND", "", "", "", "GND", "GND", "GND", "GND", "GND", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "VDD1V8", "GND", "GND", "GND", "GND", "GND", "", "", "", "GND"], - ["-", "GND", "REFCLK0P", "REFCLK0N", "GND", "SERIAL_OUT[0]", "SERIAL_OUT[1]", "SERIAL_OUT[2]", "SERIAL_OUT[3]", "SERIAL_IN[0]", "SERIAL_IN[1]", "SERIAL_IN[2]", "SERIAL_IN[3]", "JTAG_TMS", "JTAG_TCK", "JTAG_TDO", "JTAG_TDI", "CLKSEL", "PLLCLK_OUT", "GND", "PLLREFCLKP", "PLLREFCLKN", "GND", "REFCLK1P", "REFCLK1N", "GND", "-"] - ] - } -] diff --git a/src/test/resources/io_properties.json b/src/test/resources/io_properties.json deleted file mode 100644 index 93b945a39..000000000 --- a/src/test/resources/io_properties.json +++ /dev/null @@ -1,663 +0,0 @@ -[ - { - "name": "My IOs", - "type": "io_properties", - "top": "EAGLE", - "ios": [ - { - "name": "GND", - 
"type": "ground" - }, - { - "name": "VDD0V8", - "type": "power" - }, - { - "name": "VDD1V8", - "type": "power" - }, - { - "name": "VDDC0", - "type": "power" - }, - { - "name": "VDDC1", - "type": "power" - }, - { - "name": "VDDC2", - "type": "power" - }, - { - "name": "VDDC3", - "type": "power" - }, - { - "name": "VDDA", - "type": "power" - }, - { - "name": "VDDC0_SEL[1:0]", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "VDDC1_SEL[1:0]", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "VDDC2_SEL[1:0]", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "VDDC3_SEL[1:0]", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "VDDDC0_EN", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "VDDDC1_EN", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "VDDDC2_EN", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "VDDDC3_EN", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "CCLK0", - "type": "digital", - "direction": "input", - "termination": 50, - "termination_type": "single", - "termination_reference": "GND" - }, - { - "name": "CCLK1", - "type": "digital", - "direction": "input", - "termination": 50, - "termination_type": "single", - "termination_reference": "GND" - }, - { - "name": "CCLK2", - "type": "digital", - "direction": "input", - "termination": 50, - "termination_type": "single", - "termination_reference": "GND" - }, - { - "name": "RESET", - "type": "digital", - "direction": "input", - "termination": "CMOS" - }, - { - "name": "BOOT", - "type": "digital", - "direction": "input", - "termination": "CMOS" - }, - { - "name": "I2C_SDA", - "type": "digital", - "direction": "inout", - "termination": "open-drain" - }, - { - "name": "I2C_SCL", - "type": "digital", - "direction": "inout", - "termination": "open-drain" - }, - { - "name": "SPI_SCLK", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "SPI_MOSI", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "SPI_MISO", - "type": "digital", - "direction": "input", - "termination": "CMOS" - }, - { - "name": "SPI_SS_L", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "GPIO[1:0]", - "type": "digital", - "direction": "inout", - "termination": "CMOS" - }, - { - "name": "UART_RX", - "type": "digital", - "direction": "input", - "termination": "CMOS" - }, - { - "name": "UART_TX", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "SERIAL_IN_READY", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "SERIAL_IN_VALID", - "type": "digital", - "direction": "input", - "termination": "CMOS" - }, - { - "name": "SERIAL_OUT_READY", - "type": "digital", - "direction": "input", - "termination": "CMOS" - }, - { - "name": "SERIAL_OUT_VALID", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "SERIAL_OUT[3:0]", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "SERIAL_IN[3:0]", - "type": "digital", - "direction": "input", - "termination": "CMOS" - }, - { - "name": "REFCLK0P", - "type": "analog", - "direction": "input", - "match": [ - "REFCLK0N" - ], - "termination": 100, - "termination_type": 
"differential", - "termination_reference": "GND" - }, - { - "name": "REFCLK0N", - "type": "analog", - "direction": "input", - "match": [ - "REFCLK0P" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "REFCLK1N", - "type": "analog", - "direction": "input", - "match": [ - "REFCLK1P" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "REFCLK1P", - "type": "analog", - "direction": "input", - "match": [ - "REFCLK1N" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "PLLREFCLKP", - "type": "analog", - "direction": "input", - "match": [ - "PLLREFCLKP" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "PLLREFCLKN", - "type": "analog", - "direction": "input", - "match": [ - "PLLREFCLKP" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "JTAG_TMS", - "type": "digital", - "direction": "input", - "termination": "CMOS" - }, - { - "name": "JTAG_TCK", - "type": "digital", - "direction": "input", - "termination": "CMOS" - }, - { - "name": "JTAG_TDI", - "type": "digital", - "direction": "input", - "termination": "CMOS" - }, - { - "name": "JTAG_TDO", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "PLLCLK_OUT", - "type": "digital", - "direction": "output", - "termination": "CMOS" - }, - { - "name": "TXP0", - "type": "analog", - "direction": "output", - "match": [ - "TXN0" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXN0", - "type": "analog", - "direction": "output", - "match": [ - "TXP0" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXP0", - "type": "analog", - "direction": "output", - "match": [ - "RXN0" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXN1", - "type": "analog", - "direction": "input", - "match": [ - "RXP1" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXP1", - "type": "analog", - "direction": "output", - "match": [ - "TXN1" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXN1", - "type": "analog", - "direction": "output", - "match": [ - "TXP1" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXP1", - "type": "analog", - "direction": "output", - "match": [ - "RXN1" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXN1", - "type": "analog", - "direction": "input", - "match": [ - "RXP1" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXP2", - "type": "analog", - "direction": "output", - "match": [ - "TXN2" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXN2", - "type": "analog", - "direction": "output", - "match": [ - "TXP2" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXP2", - "type": "analog", - "direction": "output", - 
"match": [ - "RXN2" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXN2", - "type": "analog", - "direction": "input", - "match": [ - "RXP2" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXP3", - "type": "analog", - "direction": "output", - "match": [ - "TXN3" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXN3", - "type": "analog", - "direction": "output", - "match": [ - "TXP3" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXP3", - "type": "analog", - "direction": "output", - "match": [ - "RXN3" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXN3", - "type": "analog", - "direction": "input", - "match": [ - "RXP3" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXP4", - "type": "analog", - "direction": "output", - "match": [ - "TXN4" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXN4", - "type": "analog", - "direction": "output", - "match": [ - "TXP4" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXP4", - "type": "analog", - "direction": "output", - "match": [ - "RXN4" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXN4", - "type": "analog", - "direction": "input", - "match": [ - "RXP4" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXP5", - "type": "analog", - "direction": "output", - "match": [ - "TXN5" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXN5", - "type": "analog", - "direction": "output", - "match": [ - "TXP5" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXP5", - "type": "analog", - "direction": "output", - "match": [ - "RXN5" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXN5", - "type": "analog", - "direction": "input", - "match": [ - "RXP5" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXP6", - "type": "analog", - "direction": "output", - "match": [ - "TXN6" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXN6", - "type": "analog", - "direction": "output", - "match": [ - "TXP6" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXP6", - "type": "analog", - "direction": "output", - "match": [ - "RXN6" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXN6", - "type": "analog", - "direction": "input", - "match": [ - "RXP6" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "TXP7", - "type": "analog", - "direction": "output", - "match": [ - "TXN7" - ], - "termination": 100, - "termination_type": "differential", - 
"termination_reference": "GND" - }, - { - "name": "TXN7", - "type": "analog", - "direction": "output", - "match": [ - "TXP7" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXP7", - "type": "analog", - "direction": "output", - "match": [ - "RXN7" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - }, - { - "name": "RXN7", - "type": "analog", - "direction": "input", - "match": [ - "RXP7" - ], - "termination": 100, - "termination_type": "differential", - "termination_reference": "GND" - } - ] - } -] diff --git a/src/test/resources/lib-BOOMTest.json b/src/test/resources/lib-BOOMTest.json deleted file mode 100644 index 1d2e5f697..000000000 --- a/src/test/resources/lib-BOOMTest.json +++ /dev/null @@ -1,1165 +0,0 @@ -[ - { - "family": "1rw", - "width": 8, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_1024x8", - "type": "sram", - "depth": "1024" - }, - { - "family": "1rw", - "width": 46, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_128x46", - "type": "sram", - "depth": "128" - }, - { - "family": "1rw", - "width": 48, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_128x48", - "type": "sram", - "depth": "128" - }, - { - "family": "1rw", - "width": 8, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_128x8", - "type": "sram", - "depth": "128" - }, - { - "family": "1rw", - "width": 128, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - 
"address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_256x128", - "type": "sram", - "depth": "256" - }, - { - "family": "1rw", - "width": 32, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_256x32", - "type": "sram", - "depth": "256" - }, - { - "family": "1rw", - "width": 46, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_256x46", - "type": "sram", - "depth": "256" - }, - { - "family": "1rw", - "width": 48, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_256x48", - "type": "sram", - "depth": "256" - }, - { - "family": "1rw", - "width": 8, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_256x8", - "type": "sram", - "depth": "256" - }, - { - "family": "1rw", - "width": 50, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - 
"address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_32x50", - "type": "sram", - "depth": "32" - }, - { - "family": "1rw", - "width": 128, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_512x128", - "type": "sram", - "depth": "512" - }, - { - "family": "1rw", - "width": 32, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_512x32", - "type": "sram", - "depth": "512" - }, - { - "family": "1rw", - "width": 8, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_512x8", - "type": "sram", - "depth": "512" - }, - { - "family": "1rw", - "width": 128, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_64x128", - "type": "sram", - "depth": "64" - }, - { - "family": "1rw", - "width": 32, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_64x32", - "type": "sram", - "depth": "64" - }, - { - "family": "1rw", - "width": 34, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": 
"WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_64x34", - "type": "sram", - "depth": "64" - }, - { - "family": "1rw", - "width": 8, - "ports": [ - { - "chip enable port name": "CSB", - "write enable port name": "WEB", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE", - "chip enable port polarity": "active low", - "address port name": "A", - "read enable port name": "OEB", - "input port name": "I", - "input port polarity": "active high" - } - ], - "name": "my_sram_1rw_64x8", - "type": "sram", - "depth": "64" - }, - { - "family": "2rw", - "width": 16, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_128x16", - "type": "sram", - "depth": "128" - }, - { - "family": "2rw", - "width": 32, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_128x32", - "type": "sram", - "depth": "128" - }, - { - "family": 
"2rw", - "width": 4, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_128x4", - "type": "sram", - "depth": "128" - }, - { - "family": "2rw", - "width": 8, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_128x8", - "type": "sram", - "depth": "128" - }, - { - "family": "2rw", - "width": 16, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_16x16", - "type": "sram", - "depth": "16" - }, - { - "family": "2rw", - "width": 
32, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_16x32", - "type": "sram", - "depth": "16" - }, - { - "family": "2rw", - "width": 4, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_16x4", - "type": "sram", - "depth": "16" - }, - { - "family": "2rw", - "width": 8, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_16x8", - "type": "sram", - "depth": "16" - }, - { - "family": "2rw", - "width": 16, - "ports": [ - { 
- "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_32x16", - "type": "sram", - "depth": "32" - }, - { - "family": "2rw", - "width": 22, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_32x22", - "type": "sram", - "depth": "32" - }, - { - "family": "2rw", - "width": 32, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_32x32", - "type": "sram", - "depth": "32" - }, - { - "family": "2rw", - "width": 39, - "ports": [ - { - "chip enable 
port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_32x39", - "type": "sram", - "depth": "32" - }, - { - "family": "2rw", - "width": 4, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_32x4", - "type": "sram", - "depth": "32" - }, - { - "family": "2rw", - "width": 8, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_32x8", - "type": "sram", - "depth": "32" - }, - { - "family": "2rw", - "width": 16, - "ports": [ - { - "chip enable port name": "CSB1", - 
"write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_64x16", - "type": "sram", - "depth": "64" - }, - { - "family": "2rw", - "width": 32, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_64x32", - "type": "sram", - "depth": "64" - }, - { - "family": "2rw", - "width": 4, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_64x4", - "type": "sram", - "depth": "64" - }, - { - "family": "2rw", - "width": 8, - "ports": [ - { - "chip enable port name": "CSB1", - "write enable port 
name": "WEB1", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O1", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE1", - "chip enable port polarity": "active low", - "address port name": "A1", - "read enable port name": "OEB1", - "input port name": "I1", - "input port polarity": "active high" - }, - { - "chip enable port name": "CSB2", - "write enable port name": "WEB2", - "address port polarity": "active high", - "output port polarity": "active high", - "output port name": "O2", - "write enable port polarity": "active low", - "read enable port polarity": "active low", - "clock port polarity": "positive edge", - "clock port name": "CE2", - "chip enable port polarity": "active low", - "address port name": "A2", - "read enable port name": "OEB2", - "input port name": "I2", - "input port polarity": "active high" - } - ], - "name": "my_sram_2rw_64x8", - "type": "sram", - "depth": "64" - } -] diff --git a/src/test/resources/lib-MaskPortTest.json b/src/test/resources/lib-MaskPortTest.json deleted file mode 100644 index 784aeafbb..000000000 --- a/src/test/resources/lib-MaskPortTest.json +++ /dev/null @@ -1,29 +0,0 @@ -[ - { - "type": "sram", - "name": "fake_mem", - "width": 64, - "depth": "512", - "mux": 4, - "family": "1rw", - "ports": [ - { - "address port name": "addr", - "address port polarity": "active high", - "clock port name": "clk", - "clock port polarity": "positive edge", - "write enable port name": "wen", - "write enable port polarity": "active high", - "read enable port name": "ren", - "read enable port polarity": "active high", - "output port name": "dataout", - "output port polarity": "active high", - "input port name": "datain", - "input port polarity": "active high", - "mask port name": "mport", - "mask port polarity": "active low", - "mask granularity": 1 - } - ] - } -] diff --git a/src/test/resources/lib-WriteEnableTest.json b/src/test/resources/lib-WriteEnableTest.json deleted file mode 100644 index 50acef413..000000000 --- a/src/test/resources/lib-WriteEnableTest.json +++ /dev/null @@ -1,26 +0,0 @@ -[ - { - "type": "sram", - "name": "fake_mem", - "width": 64, - "depth": "4096", - "mux": 4, - "family": "1rw", - "ports": [ - { - "address port name": "addr", - "address port polarity": "active high", - "clock port name": "clk", - "clock port polarity": "positive edge", - "write enable port name": "wen", - "write enable port polarity": "active high", - "read enable port name": "ren", - "read enable port polarity": "active high", - "output port name": "dataout", - "output port polarity": "active high", - "input port name": "datain", - "input port polarity": "active high" - } - ] - } -] diff --git a/src/test/scala/barstools/macros/CostFunction.scala b/src/test/scala/barstools/macros/CostFunction.scala deleted file mode 100644 index 62ebfcdff..000000000 --- a/src/test/scala/barstools/macros/CostFunction.scala +++ /dev/null @@ -1,114 +0,0 @@ -package barstools.macros - -import mdf.macrolib.SRAMMacro - -/** Tests to check that the cost function mechanism is working properly. */ - -/** A test metric that simply favours memories with smaller widths, to test that - * the metric is chosen properly. 
- */ -object TestMinWidthMetric extends CostMetric with CostMetricCompanion { - // Smaller width = lower cost = favoured - override def cost(mem: Macro, lib: Macro): Option[Double] = Some(lib.src.width) - - override def commandLineParams() = Map() - override def name() = "TestMinWidthMetric" - override def construct(m: Map[String, String]): CostMetric = TestMinWidthMetric -} - -/** Test that cost metric selection is working. */ -class SelectCostMetric extends MacroCompilerSpec with HasSRAMGenerator { - val mem = s"mem-SelectCostMetric.json" - val lib = s"lib-SelectCostMetric.json" - val v = s"SelectCostMetric.v" - - // Cost metrics must be registered for them to work with the command line. - CostMetric.registerCostMetric(TestMinWidthMetric) - - override val costMetric: Option[CostMetric] = Some(TestMinWidthMetric) - - val libSRAMs = Seq( - SRAMMacro( - name = "SRAM_WIDTH_128", - depth = BigInt(1024), - width = 128, - family = "1rw", - ports = Seq( - generateReadWritePort("", 128, BigInt(1024)) - ) - ), - SRAMMacro( - name = "SRAM_WIDTH_64", - depth = BigInt(1024), - width = 64, - family = "1rw", - ports = Seq( - generateReadWritePort("", 64, BigInt(1024)) - ) - ), - SRAMMacro( - name = "SRAM_WIDTH_32", - depth = BigInt(1024), - width = 32, - family = "1rw", - ports = Seq( - generateReadWritePort("", 32, BigInt(1024)) - ) - ) - ) - - val memSRAMs = Seq(generateSRAM("target_memory", "", 128, BigInt(1024))) - - writeToLib(lib, libSRAMs) - writeToMem(mem, memSRAMs) - - // Check that the min width SRAM was chosen, even though it is less efficient. - val output = - """ -circuit target_memory : - module target_memory : - input addr : UInt<10> - input clk : Clock - input din : UInt<128> - output dout : UInt<128> - input write_en : UInt<1> - - inst mem_0_0 of SRAM_WIDTH_32 - inst mem_0_1 of SRAM_WIDTH_32 - inst mem_0_2 of SRAM_WIDTH_32 - inst mem_0_3 of SRAM_WIDTH_32 - mem_0_0.clk <= clk - mem_0_0.addr <= addr - node dout_0_0 = bits(mem_0_0.dout, 31, 0) - mem_0_0.din <= bits(din, 31, 0) - mem_0_0.write_en <= and(and(and(write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - mem_0_1.clk <= clk - mem_0_1.addr <= addr - node dout_0_1 = bits(mem_0_1.dout, 31, 0) - mem_0_1.din <= bits(din, 63, 32) - mem_0_1.write_en <= and(and(and(write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - mem_0_2.clk <= clk - mem_0_2.addr <= addr - node dout_0_2 = bits(mem_0_2.dout, 31, 0) - mem_0_2.din <= bits(din, 95, 64) - mem_0_2.write_en <= and(and(and(write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - mem_0_3.clk <= clk - mem_0_3.addr <= addr - node dout_0_3 = bits(mem_0_3.dout, 31, 0) - mem_0_3.din <= bits(din, 127, 96) - mem_0_3.write_en <= and(and(and(write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - node dout_0 = cat(dout_0_3, cat(dout_0_2, cat(dout_0_1, dout_0_0))) - dout <= mux(UInt<1>("h1"), dout_0, UInt<128>("h0")) - - extmodule SRAM_WIDTH_32 : - input addr : UInt<10> - input clk : Clock - input din : UInt<32> - output dout : UInt<32> - input write_en : UInt<1> - - defname = SRAM_WIDTH_32 -""" - - compileExecuteAndTest(mem, lib, v, output) -} diff --git a/src/test/scala/barstools/macros/Functional.scala b/src/test/scala/barstools/macros/Functional.scala deleted file mode 100644 index ddc33477a..000000000 --- a/src/test/scala/barstools/macros/Functional.scala +++ /dev/null @@ -1,120 +0,0 @@ -package barstools.macros - -// import firrtl.ir.Circuit -// import firrtl_interpreter.InterpretiveTester - -// // Functional tests on memory compiler outputs. 
- -// // Synchronous write and read back. -// class SynchronousReadAndWrite extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { -// override lazy val width = 12 -// override lazy val memDepth = BigInt(2048) -// override lazy val libDepth = BigInt(1024) - -// compile(mem, lib, v, synflops = true) -// val result: Circuit = execute(mem, lib, synflops = true) - -// it should "run with InterpretedTester" in { -// pending // Enable this when https://github.com/freechipsproject/firrtl-interpreter/pull/88 is snapshot-published - -// val addr1 = 0 -// val addr1val = 0xff -// val addr2 = 42 -// val addr2val = 0xf0 -// val addr3 = 1 << 10 -// val addr3val = 1 << 10 - -// val tester = new InterpretiveTester(result.serialize) -// //~ tester.setVerbose() - -// tester.poke("outer_write_en", 0) -// tester.step() - -// // Write addresses and read them. -// tester.poke("outer_addr", addr1) -// tester.poke("outer_din", addr1val) -// tester.poke("outer_write_en", 1) -// tester.step() -// tester.poke("outer_write_en", 0) -// tester.step() -// tester.poke("outer_addr", addr2) -// tester.poke("outer_din", addr2val) -// tester.poke("outer_write_en", 1) -// tester.step() -// tester.poke("outer_write_en", 0) -// tester.step() -// tester.poke("outer_addr", addr3) -// tester.poke("outer_din", addr3val) -// tester.poke("outer_write_en", 1) -// tester.step() -// tester.poke("outer_write_en", 0) -// tester.step() - -// tester.poke("outer_addr", addr1) -// tester.step() -// tester.expect("outer_dout", addr1val) - -// tester.poke("outer_addr", addr2) -// tester.step() -// tester.expect("outer_dout", addr2val) - -// tester.poke("outer_addr", addr3) -// tester.step() -// tester.expect("outer_dout", addr3val) -// } -// } - -// // Test to verify that the circuit doesn't read combinationally based on addr -// // between two submemories. -// class DontReadCombinationally extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { -// override lazy val width = 8 -// override lazy val memDepth = BigInt(2048) -// override lazy val libDepth = BigInt(1024) - -// compile(mem, lib, v, synflops = true) -// val result: Circuit = execute(mem, lib, synflops = true) - -// it should "run with InterpretedTester" in { -// pending // Enable this when https://github.com/freechipsproject/firrtl-interpreter/pull/88 is snapshot-published - -// val addr1 = 0 -// val addr1a = 1 -// val addr2 = 1 << 10 - -// val tester = new InterpretiveTester(result.serialize) -// //~ tester.setVerbose() - -// tester.poke("outer_write_en", 0) -// tester.step() - -// // Write two addresses, one in the lower submemory and the other in the -// // higher submemory. -// tester.poke("outer_addr", addr1) -// tester.poke("outer_din", 0x11) -// tester.poke("outer_write_en", 1) -// tester.step() -// tester.poke("outer_addr", addr1a) -// tester.poke("outer_din", 0x1a) -// tester.poke("outer_write_en", 1) -// tester.step() -// tester.poke("outer_addr", addr2) -// tester.poke("outer_din", 0xaa) -// tester.poke("outer_write_en", 1) -// tester.step() -// tester.poke("outer_write_en", 0) -// tester.poke("outer_addr", addr1) -// tester.step() - -// // Test that there is no combinational read. -// tester.poke("outer_addr", addr1) -// tester.expect("outer_dout", 0x11) -// tester.poke("outer_addr", addr1a) -// tester.expect("outer_dout", 0x11) -// tester.poke("outer_addr", addr2) -// tester.expect("outer_dout", 0x11) - -// // And upon step it should work again. 
-// tester.step() -// tester.expect("outer_addr", 0xaa) -// } -// } diff --git a/src/test/scala/barstools/macros/MacroCompilerSpec.scala b/src/test/scala/barstools/macros/MacroCompilerSpec.scala deleted file mode 100644 index 2cfcaed59..000000000 --- a/src/test/scala/barstools/macros/MacroCompilerSpec.scala +++ /dev/null @@ -1,546 +0,0 @@ -// See LICENSE for license details. - -package barstools.macros - -import firrtl.Parser.parse -import firrtl.ir.{Circuit, NoInfo} -import firrtl.passes.RemoveEmpty -import mdf.macrolib.SRAMMacro -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers - -import java.io.File - -abstract class MacroCompilerSpec extends AnyFlatSpec with Matchers { - import scala.language.implicitConversions - implicit def String2SomeString(i: String): Option[String] = Some(i) - val testDir: String = "test_run_dir/macros" - new File(testDir).mkdirs // Make sure the testDir exists - - // Override these to change the prefixing of macroDir and testDir - val memPrefix: String = testDir - val libPrefix: String = testDir - val vPrefix: String = testDir - - // Override this to use a different cost metric. - // If this is None, the compile() call will not have any -c/-cp arguments, and - // execute() will use CostMetric.default. - val costMetric: Option[CostMetric] = None - private def getCostMetric: CostMetric = costMetric.getOrElse(CostMetric.default) - - private def costMetricCmdLine = { - costMetric match { - case None => Nil - case Some(m) => - val name = m.name() - val params = m.commandLineParams() - List("-c", name) ++ params.flatMap { case (key, value) => List("-cp", key, value) } - } - } - - private def args(mem: String, lib: Option[String], v: String, synflops: Boolean, useCompiler: Boolean) = - List("-m", mem, "-v", v) ++ - (lib match { - case None => Nil - case Some(l) => List("-l", l) - }) ++ - costMetricCmdLine ++ - (if (synflops) List("--mode", "synflops") else Nil) ++ - (if (useCompiler) List("--use-compiler") else Nil) - - // Run the full compiler as if from the command line interface. - // Generates the Verilog; useful in testing since an error will throw an - // exception. - def compile(mem: String, lib: String, v: String, synflops: Boolean): Unit = { - compile(mem, Some(lib), v, synflops) - } - def compile(mem: String, lib: Option[String], v: String, synflops: Boolean, useCompiler: Boolean = false): Unit = { - val mem_full = concat(memPrefix, mem) - val lib_full = concat(libPrefix, lib) - val v_full = concat(vPrefix, v) - - MacroCompiler.run(args(mem_full, lib_full, v_full, synflops, useCompiler)) - } - - // Helper functions to write macro libraries to the given files. - def writeToLib(lib: String, libs: Seq[mdf.macrolib.Macro]): Boolean = { - mdf.macrolib.Utils.writeMDFToPath(Some(concat(libPrefix, lib)), libs) - } - - def writeToMem(mem: String, mems: Seq[mdf.macrolib.Macro]): Boolean = { - mdf.macrolib.Utils.writeMDFToPath(Some(concat(memPrefix, mem)), mems) - } - - // Convenience function for running both compile, execute, and test at once. - def compileExecuteAndTest( - mem: String, - lib: Option[String], - v: String, - output: String, - synflops: Boolean = false, - useCompiler: Boolean = false - ): Unit = { - compile(mem, lib, v, synflops, useCompiler) - val result = execute(mem, lib, synflops, useCompiler) - test(result, output) - } - - // Compare FIRRTL outputs after reparsing output with ScalaTest ("should be"). 
- def test(result: Circuit, output: String): Unit = { - val gold = RemoveEmpty.run(parse(output)) - result.serialize should be(gold.serialize) - } - - // Execute the macro compiler and returns a Circuit containing the output of - // the memory compiler. - def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean): Circuit = - execute(memFile, libFile, synflops, useCompiler = false) - def execute(memFile: Option[String], libFile: Option[String], synflops: Boolean, useCompiler: Boolean): Circuit = { - val mem_full = concat(memPrefix, memFile) - val lib_full = concat(libPrefix, libFile) - - require(memFile.isDefined) - val mems: Seq[Macro] = Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(mem_full)).get.map(new Macro(_)) - val libs: Option[Seq[Macro]] = if (useCompiler) { - Utils.findSRAMCompiler(mdf.macrolib.Utils.readMDFFromPath(lib_full)).map { x => - Utils.buildSRAMMacros(x).map(new Macro(_)) - } - } else { - Utils.filterForSRAM(mdf.macrolib.Utils.readMDFFromPath(lib_full)) match { - case Some(x) => Some(x.map(new Macro(_))) - case None => None - } - } - val macros = mems.map(_.blackbox) - val circuit = Circuit(NoInfo, macros, macros.last.name) - val passes = Seq( - new MacroCompilerPass( - Some(mems), - libs, - None, - None, - getCostMetric, - if (synflops) MacroCompilerAnnotation.Synflops else MacroCompilerAnnotation.Default - ), - new SynFlopsPass(synflops, libs.getOrElse(mems)), - RemoveEmpty - ) - val result: Circuit = passes.foldLeft(circuit)((c, pass) => pass.run(c)) - result - } - - // Helper method to deal with String + Option[String] - private def concat(a: String, b: String): String = { a + "/" + b } - private def concat(a: String, b: Option[String]): Option[String] = { - b match { - case Some(b2: String) => Some(a + "/" + b2) - case _ => None - } - } -} - -// A collection of standard SRAM generators. -trait HasSRAMGenerator { - import mdf.macrolib._ - - import scala.language.implicitConversions - implicit def Int2SomeInt(i: Int): Option[Int] = Some(i) - implicit def BigInt2SomeBigInt(i: BigInt): Option[BigInt] = Some(i) - - // Generate a standard (read/write/combo) port for testing. - // Helper methods for optional width argument - def generateTestPort( - prefix: String, - width: Option[Int], - depth: Option[BigInt], - maskGran: Option[Int] = None, - read: Boolean, - readEnable: Boolean = false, - write: Boolean, - writeEnable: Boolean = false - ): MacroPort = { - val realPrefix = if (prefix == "") "" else prefix + "_" - - MacroPort( - address = PolarizedPort(name = realPrefix + "addr", polarity = ActiveHigh), - clock = Some(PolarizedPort(name = realPrefix + "clk", polarity = PositiveEdge)), - readEnable = if (readEnable) Some(PolarizedPort(name = realPrefix + "read_en", polarity = ActiveHigh)) else None, - writeEnable = - if (writeEnable) Some(PolarizedPort(name = realPrefix + "write_en", polarity = ActiveHigh)) else None, - output = if (read) Some(PolarizedPort(name = realPrefix + "dout", polarity = ActiveHigh)) else None, - input = if (write) Some(PolarizedPort(name = realPrefix + "din", polarity = ActiveHigh)) else None, - maskPort = maskGran match { - case Some(_: Int) => Some(PolarizedPort(name = realPrefix + "mask", polarity = ActiveHigh)) - case _ => None - }, - maskGran = maskGran, - width = width, - depth = depth // These numbers don't matter here. - ) - } - - // Generate a read port for testing. 
- def generateReadPort( - prefix: String, - width: Option[Int], - depth: Option[BigInt], - readEnable: Boolean = false - ): MacroPort = { - generateTestPort(prefix, width, depth, write = false, read = true, readEnable = readEnable) - } - - // Generate a write port for testing. - def generateWritePort( - prefix: String, - width: Option[Int], - depth: Option[BigInt], - maskGran: Option[Int] = None, - writeEnable: Boolean = true - ): MacroPort = { - generateTestPort(prefix, width, depth, maskGran = maskGran, write = true, read = false, writeEnable = writeEnable) - } - - // Generate a simple read-write port for testing. - def generateReadWritePort( - prefix: String, - width: Option[Int], - depth: Option[BigInt], - maskGran: Option[Int] = None - ): MacroPort = { - generateTestPort(prefix, width, depth, maskGran = maskGran, write = true, writeEnable = true, read = true) - } - - // Generate a "simple" SRAM (active high/positive edge, 1 read-write port). - def generateSRAM( - name: String, - prefix: String, - width: Int, - depth: BigInt, - maskGran: Option[Int] = None, - extraPorts: Seq[MacroExtraPort] = List() - ): SRAMMacro = { - SRAMMacro( - name = name, - width = width, - depth = depth, - family = "1rw", - ports = Seq(generateReadWritePort(prefix, width, depth, maskGran)), - extraPorts = extraPorts - ) - } - - // Generate a "simple" SRAM group (active high/positive edge, 1 read-write port). - def generateSimpleSRAMGroup( - prefix: String, - mux: Int, - depth: Range, - width: Range, - maskGran: Option[Int] = None - ): SRAMGroup = { - SRAMGroup( - Seq("mygroup_", "width", "x", "depth", "_", "VT"), - "1rw", - Seq("svt", "lvt", "ulvt"), - mux, - depth, - width, - Seq(generateReadWritePort(prefix, None, None, maskGran)) - ) - } - - // 'vt': ('svt','lvt','ulvt'), 'mux': 2, 'depth': range(16,513,8), 'width': range(8,289,2), 'ports': 1 - // 'vt': ('svt','lvt','ulvt'), 'mux': 4, 'depth': range(32,1025,16), 'width': range(4,145), 'ports': 1} - def generateSRAMCompiler(name: String, prefix: String): mdf.macrolib.SRAMCompiler = { - SRAMCompiler( - name, - Seq( - generateSimpleSRAMGroup(prefix, 2, Range(16, 512, 8), Range(8, 288, 2)), - generateSimpleSRAMGroup(prefix, 4, Range(32, 1024, 16), Range(4, 144, 1)) - ) - ) - } -} - -// Generic "simple" test generator. -// Set up scaffolding for generating memories, files, etc. -// Override this generator to specify the expected FIRRTL output. -trait HasSimpleTestGenerator { - this: MacroCompilerSpec with HasSRAMGenerator => - // Override these with "override lazy val". - // Why lazy? These are used in the constructor here so overriding non-lazily - // would be too late. - def useCompiler: Boolean = false - def memWidth: Int - def libWidth: Int - def memDepth: BigInt - def libDepth: BigInt - def memMaskGran: Option[Int] = None - def libMaskGran: Option[Int] = None - def extraPorts: Seq[mdf.macrolib.MacroExtraPort] = List() - def extraTag: String = "" - - // "Effective" libMaskGran by considering write_enable. - val effectiveLibMaskGran: Int = libMaskGran.getOrElse(libWidth) - - // Override this in the sub-generator if you need a more specific name. - // Defaults to using reflection to pull the name of the test using this - // generator. - def generatorType: String = this.getClass.getSimpleName - - //require (memDepth >= libDepth) - - // Convenience variables to check if a mask exists. - val memHasMask: Boolean = memMaskGran.isDefined - val libHasMask: Boolean = libMaskGran.isDefined - // We need to figure out how many mask bits there are in the mem. 
- val memMaskBits: Int = if (memHasMask) memWidth / memMaskGran.get else 0 - val libMaskBits: Int = if (libHasMask) libWidth / libMaskGran.get else 0 - - val extraTagPrefixed: String = if (extraTag == "") "" else "-" + extraTag - - val mem = s"mem-$generatorType$extraTagPrefixed.json" - val lib = s"lib-$generatorType$extraTagPrefixed.json" - val v = s"$generatorType$extraTagPrefixed.v" - - lazy val mem_name = "target_memory" - val mem_addr_width: Int = MacroCompilerMath.ceilLog2(memDepth) - - lazy val lib_name = "awesome_lib_mem" - val lib_addr_width: Int = MacroCompilerMath.ceilLog2(libDepth) - - // Override these to change the port prefixes if needed. - def libPortPrefix: String = "lib" - def memPortPrefix: String = "outer" - - // These generate "simple" SRAMs (1 masked read-write port) by default, - // but can be overridden if need be. - def generateLibSRAM(): SRAMMacro = generateSRAM(lib_name, libPortPrefix, libWidth, libDepth, libMaskGran, extraPorts) - def generateMemSRAM(): SRAMMacro = generateSRAM(mem_name, memPortPrefix, memWidth, memDepth, memMaskGran) - - def libSRAM: SRAMMacro = generateLibSRAM() - def memSRAM: SRAMMacro = generateMemSRAM() - - def libSRAMs: Seq[SRAMMacro] = Seq(libSRAM) - def memSRAMs: Seq[SRAMMacro] = Seq(memSRAM) - - writeToLib(lib, libSRAMs) - writeToMem(mem, memSRAMs) - - // For masks, width it's a bit tricky since we have to consider cases like - // memMaskGran = 4 and libMaskGran = 8. - // Consider the actually usable libWidth in cases like the above. - val usableLibWidth: Int = - if (memMaskGran.getOrElse(Int.MaxValue) < effectiveLibMaskGran) memMaskGran.get else libWidth - - // Number of lib instances needed to hold the mem, in both directions. - // Round up (e.g. 1.5 instances = effectively 2 instances) - val depthInstances: Int = math.ceil(memDepth.toFloat / libDepth.toFloat).toInt - val widthInstances: Int = math.ceil(memWidth.toFloat / usableLibWidth).toInt - - // Number of width bits in the last width-direction memory. - // e.g. if memWidth = 16 and libWidth = 8, this would be 8 since the last memory 0_1 has 8 bits of input width. - // e.g. if memWidth = 9 and libWidth = 8, this would be 1 since the last memory 0_1 has 1 bit of input width. - lazy val lastWidthBits: Int = if (memWidth % usableLibWidth == 0) usableLibWidth else memWidth % usableLibWidth - lazy val selectBits: Int = mem_addr_width - lib_addr_width - - /** Convenience function to generate a mask statement. - * @param widthInst Width instance (mem_0_x) - * @param depthInst Depth instance (mem_x_0) - */ - def generateMaskStatement(widthInst: Int, depthInst: Int): String = { - // Width of this submemory. - val myMemWidth = if (widthInst == widthInstances - 1) lastWidthBits else usableLibWidth - // Base bit of this submemory. - // e.g. if libWidth is 8 and this is submemory 2 (0-indexed), then this - // would be 16. - val myBaseBit = usableLibWidth * widthInst - - if (libMaskGran.isDefined) { - if (memMaskGran.isEmpty) { - // If there is no memory mask, we should just turn all the lib mask - // bits high. - s"""mem_${depthInst}_$widthInst.lib_mask <= UInt<$libMaskBits>("h${((1 << libMaskBits) - 1).toHexString}")""" - } else { - // Calculate which bit of outer_mask contains the given bit. - // e.g. 
if memMaskGran = 2, libMaskGran = 1 and libWidth = 4, then - // calculateMaskBit({0, 1}) = 0 and calculateMaskBit({1, 2}) = 1 - def calculateMaskBit(bit: Int): Int = bit / memMaskGran.getOrElse(memWidth) - - val bitsArr = (libMaskBits - 1 to 0 by -1).map(x => { - if (x * libMaskGran.get > myMemWidth) { - // If we have extra mask bits leftover after the effective width, - // disable those bits. - """UInt<1>("h0")""" - } else { - val outerMaskBit = calculateMaskBit(x * libMaskGran.get + myBaseBit) - s"bits(outer_mask, $outerMaskBit, $outerMaskBit)" - } - }) - val maskVal = bitsArr.reduceRight((bit, rest) => s"cat($bit, $rest)") - s"mem_${depthInst}_$widthInst.lib_mask <= $maskVal" - } - } else "" - } - - /** Helper function to generate a port. - * - * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") - * @param addrWidth Address port width - * @param width data width - * @param write Has a write port? - * @param writeEnable Has a write enable port? - * @param read Has a read port? - * @param readEnable Has a read enable port? - * @param mask Mask granularity (# bits) of the port or None. - * @param extraPorts Extra ports (name, # bits) - */ - def generatePort( - prefix: String, - addrWidth: Int, - width: Int, - write: Boolean, - writeEnable: Boolean, - read: Boolean, - readEnable: Boolean, - mask: Option[Int], - extraPorts: Seq[(String, Int)] = Seq() - ): String = { - val realPrefix = if (prefix == "") "" else prefix + "_" - - val readStr = if (read) s"output ${realPrefix}dout : UInt<$width>" else "" - val writeStr = if (write) s"input ${realPrefix}din : UInt<$width>" else "" - val readEnableStr = if (readEnable) s"input ${realPrefix}read_en : UInt<1>" else "" - val writeEnableStr = if (writeEnable) s"input ${realPrefix}write_en : UInt<1>" else "" - val maskStr = mask match { - case Some(maskBits: Int) => s"input ${realPrefix}mask : UInt<$maskBits>" - case _ => "" - } - val extraPortsStr = extraPorts.map { case (name, bits) => s" input $name : UInt<$bits>" }.mkString("\n") - s""" - input ${realPrefix}addr : UInt<$addrWidth> - input ${realPrefix}clk : Clock - $writeStr - $readStr - $readEnableStr - $writeEnableStr - $maskStr -$extraPortsStr - """ - } - - /** Helper function to generate a RW footer port. - * - * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") - * @param readEnable Has a read enable port? - * @param mask Mask granularity (# bits) of the port or None. - * @param extraPorts Extra ports (name, # bits) - */ - def generateReadWriteFooterPort( - prefix: String, - readEnable: Boolean, - mask: Option[Int], - extraPorts: Seq[(String, Int)] = Seq() - ): String = { - generatePort( - prefix, - lib_addr_width, - libWidth, - write = true, - writeEnable = true, - read = true, - readEnable = readEnable, - mask = mask, - extraPorts = extraPorts - ) - } - - /** Helper function to generate a RW header port. - * @param prefix Memory port prefix (e.g. "x" for ports like "x_clk") - * @param readEnable Has a read enable port? - * @param mask Mask granularity (# bits) of the port or None. - */ - def generateReadWriteHeaderPort(prefix: String, readEnable: Boolean, mask: Option[Int]): String = { - generatePort( - prefix, - mem_addr_width, - memWidth, - write = true, - writeEnable = true, - read = true, - readEnable = readEnable, - mask - ) - } - - // Generate the header memory ports. 
- def generateHeaderPorts(): String = { - require(memSRAM.ports.size == 1, "Header generator only supports single RW port mem") - generateReadWriteHeaderPort( - memPortPrefix, - memSRAM.ports.head.readEnable.isDefined, - if (memHasMask) Some(memMaskBits) else None - ) - } - - // Generate the header (contains the circuit statement and the target memory - // module. - def generateHeader(): String = { - s""" -circuit $mem_name : - module $mem_name : -${generateHeaderPorts()} - """ - } - - // Generate the target memory ports. - def generateFooterPorts(): String = { - require(libSRAM.ports.size == 1, "Footer generator only supports single RW port mem") - generateReadWriteFooterPort( - libPortPrefix, - libSRAM.ports.head.readEnable.isDefined, - if (libHasMask) Some(libMaskBits) else None, - extraPorts.map(p => (p.name, p.width)) - ) - } - - // Generate the footer (contains the target memory extmodule declaration by default). - def generateFooter(): String = { - s""" - extmodule $lib_name : -${generateFooterPorts()} - - defname = $lib_name - """ - } - - // Abstract method to generate body; to be overridden by specific generator type. - def generateBody(): String - - // Generate the entire output from header, body, and footer. - def generateOutput(): String = { - s""" -${generateHeader()} -${generateBody()} -${generateFooter()} - """ - } - - val output: String = generateOutput() -} - -// Use this trait for tests that invoke the memory compiler without lib. -trait HasNoLibTestGenerator extends HasSimpleTestGenerator { - this: MacroCompilerSpec with HasSRAMGenerator => - - // If there isn't a lib, then the "lib" will become a FIRRTL "mem", which - // in turn becomes synthesized flops. - // Therefore, make "lib" width/depth equal to the mem. - override lazy val libDepth: BigInt = memDepth - override lazy val libWidth: Int = memWidth - override lazy val lib_name: String = mem_name - // Do the same for port names. - override lazy val libPortPrefix: String = memPortPrefix - - // If there is no lib, don't generate a body. - override def generateBody() = "" -} diff --git a/src/test/scala/barstools/macros/Masks.scala b/src/test/scala/barstools/macros/Masks.scala deleted file mode 100644 index 5854eea14..000000000 --- a/src/test/scala/barstools/macros/Masks.scala +++ /dev/null @@ -1,383 +0,0 @@ -package barstools.macros - -// Test the ability of the compiler to deal with various mask combinations. 
- -trait MasksTestSettings { - this: MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator => - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) -} - -// Try all four different kinds of mask config: -/** Non-masked mem Masked mem - * --------------------------------- - * Non-masked lib | | | - * --------------------------------- - * Masked lib | | | - * --------------------------------- - */ - -class Masks_FourTypes_NonMaskedMem_NonMaskedLib - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 32 - override lazy val memMaskGran: Option[Int] = None - override lazy val libWidth = 8 - override lazy val libMaskGran: Option[Int] = None - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_FourTypes_NonMaskedMem_MaskedLib - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 32 - override lazy val memMaskGran: Option[Int] = None - override lazy val libWidth = 8 - override lazy val libMaskGran: Option[Int] = Some(2) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_FourTypes_MaskedMem_NonMaskedLib - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 32 - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libWidth = 8 - override lazy val libMaskGran: Option[Int] = None - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_FourTypes_MaskedMem_NonMaskedLib_SmallerMaskGran - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 32 - override lazy val memMaskGran: Option[Int] = Some(4) - override lazy val libWidth = 8 - override lazy val libMaskGran: Option[Int] = None - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_FourTypes_MaskedMem_MaskedLib - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 32 - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libWidth = 16 - override lazy val libMaskGran: Option[Int] = Some(4) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_FourTypes_MaskedMem_MaskedLib_SameMaskGran - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 32 - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libWidth = 16 - override lazy val libMaskGran: Option[Int] = Some(8) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_FourTypes_MaskedMem_MaskedLib_SmallerMaskGran - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 64 - override lazy val memMaskGran: Option[Int] = Some(4) - override lazy val libWidth = 32 - override lazy val libMaskGran: Option[Int] = 
Some(8) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -// Bit-mask memories to non-masked libs whose width is larger than 1. - -class Masks_BitMaskedMem_NonMaskedLib extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val memMaskGran: Option[Int] = Some(1) - override lazy val libWidth = 8 - override lazy val libMaskGran: Option[Int] = None - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -// FPGA-style byte-masked memories. - -class Masks_FPGAStyle_32_8 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 32 - override lazy val memMaskGran: Option[Int] = Some(32) - override lazy val libMaskGran: Option[Int] = Some(8) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -// Simple powers of two with bit-masked lib. - -class Masks_PowersOfTwo_8_1 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 64 - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libMaskGran: Option[Int] = Some(1) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_PowersOfTwo_16_1 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 64 - override lazy val memMaskGran: Option[Int] = Some(16) - override lazy val libMaskGran: Option[Int] = Some(1) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_PowersOfTwo_32_1 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 64 - override lazy val memMaskGran: Option[Int] = Some(32) - override lazy val libMaskGran: Option[Int] = Some(1) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_PowersOfTwo_64_1 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 64 - override lazy val memMaskGran: Option[Int] = Some(64) - override lazy val libMaskGran: Option[Int] = Some(1) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -// Simple powers of two with non bit-masked lib. 
- -class Masks_PowersOfTwo_32_4 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 128 - override lazy val memMaskGran: Option[Int] = Some(32) - override lazy val libMaskGran: Option[Int] = Some(4) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_PowersOfTwo_32_8 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 128 - override lazy val memMaskGran: Option[Int] = Some(32) - override lazy val libMaskGran: Option[Int] = Some(8) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_PowersOfTwo_8_8 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 128 - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libMaskGran: Option[Int] = Some(8) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -// Width as a multiple of the mask, bit-masked lib - -class Masks_IntegerMaskMultiple_20_10 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 20 - override lazy val memMaskGran: Option[Int] = Some(10) - override lazy val libMaskGran: Option[Int] = Some(1) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_IntegerMaskMultiple_21_7 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 21 - override lazy val memMaskGran: Option[Int] = Some(21) - override lazy val libMaskGran: Option[Int] = Some(7) - - (it should "be enabled when non-power of two masks are supported").is(pending) - //~ compileExecuteAndTest(mem, lib, v, output) -} - -class Masks_IntegerMaskMultiple_21_21 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 21 - override lazy val memMaskGran: Option[Int] = Some(21) - override lazy val libMaskGran: Option[Int] = Some(1) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_IntegerMaskMultiple_84_21 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 84 - override lazy val memMaskGran: Option[Int] = Some(21) - override lazy val libMaskGran: Option[Int] = Some(1) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_IntegerMaskMultiple_92_23 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 92 - override lazy val memMaskGran: Option[Int] = Some(23) - override lazy val libMaskGran: Option[Int] = Some(1) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_IntegerMaskMultiple_117_13 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 117 - override lazy val memMaskGran: Option[Int] = Some(13) - override lazy val libMaskGran: Option[Int] = Some(1) - - it should 
"compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_IntegerMaskMultiple_160_20 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 160 - override lazy val memMaskGran: Option[Int] = Some(20) - override lazy val libMaskGran: Option[Int] = Some(1) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class Masks_IntegerMaskMultiple_184_23 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 184 - override lazy val memMaskGran: Option[Int] = Some(23) - override lazy val libMaskGran: Option[Int] = Some(1) - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -// Width as an non-integer multiple of the mask, bit-masked lib - -class Masks_NonIntegerMaskMultiple_32_3 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with MasksTestSettings { - override lazy val width = 32 - override lazy val memMaskGran: Option[Int] = Some(3) - override lazy val libMaskGran: Option[Int] = Some(1) - - (it should "be enabled when non-power of two masks are supported").is(pending) - //~ compileExecuteAndTest(mem, lib, v, output) -} diff --git a/src/test/scala/barstools/macros/MultiPort.scala b/src/test/scala/barstools/macros/MultiPort.scala deleted file mode 100644 index 07903e0ab..000000000 --- a/src/test/scala/barstools/macros/MultiPort.scala +++ /dev/null @@ -1,500 +0,0 @@ -package barstools.macros - -// Test that the memory compiler works fine for compiling multi-port memories. -// TODO: extend test generator to also automatically generate multi-ported memories. 
- -class SplitWidth_2rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - import mdf.macrolib._ - - override lazy val depth = BigInt(1024) - override lazy val memWidth = 64 - override lazy val memMaskGran: Option[Int] = Some(16) - override lazy val libWidth = 16 - - override def generateMemSRAM(): SRAMMacro = { - SRAMMacro( - name = mem_name, - width = memWidth, - depth = memDepth, - family = "2rw", - ports = Seq( - generateTestPort( - "portA", - memWidth, - Some(memDepth), - maskGran = memMaskGran, - write = true, - writeEnable = true, - read = true, - readEnable = true - ), - generateTestPort( - "portB", - memWidth, - Some(memDepth), - maskGran = memMaskGran, - write = true, - writeEnable = true, - read = true, - readEnable = true - ) - ) - ) - } - - override def generateLibSRAM(): SRAMMacro = { - SRAMMacro( - name = lib_name, - width = libWidth, - depth = libDepth, - family = "2rw", - ports = Seq( - generateTestPort( - "portA", - libWidth, - libDepth, - write = true, - writeEnable = true, - read = true, - readEnable = true - ), - generateTestPort( - "portB", - libWidth, - libDepth, - write = true, - writeEnable = true, - read = true, - readEnable = true - ) - ) - ) - } - - override def generateHeaderPorts(): String = { - generateReadWriteHeaderPort("portA", readEnable = true, Some(memMaskBits)) + "\n" + generateReadWriteHeaderPort( - "portB", - readEnable = true, - Some(memMaskBits) - ) - } - - override def generateFooterPorts(): String = { - generateReadWriteFooterPort("portA", readEnable = true, None) + "\n" + generateReadWriteFooterPort( - "portB", - readEnable = true, - None - ) - } - - override def generateBody() = - """ - inst mem_0_0 of awesome_lib_mem - inst mem_0_1 of awesome_lib_mem - inst mem_0_2 of awesome_lib_mem - inst mem_0_3 of awesome_lib_mem - mem_0_0.portA_clk <= portA_clk - mem_0_0.portA_addr <= portA_addr - node portA_dout_0_0 = bits(mem_0_0.portA_dout, 15, 0) - mem_0_0.portA_din <= bits(portA_din, 15, 0) - mem_0_0.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_0.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 0, 0)), UInt<1>("h1")) - mem_0_1.portA_clk <= portA_clk - mem_0_1.portA_addr <= portA_addr - node portA_dout_0_1 = bits(mem_0_1.portA_dout, 15, 0) - mem_0_1.portA_din <= bits(portA_din, 31, 16) - mem_0_1.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_1.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 1, 1)), UInt<1>("h1")) - mem_0_2.portA_clk <= portA_clk - mem_0_2.portA_addr <= portA_addr - node portA_dout_0_2 = bits(mem_0_2.portA_dout, 15, 0) - mem_0_2.portA_din <= bits(portA_din, 47, 32) - mem_0_2.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_2.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 2, 2)), UInt<1>("h1")) - mem_0_3.portA_clk <= portA_clk - mem_0_3.portA_addr <= portA_addr - node portA_dout_0_3 = bits(mem_0_3.portA_dout, 15, 0) - mem_0_3.portA_din <= bits(portA_din, 63, 48) - mem_0_3.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_3.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 3, 3)), UInt<1>("h1")) - node portA_dout_0 = cat(portA_dout_0_3, cat(portA_dout_0_2, cat(portA_dout_0_1, portA_dout_0_0))) - mem_0_0.portB_clk <= portB_clk - mem_0_0.portB_addr <= portB_addr - node portB_dout_0_0 = bits(mem_0_0.portB_dout, 15, 0) - mem_0_0.portB_din <= bits(portB_din, 15, 0) - mem_0_0.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - 
mem_0_0.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 0, 0)), UInt<1>("h1")) - mem_0_1.portB_clk <= portB_clk - mem_0_1.portB_addr <= portB_addr - node portB_dout_0_1 = bits(mem_0_1.portB_dout, 15, 0) - mem_0_1.portB_din <= bits(portB_din, 31, 16) - mem_0_1.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_1.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 1, 1)), UInt<1>("h1")) - mem_0_2.portB_clk <= portB_clk - mem_0_2.portB_addr <= portB_addr - node portB_dout_0_2 = bits(mem_0_2.portB_dout, 15, 0) - mem_0_2.portB_din <= bits(portB_din, 47, 32) - mem_0_2.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_2.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 2, 2)), UInt<1>("h1")) - mem_0_3.portB_clk <= portB_clk - mem_0_3.portB_addr <= portB_addr - node portB_dout_0_3 = bits(mem_0_3.portB_dout, 15, 0) - mem_0_3.portB_din <= bits(portB_din, 63, 48) - mem_0_3.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_3.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 3, 3)), UInt<1>("h1")) - node portB_dout_0 = cat(portB_dout_0_3, cat(portB_dout_0_2, cat(portB_dout_0_1, portB_dout_0_0))) - portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<64>("h0")) - portB_dout <= mux(UInt<1>("h1"), portB_dout_0, UInt<64>("h0")) -""" - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth_1r_1w extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - import mdf.macrolib._ - - override lazy val depth = BigInt(1024) - override lazy val memWidth = 64 - override lazy val memMaskGran: Option[Int] = Some(16) - override lazy val libWidth = 16 - - override def generateMemSRAM(): SRAMMacro = { - SRAMMacro( - name = mem_name, - width = memWidth, - depth = memDepth, - family = "1r1w", - ports = Seq( - generateTestPort( - "portA", - memWidth, - Some(memDepth), - maskGran = memMaskGran, - write = false, - read = true, - readEnable = true - ), - generateTestPort( - "portB", - memWidth, - Some(memDepth), - maskGran = memMaskGran, - write = true, - writeEnable = true, - read = false - ) - ) - ) - } - - override def generateLibSRAM(): SRAMMacro = { - SRAMMacro( - name = lib_name, - width = libWidth, - depth = libDepth, - family = "1r1w", - ports = Seq( - generateTestPort( - "portA", - libWidth, - libDepth, - write = false, - read = true, - readEnable = true - ), - generateTestPort("portB", libWidth, libDepth, write = true, writeEnable = true, read = false) - ) - ) - } - - override def generateHeaderPorts(): String = { - generatePort( - "portA", - mem_addr_width, - memWidth, - write = false, - writeEnable = false, - read = true, - readEnable = true, - Some(memMaskBits) - ) + "\n" + - generatePort( - "portB", - mem_addr_width, - memWidth, - write = true, - writeEnable = true, - read = false, - readEnable = false, - Some(memMaskBits) - ) - } - - override def generateFooterPorts(): String = { - generatePort( - "portA", - lib_addr_width, - libWidth, - write = false, - writeEnable = false, - read = true, - readEnable = true, - None - ) + "\n" + - generatePort( - "portB", - lib_addr_width, - libWidth, - write = true, - writeEnable = true, - read = false, - readEnable = false, - None - ) - } - - override def generateBody() = - """ - inst mem_0_0 of awesome_lib_mem - inst mem_0_1 of awesome_lib_mem - inst mem_0_2 of awesome_lib_mem - inst mem_0_3 of awesome_lib_mem - mem_0_0.portB_clk <= portB_clk - mem_0_0.portB_addr <= portB_addr - mem_0_0.portB_din 
<= bits(portB_din, 15, 0) - mem_0_0.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 0, 0)), UInt<1>("h1")) - mem_0_1.portB_clk <= portB_clk - mem_0_1.portB_addr <= portB_addr - mem_0_1.portB_din <= bits(portB_din, 31, 16) - mem_0_1.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 1, 1)), UInt<1>("h1")) - mem_0_2.portB_clk <= portB_clk - mem_0_2.portB_addr <= portB_addr - mem_0_2.portB_din <= bits(portB_din, 47, 32) - mem_0_2.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 2, 2)), UInt<1>("h1")) - mem_0_3.portB_clk <= portB_clk - mem_0_3.portB_addr <= portB_addr - mem_0_3.portB_din <= bits(portB_din, 63, 48) - mem_0_3.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 3, 3)), UInt<1>("h1")) - mem_0_0.portA_clk <= portA_clk - mem_0_0.portA_addr <= portA_addr - node portA_dout_0_0 = bits(mem_0_0.portA_dout, 15, 0) - mem_0_0.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_1.portA_clk <= portA_clk - mem_0_1.portA_addr <= portA_addr - node portA_dout_0_1 = bits(mem_0_1.portA_dout, 15, 0) - mem_0_1.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_2.portA_clk <= portA_clk - mem_0_2.portA_addr <= portA_addr - node portA_dout_0_2 = bits(mem_0_2.portA_dout, 15, 0) - mem_0_2.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_3.portA_clk <= portA_clk - mem_0_3.portA_addr <= portA_addr - node portA_dout_0_3 = bits(mem_0_3.portA_dout, 15, 0) - mem_0_3.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - node portA_dout_0 = cat(portA_dout_0_3, cat(portA_dout_0_2, cat(portA_dout_0_1, portA_dout_0_0))) - portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<64>("h0")) -""" - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth_2rw_differentMasks extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - import mdf.macrolib._ - - override lazy val depth = BigInt(1024) - override lazy val memWidth = 64 - override lazy val memMaskGran: Option[Int] = Some(16) - override lazy val libWidth = 16 - - lazy val memMaskGranB = 8 // these generators are run at constructor time - - override def generateMemSRAM(): SRAMMacro = { - SRAMMacro( - name = mem_name, - width = memWidth, - depth = memDepth, - family = "2rw", - ports = Seq( - generateTestPort( - "portA", - memWidth, - Some(memDepth), - maskGran = memMaskGran, - write = true, - writeEnable = true, - read = true, - readEnable = true - ), - generateTestPort( - "portB", - memWidth, - Some(memDepth), - maskGran = Some(memMaskGranB), - write = true, - writeEnable = true, - read = true, - readEnable = true - ) - ) - ) - } - - override def generateLibSRAM(): SRAMMacro = { - SRAMMacro( - name = lib_name, - width = libWidth, - depth = libDepth, - family = "2rw", - ports = Seq( - generateTestPort( - "portA", - libWidth, - libDepth, - write = true, - writeEnable = true, - read = true, - readEnable = true - ), - generateTestPort( - "portB", - libWidth, - libDepth, - write = true, - writeEnable = true, - read = true, - readEnable = true - ) - ) - ) - } - - override def generateHeaderPorts(): String = { - generateReadWriteHeaderPort("portA", readEnable = true, Some(memMaskBits)) + "\n" + generateReadWriteHeaderPort( - "portB", - readEnable = true, - Some(memWidth / memMaskGranB) - ) - } - - override def generateFooterPorts(): String = { - generateReadWriteFooterPort("portA", readEnable = true, None) + "\n" + generateReadWriteFooterPort( - "portB", - readEnable = true, - None - ) - } - - 
override def generateBody() = - """ - inst mem_0_0 of awesome_lib_mem - inst mem_0_1 of awesome_lib_mem - inst mem_0_2 of awesome_lib_mem - inst mem_0_3 of awesome_lib_mem - inst mem_0_4 of awesome_lib_mem - inst mem_0_5 of awesome_lib_mem - inst mem_0_6 of awesome_lib_mem - inst mem_0_7 of awesome_lib_mem - mem_0_0.portA_clk <= portA_clk - mem_0_0.portA_addr <= portA_addr - node portA_dout_0_0 = bits(mem_0_0.portA_dout, 7, 0) - mem_0_0.portA_din <= bits(portA_din, 7, 0) - mem_0_0.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_0.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 0, 0)), UInt<1>("h1")) - mem_0_1.portA_clk <= portA_clk - mem_0_1.portA_addr <= portA_addr - node portA_dout_0_1 = bits(mem_0_1.portA_dout, 7, 0) - mem_0_1.portA_din <= bits(portA_din, 15, 8) - mem_0_1.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_1.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 0, 0)), UInt<1>("h1")) - mem_0_2.portA_clk <= portA_clk - mem_0_2.portA_addr <= portA_addr - node portA_dout_0_2 = bits(mem_0_2.portA_dout, 7, 0) - mem_0_2.portA_din <= bits(portA_din, 23, 16) - mem_0_2.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_2.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 1, 1)), UInt<1>("h1")) - mem_0_3.portA_clk <= portA_clk - mem_0_3.portA_addr <= portA_addr - node portA_dout_0_3 = bits(mem_0_3.portA_dout, 7, 0) - mem_0_3.portA_din <= bits(portA_din, 31, 24) - mem_0_3.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_3.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 1, 1)), UInt<1>("h1")) - mem_0_4.portA_clk <= portA_clk - mem_0_4.portA_addr <= portA_addr - node portA_dout_0_4 = bits(mem_0_4.portA_dout, 7, 0) - mem_0_4.portA_din <= bits(portA_din, 39, 32) - mem_0_4.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_4.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 2, 2)), UInt<1>("h1")) - mem_0_5.portA_clk <= portA_clk - mem_0_5.portA_addr <= portA_addr - node portA_dout_0_5 = bits(mem_0_5.portA_dout, 7, 0) - mem_0_5.portA_din <= bits(portA_din, 47, 40) - mem_0_5.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_5.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 2, 2)), UInt<1>("h1")) - mem_0_6.portA_clk <= portA_clk - mem_0_6.portA_addr <= portA_addr - node portA_dout_0_6 = bits(mem_0_6.portA_dout, 7, 0) - mem_0_6.portA_din <= bits(portA_din, 55, 48) - mem_0_6.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_6.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 3, 3)), UInt<1>("h1")) - mem_0_7.portA_clk <= portA_clk - mem_0_7.portA_addr <= portA_addr - node portA_dout_0_7 = bits(mem_0_7.portA_dout, 7, 0) - mem_0_7.portA_din <= bits(portA_din, 63, 56) - mem_0_7.portA_read_en <= and(portA_read_en, UInt<1>("h1")) - mem_0_7.portA_write_en <= and(and(and(portA_write_en, UInt<1>("h1")), bits(portA_mask, 3, 3)), UInt<1>("h1")) - node portA_dout_0 = cat(portA_dout_0_7, cat(portA_dout_0_6, cat(portA_dout_0_5, cat(portA_dout_0_4, cat(portA_dout_0_3, cat(portA_dout_0_2, cat(portA_dout_0_1, portA_dout_0_0))))))) - mem_0_0.portB_clk <= portB_clk - mem_0_0.portB_addr <= portB_addr - node portB_dout_0_0 = bits(mem_0_0.portB_dout, 7, 0) - mem_0_0.portB_din <= bits(portB_din, 7, 0) - mem_0_0.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_0.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 0, 
0)), UInt<1>("h1")) - mem_0_1.portB_clk <= portB_clk - mem_0_1.portB_addr <= portB_addr - node portB_dout_0_1 = bits(mem_0_1.portB_dout, 7, 0) - mem_0_1.portB_din <= bits(portB_din, 15, 8) - mem_0_1.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_1.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 1, 1)), UInt<1>("h1")) - mem_0_2.portB_clk <= portB_clk - mem_0_2.portB_addr <= portB_addr - node portB_dout_0_2 = bits(mem_0_2.portB_dout, 7, 0) - mem_0_2.portB_din <= bits(portB_din, 23, 16) - mem_0_2.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_2.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 2, 2)), UInt<1>("h1")) - mem_0_3.portB_clk <= portB_clk - mem_0_3.portB_addr <= portB_addr - node portB_dout_0_3 = bits(mem_0_3.portB_dout, 7, 0) - mem_0_3.portB_din <= bits(portB_din, 31, 24) - mem_0_3.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_3.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 3, 3)), UInt<1>("h1")) - mem_0_4.portB_clk <= portB_clk - mem_0_4.portB_addr <= portB_addr - node portB_dout_0_4 = bits(mem_0_4.portB_dout, 7, 0) - mem_0_4.portB_din <= bits(portB_din, 39, 32) - mem_0_4.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_4.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 4, 4)), UInt<1>("h1")) - mem_0_5.portB_clk <= portB_clk - mem_0_5.portB_addr <= portB_addr - node portB_dout_0_5 = bits(mem_0_5.portB_dout, 7, 0) - mem_0_5.portB_din <= bits(portB_din, 47, 40) - mem_0_5.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_5.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 5, 5)), UInt<1>("h1")) - mem_0_6.portB_clk <= portB_clk - mem_0_6.portB_addr <= portB_addr - node portB_dout_0_6 = bits(mem_0_6.portB_dout, 7, 0) - mem_0_6.portB_din <= bits(portB_din, 55, 48) - mem_0_6.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_6.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 6, 6)), UInt<1>("h1")) - mem_0_7.portB_clk <= portB_clk - mem_0_7.portB_addr <= portB_addr - node portB_dout_0_7 = bits(mem_0_7.portB_dout, 7, 0) - mem_0_7.portB_din <= bits(portB_din, 63, 56) - mem_0_7.portB_read_en <= and(portB_read_en, UInt<1>("h1")) - mem_0_7.portB_write_en <= and(and(and(portB_write_en, UInt<1>("h1")), bits(portB_mask, 7, 7)), UInt<1>("h1")) - node portB_dout_0 = cat(portB_dout_0_7, cat(portB_dout_0_6, cat(portB_dout_0_5, cat(portB_dout_0_4, cat(portB_dout_0_3, cat(portB_dout_0_2, cat(portB_dout_0_1, portB_dout_0_0))))))) - portA_dout <= mux(UInt<1>("h1"), portA_dout_0, UInt<64>("h0")) - portB_dout <= mux(UInt<1>("h1"), portB_dout_0, UInt<64>("h0")) -""" - - compileExecuteAndTest(mem, lib, v, output) -} diff --git a/src/test/scala/barstools/macros/SRAMCompiler.scala b/src/test/scala/barstools/macros/SRAMCompiler.scala deleted file mode 100644 index 750283cea..000000000 --- a/src/test/scala/barstools/macros/SRAMCompiler.scala +++ /dev/null @@ -1,21 +0,0 @@ -package barstools.macros - -import mdf.macrolib - -class SRAMCompiler extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - val compiler: macrolib.SRAMCompiler = generateSRAMCompiler("awesome", "A") - val verilog = s"v-SRAMCompiler.v" - override lazy val depth = BigInt(16) - override lazy val memWidth = 8 - override lazy val libWidth = 8 - override lazy val mem_name = "mymem" - override lazy val memPortPrefix = "X" - override lazy val lib_name = "mygroup_8x16_SVT" - 
override lazy val libPortPrefix = "A" - - writeToLib(lib, Seq(compiler)) - - writeToMem(mem, Seq(generateSRAM("mymem", "X", 8, 16))) - - compileExecuteAndTest(mem, Some(lib), verilog, output = output, useCompiler = true) -} diff --git a/src/test/scala/barstools/macros/SimpleSplitDepth.scala b/src/test/scala/barstools/macros/SimpleSplitDepth.scala deleted file mode 100644 index 7e02bc1f2..000000000 --- a/src/test/scala/barstools/macros/SimpleSplitDepth.scala +++ /dev/null @@ -1,638 +0,0 @@ -package barstools.macros - -// Test the depth splitting aspect of the memory compiler. -// This file is for simple tests: one read-write port, powers of two sizes, etc. -// For example, implementing a 4096x32 memory using four 1024x32 memories. - -trait HasSimpleDepthTestGenerator extends HasSimpleTestGenerator { - this: MacroCompilerSpec with HasSRAMGenerator => - def width: Int - - override lazy val memWidth: Int = width - override lazy val libWidth: Int = width - - // Generate a depth-splitting body. - override def generateBody(): String = { - val output = new StringBuilder - - if (selectBits > 0) { - output.append( - s""" - node ${memPortPrefix}_addr_sel = bits(${memPortPrefix}_addr, ${mem_addr_width - 1}, $lib_addr_width) - reg ${memPortPrefix}_addr_sel_reg : UInt<$selectBits>, ${memPortPrefix}_clk with : - reset => (UInt<1>("h0"), ${memPortPrefix}_addr_sel_reg) - ${memPortPrefix}_addr_sel_reg <= mux(UInt<1>("h1"), ${memPortPrefix}_addr_sel, ${memPortPrefix}_addr_sel_reg) -""" - ) - } - - for (i <- 0 until depthInstances) { - - val maskStatement = generateMaskStatement(0, i) - val enableIdentifier = - if (selectBits > 0) s"""eq(${memPortPrefix}_addr_sel, UInt<$selectBits>("h${i.toHexString}"))""" - else "UInt<1>(\"h1\")" - val chipEnable = s"""UInt<1>("h1")""" - val writeEnable = - if (memMaskGran.isEmpty) s"and(${memPortPrefix}_write_en, $chipEnable)" else s"${memPortPrefix}_write_en" - output.append( - s""" - inst mem_${i}_0 of $lib_name - mem_${i}_0.${libPortPrefix}_clk <= ${memPortPrefix}_clk - mem_${i}_0.${libPortPrefix}_addr <= ${memPortPrefix}_addr - node ${memPortPrefix}_dout_${i}_0 = bits(mem_${i}_0.${libPortPrefix}_dout, ${width - 1}, 0) - mem_${i}_0.${libPortPrefix}_din <= bits(${memPortPrefix}_din, ${width - 1}, 0) - $maskStatement - mem_${i}_0.${libPortPrefix}_write_en <= and(and($writeEnable, UInt<1>("h1")), $enableIdentifier) - node ${memPortPrefix}_dout_$i = ${memPortPrefix}_dout_${i}_0 - """ - ) - } - def generate_outer_dout_tree(i: Int, depthInstances: Int): String = { - if (i > depthInstances - 1) { - s"""UInt<$libWidth>("h0")""" - } else { - s"""mux(eq(${memPortPrefix}_addr_sel_reg, UInt<%d>("h%s")), ${memPortPrefix}_dout_%d, %s)""".format( - selectBits, - i.toHexString, - i, - generate_outer_dout_tree(i + 1, depthInstances) - ) - } - } - output.append(s" ${memPortPrefix}_dout <= ") - if (selectBits > 0) { - output.append(generate_outer_dout_tree(0, depthInstances)) - } else { - output.append(s"""mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<$libWidth>("h0"))""") - } - - output.toString - } -} - -// Try different widths -class SplitDepth4096x32_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 32 - override lazy val memDepth = BigInt(4096) - override lazy val libDepth = BigInt(1024) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitDepth4096x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 16 - override lazy val memDepth = 
BigInt(4096) - override lazy val libDepth = BigInt(1024) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitDepth32768x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 8 - override lazy val memDepth = BigInt(32768) - override lazy val libDepth = BigInt(1024) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitDepth4096x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 8 - override lazy val memDepth = BigInt(4096) - override lazy val libDepth = BigInt(1024) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitDepth2048x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 8 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitDepth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 8 - override lazy val memDepth = BigInt(1024) - override lazy val libDepth = BigInt(1024) - - compileExecuteAndTest(mem, lib, v, output) -} - -// Non power of two -class SplitDepth2000x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 8 - override lazy val memDepth = BigInt(2000) - override lazy val libDepth = BigInt(1024) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitDepth2049x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 8 - override lazy val memDepth = BigInt(2049) - override lazy val libDepth = BigInt(1024) - - compileExecuteAndTest(mem, lib, v, output) -} - -// Masked RAMs - -// Test for mem mask == lib mask (i.e. 
mask is a write enable bit) -class SplitDepth2048x32_mrw_lib32 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 32 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran: Option[Int] = Some(32) - override lazy val libMaskGran: Option[Int] = Some(32) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitDepth2048x8_mrw_lib8 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 8 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libMaskGran: Option[Int] = Some(8) - - compileExecuteAndTest(mem, lib, v, output) -} - -// Non-bit level mask -class SplitDepth2048x64_mrw_mem32_lib8 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator { - override lazy val width = 64 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran: Option[Int] = Some(32) - override lazy val libMaskGran: Option[Int] = Some(8) - - compileExecuteAndTest(mem, lib, v, output) -} - -// Bit level mask -class SplitDepth2048x32_mrw_mem16_lib1 - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator { - override lazy val width = 32 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran: Option[Int] = Some(16) - override lazy val libMaskGran: Option[Int] = Some(1) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitDepth2048x32_mrw_mem8_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 32 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libMaskGran: Option[Int] = Some(1) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitDepth2048x32_mrw_mem4_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 32 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran: Option[Int] = Some(4) - override lazy val libMaskGran: Option[Int] = Some(1) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitDepth2048x32_mrw_mem2_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 32 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran: Option[Int] = Some(2) - override lazy val libMaskGran: Option[Int] = Some(1) - - compileExecuteAndTest(mem, lib, v, output) -} - -// Non-powers of 2 mask sizes -class SplitDepth2048x32_mrw_mem3_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 32 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran: Option[Int] = Some(3) - override lazy val libMaskGran: Option[Int] = Some(1) - - (it should "be enabled when non-power of two masks are supported").is(pending) - //compileExecuteAndTest(mem, lib, v, output) -} - -class SplitDepth2048x32_mrw_mem7_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy 
val width = 32 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran: Option[Int] = Some(7) - override lazy val libMaskGran: Option[Int] = Some(1) - - (it should "be enabled when non-power of two masks are supported").is(pending) - //compileExecuteAndTest(mem, lib, v, output) -} - -class SplitDepth2048x32_mrw_mem9_lib1 extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 32 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val memMaskGran: Option[Int] = Some(9) - override lazy val libMaskGran: Option[Int] = Some(1) - - (it should "be enabled when non-power of two masks are supported").is(pending) - //compileExecuteAndTest(mem, lib, v, output) -} - -// Try an extra port -class SplitDepth2048x8_extraPort extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - import mdf.macrolib._ - - override lazy val width = 8 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val extraPorts = List( - MacroExtraPort(name = "extra_port", width = 8, portType = Constant, value = 0xff) - ) - override lazy val extraTag = "extraPort" - - override def generateOutput(): String = - """ -circuit target_memory : - module target_memory : - input outer_addr : UInt<11> - input outer_clk : Clock - input outer_din : UInt<8> - output outer_dout : UInt<8> - input outer_write_en : UInt<1> - - node outer_addr_sel = bits(outer_addr, 10, 10) - reg outer_addr_sel_reg : UInt<1>, outer_clk with : - reset => (UInt<1>("h0"), outer_addr_sel_reg) - outer_addr_sel_reg <= mux(UInt<1>("h1"), outer_addr_sel, outer_addr_sel_reg) - - inst mem_0_0 of awesome_lib_mem - mem_0_0.extra_port <= UInt<8>("hff") - mem_0_0.lib_clk <= outer_clk - mem_0_0.lib_addr <= outer_addr - node outer_dout_0_0 = bits(mem_0_0.lib_dout, 7, 0) - mem_0_0.lib_din <= bits(outer_din, 7, 0) - - mem_0_0.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outer_addr_sel, UInt<1>("h0"))) - node outer_dout_0 = outer_dout_0_0 - - inst mem_1_0 of awesome_lib_mem - mem_1_0.extra_port <= UInt<8>("hff") - mem_1_0.lib_clk <= outer_clk - mem_1_0.lib_addr <= outer_addr - node outer_dout_1_0 = bits(mem_1_0.lib_dout, 7, 0) - mem_1_0.lib_din <= bits(outer_din, 7, 0) - - mem_1_0.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outer_addr_sel, UInt<1>("h1"))) - node outer_dout_1 = outer_dout_1_0 - outer_dout <= mux(eq(outer_addr_sel_reg, UInt<1>("h0")), outer_dout_0, mux(eq(outer_addr_sel_reg, UInt<1>("h1")), outer_dout_1, UInt<8>("h0"))) - extmodule awesome_lib_mem : - input lib_addr : UInt<10> - input lib_clk : Clock - input lib_din : UInt<8> - output lib_dout : UInt<8> - input lib_write_en : UInt<1> - input extra_port : UInt<8> - - defname = awesome_lib_mem - """ - - compileExecuteAndTest(mem, lib, v, output) -} - -// Split read and (non-masked) write ports (r+w). 
-class SplitDepth_SplitPortsNonMasked extends MacroCompilerSpec with HasSRAMGenerator { - lazy val width = 8 - lazy val memDepth = BigInt(2048) - lazy val libDepth = BigInt(1024) - - override val memPrefix: String = testDir - override val libPrefix: String = testDir - - import mdf.macrolib._ - - "Non-masked split lib; split mem" should "split fine" in { - val lib = "lib-split_depth-r-w-split-lib-split-mem.json" - val mem = "mem-split_depth-r-w-split-lib-split-mem.json" - val v = "split_depth-r-w-split-lib-split-mem.v" - - val libMacro = SRAMMacro( - name = "awesome_lib_mem", - width = width, - depth = libDepth, - family = "1r1w", - ports = Seq( - generateReadPort("innerA", width, libDepth), - generateWritePort("innerB", width, libDepth) - ) - ) - - val memMacro = SRAMMacro( - name = "target_memory", - width = width, - depth = memDepth, - family = "1r1w", - ports = Seq( - generateReadPort("outerB", width, memDepth), - generateWritePort("outerA", width, memDepth) - ) - ) - - writeToLib(mem, Seq(memMacro)) - writeToLib(lib, Seq(libMacro)) - - val output = - """ -circuit target_memory : - module target_memory : - input outerB_addr : UInt<11> - input outerB_clk : Clock - output outerB_dout : UInt<8> - input outerA_addr : UInt<11> - input outerA_clk : Clock - input outerA_din : UInt<8> - input outerA_write_en : UInt<1> - - node outerB_addr_sel = bits(outerB_addr, 10, 10) - reg outerB_addr_sel_reg : UInt<1>, outerB_clk with : - reset => (UInt<1>("h0"), outerB_addr_sel_reg) - outerB_addr_sel_reg <= mux(UInt<1>("h1"), outerB_addr_sel, outerB_addr_sel_reg) - node outerA_addr_sel = bits(outerA_addr, 10, 10) - inst mem_0_0 of awesome_lib_mem - mem_0_0.innerB_clk <= outerA_clk - mem_0_0.innerB_addr <= outerA_addr - mem_0_0.innerB_din <= bits(outerA_din, 7, 0) - mem_0_0.innerB_write_en <= and(and(and(outerA_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) - mem_0_0.innerA_clk <= outerB_clk - mem_0_0.innerA_addr <= outerB_addr - node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) - node outerB_dout_0 = outerB_dout_0_0 - inst mem_1_0 of awesome_lib_mem - mem_1_0.innerB_clk <= outerA_clk - mem_1_0.innerB_addr <= outerA_addr - mem_1_0.innerB_din <= bits(outerA_din, 7, 0) - mem_1_0.innerB_write_en <= and(and(and(outerA_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) - mem_1_0.innerA_clk <= outerB_clk - mem_1_0.innerA_addr <= outerB_addr - node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) - node outerB_dout_1 = outerB_dout_1_0 - outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<8>("h0"))) - - extmodule awesome_lib_mem : - input innerA_addr : UInt<10> - input innerA_clk : Clock - output innerA_dout : UInt<8> - input innerB_addr : UInt<10> - input innerB_clk : Clock - input innerB_din : UInt<8> - input innerB_write_en : UInt<1> - - defname = awesome_lib_mem -""" - - compileExecuteAndTest(mem, lib, v, output) - } - - "Non-masked regular lib; split mem" should "split fine" in { - // Enable this test when the memory compiler can compile non-matched - // memories (e.g. mrw mem and r+mw lib). - // Right now all we can get is a "port count must match" error. 
- pending - - val lib = "lib-split_depth-r-w-regular-lib-split-mem.json" - val mem = "mem-split_depth-r-w-regular-lib-split-mem.json" - val v = "split_depth-r-w-regular-lib-split-mem.v" - - val memMacro = SRAMMacro( - name = "target_memory", - width = width, - depth = memDepth, - family = "1r1w", - ports = Seq( - generateReadPort("outerB", width, memDepth), - generateWritePort("outerA", width, memDepth) - ) - ) - - writeToLib(mem, Seq(memMacro)) - writeToLib(lib, Seq(generateSRAM("awesome_lib_mem", "lib", width, libDepth))) - - val output = - """ -TODO -""" - - compileExecuteAndTest(mem, lib, v, output) - } - - "Non-masked split lib; regular mem" should "split fine" in { - // Enable this test when the memory compiler can compile non-matched - // memories (e.g. mrw mem and r+mw lib). - // Right now all we can get is a "port count must match" error. - // [edwardw]: does this even make sense? Can we compile a 2-ported memory using 1-ported memories? - pending - - val lib = "lib-split_depth-r-w-split-lib-regular-mem.json" - val mem = "mem-split_depth-r-w-split-lib-regular-mem.json" - val v = "split_depth-r-w-split-lib-regular-mem.v" - - val libMacro = SRAMMacro( - name = "awesome_lib_mem", - width = width, - depth = libDepth, - family = "1rw", - ports = Seq( - generateReadPort("innerA", width, libDepth), - generateWritePort("innerB", width, libDepth) - ) - ) - - writeToLib(mem, Seq(generateSRAM("target_memory", "outer", width, memDepth))) - writeToLib(lib, Seq(libMacro)) - - val output = - """ -TODO -""" - - compileExecuteAndTest(mem, lib, v, output) - } -} - -// Split read and (masked) write ports (r+mw). -class SplitDepth_SplitPortsMasked extends MacroCompilerSpec with HasSRAMGenerator { - lazy val width = 8 - lazy val memDepth = BigInt(2048) - lazy val libDepth = BigInt(1024) - lazy val memMaskGran: Option[Int] = Some(8) - lazy val libMaskGran: Option[Int] = Some(1) - - override val memPrefix: String = testDir - override val libPrefix: String = testDir - - import mdf.macrolib._ - - "Masked split lib; split mem" should "split fine" in { - val lib = "lib-split_depth-r-mw-split-lib-split-mem.json" - val mem = "mem-split_depth-r-mw-split-lib-split-mem.json" - val v = "split_depth-r-mw-split-lib-split-mem.v" - - val libMacro = SRAMMacro( - name = "awesome_lib_mem", - width = width, - depth = libDepth, - family = "1r1w", - ports = Seq( - generateReadPort("innerA", width, libDepth), - generateWritePort("innerB", width, libDepth, libMaskGran) - ) - ) - - val memMacro = SRAMMacro( - name = "target_memory", - width = width, - depth = memDepth, - family = "1r1w", - ports = Seq( - generateReadPort("outerB", width, memDepth), - generateWritePort("outerA", width, memDepth, memMaskGran) - ) - ) - - writeToLib(mem, Seq(memMacro)) - writeToLib(lib, Seq(libMacro)) - - val output = - """ -circuit target_memory : - module target_memory : - input outerB_addr : UInt<11> - input outerB_clk : Clock - output outerB_dout : UInt<8> - input outerA_addr : UInt<11> - input outerA_clk : Clock - input outerA_din : UInt<8> - input outerA_write_en : UInt<1> - input outerA_mask : UInt<1> - - node outerB_addr_sel = bits(outerB_addr, 10, 10) - reg outerB_addr_sel_reg : UInt<1>, outerB_clk with : - reset => (UInt<1>("h0"), outerB_addr_sel_reg) - outerB_addr_sel_reg <= mux(UInt<1>("h1"), outerB_addr_sel, outerB_addr_sel_reg) - node outerA_addr_sel = bits(outerA_addr, 10, 10) - inst mem_0_0 of awesome_lib_mem - mem_0_0.innerB_clk <= outerA_clk - mem_0_0.innerB_addr <= outerA_addr - mem_0_0.innerB_din <= bits(outerA_din, 7, 0) - 
mem_0_0.innerB_mask <= cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), bits(outerA_mask, 0, 0)))))))) - mem_0_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) - mem_0_0.innerA_clk <= outerB_clk - mem_0_0.innerA_addr <= outerB_addr - node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) - node outerB_dout_0 = outerB_dout_0_0 - inst mem_1_0 of awesome_lib_mem - mem_1_0.innerB_clk <= outerA_clk - mem_1_0.innerB_addr <= outerA_addr - mem_1_0.innerB_din <= bits(outerA_din, 7, 0) - mem_1_0.innerB_mask <= cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), bits(outerA_mask, 0, 0)))))))) - mem_1_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) - mem_1_0.innerA_clk <= outerB_clk - mem_1_0.innerA_addr <= outerB_addr - node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) - node outerB_dout_1 = outerB_dout_1_0 - outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<8>("h0"))) - - extmodule awesome_lib_mem : - input innerA_addr : UInt<10> - input innerA_clk : Clock - output innerA_dout : UInt<8> - input innerB_addr : UInt<10> - input innerB_clk : Clock - input innerB_din : UInt<8> - input innerB_write_en : UInt<1> - input innerB_mask : UInt<8> - - defname = awesome_lib_mem -""" - - compileExecuteAndTest(mem, lib, v, output) - } - - "Non-masked regular lib; split mem" should "split fine" in { - // Enable this test when the memory compiler can compile non-matched - // memories (e.g. mrw mem and r+mw lib). - // Right now all we can get is a "port count must match" error. - pending - - val lib = "lib-split_depth-r-mw-regular-lib-split-mem.json" - val mem = "mem-split_depth-r-mw-regular-lib-split-mem.json" - val v = "split_depth-r-mw-regular-lib-split-mem.v" - - val memMacro = SRAMMacro( - name = "target_memory", - width = width, - depth = memDepth, - family = "1r1w", - ports = Seq( - generateReadPort("outerB", width, memDepth), - generateWritePort("outerA", width, memDepth, memMaskGran) - ) - ) - - writeToLib(mem, Seq(memMacro)) - writeToLib(lib, Seq(generateSRAM("awesome_lib_mem", "lib", width, libDepth, libMaskGran))) - - val output = - """ -TODO -""" - - compileExecuteAndTest(mem, lib, v, output) - } - - "Non-masked split lib; regular mem" should "split fine" in { - // Enable this test when the memory compiler can compile non-matched - // memories (e.g. mrw mem and r+mw lib). - // Right now all we can get is a "port count must match" error. - // [edwardw]: does this even make sense? Can we compile a 2-ported memory using 1-ported memories? 
- pending - - val lib = "lib-split_depth-r-mw-split-lib-regular-mem.json" - val mem = "mem-split_depth-r-mw-split-lib-regular-mem.json" - val v = "split_depth-r-mw-split-lib-regular-mem.v" - - val libMacro = SRAMMacro( - name = "awesome_lib_mem", - width = width, - depth = libDepth, - family = "1rw", - ports = Seq( - generateReadPort("innerA", width, libDepth), - generateWritePort("innerB", width, libDepth, libMaskGran) - ) - ) - - writeToLib(mem, Seq(generateSRAM("target_memory", "outer", width, memDepth, memMaskGran))) - writeToLib(lib, Seq(libMacro)) - - val output = - """ -TODO -""" - - compileExecuteAndTest(mem, lib, v, output) - } -} diff --git a/src/test/scala/barstools/macros/SimpleSplitWidth.scala b/src/test/scala/barstools/macros/SimpleSplitWidth.scala deleted file mode 100644 index 3dffc66f5..000000000 --- a/src/test/scala/barstools/macros/SimpleSplitWidth.scala +++ /dev/null @@ -1,608 +0,0 @@ -package barstools.macros - -// Test the width splitting aspect of the memory compiler. -// For example, implementing a 1024x32 memory using four 1024x8 memories. - -trait HasSimpleWidthTestGenerator extends HasSimpleTestGenerator { - this: MacroCompilerSpec with HasSRAMGenerator => - def depth: BigInt - - override lazy val memDepth: BigInt = depth - override lazy val libDepth: BigInt = depth - - override def generateBody(): String = { - val output = new StringBuilder - - // Generate mem_0_ lines for number of width instances. - output.append( - (0 until widthInstances).map { i: Int => - s""" - inst mem_0_$i of $lib_name -""" - }.reduceLeft(_ + _) - ) - - // Generate submemory connection blocks. - output.append((for (i <- 0 until widthInstances) yield { - // Width of this submemory. - val myMemWidth = if (i == widthInstances - 1) lastWidthBits else usableLibWidth - // Base bit of this submemory. - // e.g. if libWidth is 8 and this is submemory 2 (0-indexed), then this - // would be 16. - val myBaseBit = usableLibWidth * i - - val maskStatement = generateMaskStatement(i, 0) - - // We need to use writeEnable as a crude "mask" if mem has a mask but - // lib does not. - val writeEnableBit = if (libMaskGran.isEmpty && memMaskGran.isDefined) { - val outerMaskBit = myBaseBit / memMaskGran.get - s"bits(outer_mask, $outerMaskBit, $outerMaskBit)" - } else """UInt<1>("h1")""" - val chipEnable = s"""UInt<1>("h1")""" - val writeEnableExpr = - if (libMaskGran.isEmpty) s"and(${memPortPrefix}_write_en, $chipEnable)" else s"${memPortPrefix}_write_en" - - s""" - mem_0_$i.${libPortPrefix}_clk <= ${memPortPrefix}_clk - mem_0_$i.${libPortPrefix}_addr <= ${memPortPrefix}_addr - node ${memPortPrefix}_dout_0_$i = bits(mem_0_$i.${libPortPrefix}_dout, ${myMemWidth - 1}, 0) - mem_0_$i.${libPortPrefix}_din <= bits(${memPortPrefix}_din, ${myBaseBit + myMemWidth - 1}, $myBaseBit) - $maskStatement - mem_0_$i.${libPortPrefix}_write_en <= and(and($writeEnableExpr, $writeEnableBit), UInt<1>("h1")) -""" - }).reduceLeft(_ + _)) - - // Generate final output that concats together the sub-memories. - // e.g. 
cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0)) - output.append { - val doutStatements = (widthInstances - 1 to 0 by -1).map(i => s"${memPortPrefix}_dout_0_$i") - val catStmt = doutStatements.init.foldRight(doutStatements.last)((l: String, r: String) => s"cat($l, $r)") - s""" - node ${memPortPrefix}_dout_0 = $catStmt -""" - } - - output.append(s""" - ${memPortPrefix}_dout <= mux(UInt<1>("h1"), ${memPortPrefix}_dout_0, UInt<$memWidth>("h0")) -""") - output.toString - } -} - -// Try different widths against a base memory width of 8. -class SplitWidth1024x128_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 128 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x64_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 64 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x32_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 32 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 8 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -// Try different widths against a base memory width of 16. -class SplitWidth1024x128_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 128 - override lazy val libWidth = 16 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x64_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 64 - override lazy val libWidth = 16 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x32_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 32 - override lazy val libWidth = 16 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x16_lib16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val libWidth = 16 - - compileExecuteAndTest(mem, lib, v, output) -} - -// Try different widths against a base memory width of 8 but depth 512 instead of 1024. 
-class SplitWidth512x128_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(512) - override lazy val memWidth = 128 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth512x64_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(512) - override lazy val memWidth = 64 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth512x32_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(512) - override lazy val memWidth = 32 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth512x16_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(512) - override lazy val memWidth = 16 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth512x8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(512) - override lazy val memWidth = 8 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -// Try non-power of two widths against a base memory width of 8. -class SplitWidth1024x67_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 67 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x60_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 60 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x42_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 42 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x20_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 20 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x17_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 17 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x15_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 15 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x9_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 9 - override lazy val libWidth = 8 - - compileExecuteAndTest(mem, lib, v, output) -} - -// Try against a non-power of two base memory width. 
-class SplitWidth1024x64_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 64 - override lazy val libWidth = 11 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x33_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 33 - override lazy val libWidth = 11 - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x16_mem11_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val libWidth = 11 - - compileExecuteAndTest(mem, lib, v, output) -} - -// Masked RAM - -class SplitWidth1024x8_memGran_8_libGran_1_rw - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 8 - override lazy val libWidth = 8 - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libMaskGran: Option[Int] = Some(1) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x16_memGran_8_libGran_1_rw - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val libWidth = 8 - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libMaskGran: Option[Int] = Some(1) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x16_memGran_8_libGran_8_rw - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val libWidth = 8 - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libMaskGran: Option[Int] = Some(8) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x128_memGran_8_libGran_1_rw - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 128 - override lazy val libWidth = 32 - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libMaskGran: Option[Int] = Some(1) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x16_memGran_4_libGran_1_rw - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val libWidth = 8 - override lazy val memMaskGran: Option[Int] = Some(4) - override lazy val libMaskGran: Option[Int] = Some(1) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x16_memGran_2_libGran_1_rw - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val libWidth = 8 - override lazy val memMaskGran: Option[Int] = Some(2) - override lazy val libMaskGran: Option[Int] = Some(1) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x16_memGran_16_libGran_1_rw - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val libWidth = 8 - override 
lazy val memMaskGran: Option[Int] = Some(16) - override lazy val libMaskGran: Option[Int] = Some(1) - - compileExecuteAndTest(mem, lib, v, output) -} - -// Non-masked mem, masked lib - -class SplitWidth1024x16_libGran_8_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val libWidth = 8 - override lazy val libMaskGran: Option[Int] = Some(8) - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x16_libGran_1_rw extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val libWidth = 8 - override lazy val libMaskGran: Option[Int] = Some(1) - - compileExecuteAndTest(mem, lib, v, output) -} - -// Non-memMask and non-1 libMask - -class SplitWidth1024x16_memGran_8_libGran_2_rw - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val libWidth = 8 - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libMaskGran: Option[Int] = Some(2) - - compileExecuteAndTest(mem, lib, v, output) -} - -// Non-power of two memGran - -class SplitWidth1024x16_memGran_9_libGran_1_rw - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - override lazy val depth = BigInt(1024) - override lazy val memWidth = 16 - override lazy val libWidth = 8 - override lazy val memMaskGran: Option[Int] = Some(9) - override lazy val libMaskGran: Option[Int] = Some(1) - - (it should "be enabled when non-power of two masks are supported").is(pending) - //~ compile(mem, lib, v, false) - //~ execute(mem, lib, false, output) -} - -// Read enable - -class SplitWidth1024x32_readEnable_Lib - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - import mdf.macrolib._ - - override lazy val depth = BigInt(1024) - override lazy val memWidth = 32 - override lazy val libWidth = 8 - - override def generateLibSRAM(): SRAMMacro = { - SRAMMacro( - name = lib_name, - width = libWidth, - depth = libDepth, - family = "1rw", - ports = Seq( - generateTestPort( - "lib", - Some(libWidth), - Some(libDepth), - maskGran = libMaskGran, - write = true, - writeEnable = true, - read = true, - readEnable = true - ) - ) - ) - } - - override def generateBody() = - """ - inst mem_0_0 of awesome_lib_mem - inst mem_0_1 of awesome_lib_mem - inst mem_0_2 of awesome_lib_mem - inst mem_0_3 of awesome_lib_mem - mem_0_0.lib_clk <= outer_clk - mem_0_0.lib_addr <= outer_addr - node outer_dout_0_0 = bits(mem_0_0.lib_dout, 7, 0) - mem_0_0.lib_din <= bits(outer_din, 7, 0) - mem_0_0.lib_read_en <= and(and(not(outer_write_en), UInt<1>("h1")), UInt<1>("h1")) - mem_0_0.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - mem_0_1.lib_clk <= outer_clk - mem_0_1.lib_addr <= outer_addr - node outer_dout_0_1 = bits(mem_0_1.lib_dout, 7, 0) - mem_0_1.lib_din <= bits(outer_din, 15, 8) - mem_0_1.lib_read_en <= and(and(not(outer_write_en), UInt<1>("h1")), UInt<1>("h1")) - mem_0_1.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - mem_0_2.lib_clk <= outer_clk - mem_0_2.lib_addr <= outer_addr - node outer_dout_0_2 = bits(mem_0_2.lib_dout, 7, 0) - mem_0_2.lib_din <= bits(outer_din, 23, 16) - mem_0_2.lib_read_en <= and(and(not(outer_write_en), 
UInt<1>("h1")), UInt<1>("h1")) - mem_0_2.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - mem_0_3.lib_clk <= outer_clk - mem_0_3.lib_addr <= outer_addr - node outer_dout_0_3 = bits(mem_0_3.lib_dout, 7, 0) - mem_0_3.lib_din <= bits(outer_din, 31, 24) - mem_0_3.lib_read_en <= and(and(not(outer_write_en), UInt<1>("h1")), UInt<1>("h1")) - mem_0_3.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - node outer_dout_0 = cat(outer_dout_0_3, cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0))) - outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<32>("h0")) -""" - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x32_readEnable_Mem - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - import mdf.macrolib._ - - override lazy val depth = BigInt(1024) - override lazy val memWidth = 32 - override lazy val libWidth = 8 - - override def generateMemSRAM(): SRAMMacro = { - SRAMMacro( - name = mem_name, - width = memWidth, - depth = memDepth, - family = "1rw", - ports = Seq( - generateTestPort( - "outer", - Some(memWidth), - Some(memDepth), - maskGran = memMaskGran, - write = true, - writeEnable = true, - read = true, - readEnable = true - ) - ) - ) - } - - // No need to override body here due to the lack of a readEnable in the lib. - - compileExecuteAndTest(mem, lib, v, output) -} - -class SplitWidth1024x32_readEnable_LibMem - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator { - import mdf.macrolib._ - - override lazy val depth = BigInt(1024) - override lazy val memWidth = 32 - override lazy val libWidth = 8 - - override def generateLibSRAM(): SRAMMacro = { - SRAMMacro( - name = lib_name, - width = libWidth, - depth = libDepth, - family = "1rw", - ports = Seq( - generateTestPort( - "lib", - Some(libWidth), - Some(libDepth), - maskGran = libMaskGran, - write = true, - writeEnable = true, - read = true, - readEnable = true - ) - ) - ) - } - - override def generateMemSRAM(): SRAMMacro = { - SRAMMacro( - name = mem_name, - width = memWidth, - depth = memDepth, - family = "1rw", - ports = Seq( - generateTestPort( - "outer", - Some(memWidth), - Some(memDepth), - maskGran = memMaskGran, - write = true, - writeEnable = true, - read = true, - readEnable = true - ) - ) - ) - } - - override def generateBody() = - """ - inst mem_0_0 of awesome_lib_mem - inst mem_0_1 of awesome_lib_mem - inst mem_0_2 of awesome_lib_mem - inst mem_0_3 of awesome_lib_mem - mem_0_0.lib_clk <= outer_clk - mem_0_0.lib_addr <= outer_addr - node outer_dout_0_0 = bits(mem_0_0.lib_dout, 7, 0) - mem_0_0.lib_din <= bits(outer_din, 7, 0) - mem_0_0.lib_read_en <= and(outer_read_en, UInt<1>("h1")) - mem_0_0.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - mem_0_1.lib_clk <= outer_clk - mem_0_1.lib_addr <= outer_addr - node outer_dout_0_1 = bits(mem_0_1.lib_dout, 7, 0) - mem_0_1.lib_din <= bits(outer_din, 15, 8) - mem_0_1.lib_read_en <= and(outer_read_en, UInt<1>("h1")) - mem_0_1.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - mem_0_2.lib_clk <= outer_clk - mem_0_2.lib_addr <= outer_addr - node outer_dout_0_2 = bits(mem_0_2.lib_dout, 7, 0) - mem_0_2.lib_din <= bits(outer_din, 23, 16) - mem_0_2.lib_read_en <= and(outer_read_en, UInt<1>("h1")) - mem_0_2.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - mem_0_3.lib_clk <= outer_clk - 
mem_0_3.lib_addr <= outer_addr - node outer_dout_0_3 = bits(mem_0_3.lib_dout, 7, 0) - mem_0_3.lib_din <= bits(outer_din, 31, 24) - mem_0_3.lib_read_en <= and(outer_read_en, UInt<1>("h1")) - mem_0_3.lib_write_en <= and(and(and(outer_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - node outer_dout_0 = cat(outer_dout_0_3, cat(outer_dout_0_2, cat(outer_dout_0_1, outer_dout_0_0))) - outer_dout <= mux(UInt<1>("h1"), outer_dout_0, UInt<32>("h0")) -""" - - compileExecuteAndTest(mem, lib, v, output) -} diff --git a/src/test/scala/barstools/macros/SpecificExamples.scala b/src/test/scala/barstools/macros/SpecificExamples.scala deleted file mode 100644 index b0e2467f3..000000000 --- a/src/test/scala/barstools/macros/SpecificExamples.scala +++ /dev/null @@ -1,1762 +0,0 @@ -// See LICENSE for license details. -package barstools.macros - -import firrtl.FileUtils -import mdf.macrolib.{Constant, MacroExtraPort, SRAMMacro} - -// Specific one-off tests to run, not created by a generator. - -// Check that verilog actually gets generated. -// TODO: check the actual verilog's correctness? -class GenerateSomeVerilog extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleDepthTestGenerator { - override lazy val width = 32 - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - - it should "execute fine" in { - compileExecuteAndTest(mem, lib, v, output) - } - - it should "generate non-empty verilog" in { - val verilog = FileUtils.getText(vPrefix + "/" + v) - verilog.isEmpty shouldBe false - } -} - -class WriteEnableTest extends MacroCompilerSpec with HasSRAMGenerator { - val mem = s"mem-WriteEnableTest.json" // mem. you want to create - val lib = s"lib-WriteEnableTest.json" // lib. of mems to create it - val v = s"WriteEnableTest.json" - - override val libPrefix = "src/test/resources" - - val memSRAMs: Seq[mdf.macrolib.Macro] = mdf.macrolib.Utils - .readMDFFromString(""" -[ { - "type" : "sram", - "name" : "cc_banks_0_ext", - "width" : 64, - "depth" : "4096", - "mux" : 1, - "ports" : [ { - "address port name" : "RW0_addr", - "address port polarity" : "active high", - "clock port name" : "RW0_clk", - "clock port polarity" : "positive edge", - "write enable port name" : "RW0_wmode", - "write enable port polarity" : "active high", - "chip enable port name" : "RW0_en", - "chip enable port polarity" : "active high", - "output port name" : "RW0_rdata", - "output port polarity" : "active high", - "input port name" : "RW0_wdata", - "input port polarity" : "active high" - } ], - "family" : "1rw" -} ] -""").getOrElse(Seq()) - - writeToMem(mem, memSRAMs) - - val output = - """ -circuit cc_banks_0_ext : - module cc_banks_0_ext : - input RW0_addr : UInt<12> - input RW0_clk : Clock - input RW0_wdata : UInt<64> - output RW0_rdata : UInt<64> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - - inst mem_0_0 of fake_mem - mem_0_0.clk <= RW0_clk - mem_0_0.addr <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.dataout, 63, 0) - mem_0_0.datain <= bits(RW0_wdata, 63, 0) - mem_0_0.ren <= and(and(not(RW0_wmode), RW0_en), UInt<1>("h1")) - mem_0_0.wen <= and(and(and(RW0_wmode, RW0_en), UInt<1>("h1")), UInt<1>("h1")) - node RW0_rdata_0 = RW0_rdata_0_0 - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<64>("h0")) - - extmodule fake_mem : - input addr : UInt<12> - input clk : Clock - input datain : UInt<64> - output dataout : UInt<64> - input ren : UInt<1> - input wen : UInt<1> - - defname = fake_mem -""" - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, 
output) - } -} - -class MaskPortTest extends MacroCompilerSpec with HasSRAMGenerator { - val mem = s"mem-MaskPortTest.json" // mem. you want to create - val lib = s"lib-MaskPortTest.json" // lib. of mems to create it - val v = s"MaskPortTest.json" - - override val libPrefix = "src/test/resources" - - val memSRAMs: Seq[mdf.macrolib.Macro] = mdf.macrolib.Utils - .readMDFFromString(""" -[ { - "type" : "sram", - "name" : "cc_dir_ext", - "width" : 128, - "depth" : "512", - "mux" : 1, - "ports" : [ { - "address port name" : "RW0_addr", - "address port polarity" : "active high", - "clock port name" : "RW0_clk", - "clock port polarity" : "positive edge", - "write enable port name" : "RW0_wmode", - "write enable port polarity" : "active high", - "chip enable port name" : "RW0_en", - "chip enable port polarity" : "active high", - "output port name" : "RW0_rdata", - "output port polarity" : "active high", - "input port name" : "RW0_wdata", - "input port polarity" : "active high", - "mask port name" : "RW0_wmask", - "mask port polarity" : "active high", - "mask granularity" : 16 - } ], - "family" : "1rw" -} ] -""").getOrElse(List()) - - writeToMem(mem, memSRAMs) - - val output = - """ -circuit cc_dir_ext : - module cc_dir_ext : - input RW0_addr : UInt<9> - input RW0_clk : Clock - input RW0_wdata : UInt<128> - output RW0_rdata : UInt<128> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - input RW0_wmask : UInt<8> - - inst mem_0_0 of fake_mem - inst mem_0_1 of fake_mem - mem_0_0.clk <= RW0_clk - mem_0_0.addr <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.dataout, 63, 0) - mem_0_0.datain <= bits(RW0_wdata, 63, 0) - mem_0_0.ren <= and(and(not(RW0_wmode), RW0_en), UInt<1>("h1")) - mem_0_0.mport <= not(cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 3, 3), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 2, 2), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 1, 1), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), cat(bits(RW0_wmask, 0, 0), bits(RW0_wmask, 0, 0))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) - mem_0_0.wen <= and(and(RW0_wmode, RW0_en), 
UInt<1>("h1")) - mem_0_1.clk <= RW0_clk - mem_0_1.addr <= RW0_addr - node RW0_rdata_0_1 = bits(mem_0_1.dataout, 63, 0) - mem_0_1.datain <= bits(RW0_wdata, 127, 64) - mem_0_1.ren <= and(and(not(RW0_wmode), RW0_en), UInt<1>("h1")) - mem_0_1.mport <= not(cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 7, 7), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 6, 6), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 5, 5), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), cat(bits(RW0_wmask, 4, 4), bits(RW0_wmask, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) - mem_0_1.wen <= and(and(RW0_wmode, RW0_en), UInt<1>("h1")) - node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<128>("h0")) - - extmodule fake_mem : - input addr : UInt<9> - input clk : Clock - input datain : UInt<64> - output dataout : UInt<64> - input ren : UInt<1> - input wen : UInt<1> - input mport : UInt<64> - - defname = fake_mem -""" - - it should "compile, execute, and test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class BOOMTest extends MacroCompilerSpec with HasSRAMGenerator { - val mem = s"mem-BOOMTest.json" - val lib = s"lib-BOOMTest.json" - val v = s"BOOMTest.v" - - override val libPrefix = "src/test/resources" - - val memSRAMs: Seq[mdf.macrolib.Macro] = mdf.macrolib.Utils - .readMDFFromString(""" -[ { - "type" : "sram", - "name" : "_T_182_ext", - "width" : 88, - "depth" : "64", - "ports" : [ { - "address port name" : "R0_addr", - "address port polarity" : "active high", - "clock port name" : "R0_clk", - "clock port polarity" : "active high", - "chip enable port name" : "R0_en", - "chip enable port polarity" : "active high", - "output port name" : "R0_data", - "output port polarity" : "active high" - }, { - "address port name" : "W0_addr", - "address port polarity" : "active high", - "clock port name" : "W0_clk", - "clock port polarity" : "active high", - "chip enable port name" : "W0_en", - "chip enable port polarity" : "active high", - "input port name" : "W0_data", - "input port polarity" : "active 
high", - "mask port name" : "W0_mask", - "mask port polarity" : "active high", - "mask granularity" : 22 - } ] -}, { - "type" : "sram", - "name" : "_T_84_ext", - "width" : 64, - "depth" : "512", - "ports" : [ { - "address port name" : "R0_addr", - "address port polarity" : "active high", - "clock port name" : "R0_clk", - "clock port polarity" : "active high", - "chip enable port name" : "R0_en", - "chip enable port polarity" : "active high", - "output port name" : "R0_data", - "output port polarity" : "active high" - }, { - "address port name" : "W0_addr", - "address port polarity" : "active high", - "clock port name" : "W0_clk", - "clock port polarity" : "active high", - "chip enable port name" : "W0_en", - "chip enable port polarity" : "active high", - "input port name" : "W0_data", - "input port polarity" : "active high", - "mask port name" : "W0_mask", - "mask port polarity" : "active high", - "mask granularity" : 64 - } ] -}, { - "type" : "sram", - "name" : "tag_array_ext", - "width" : 80, - "depth" : "64", - "ports" : [ { - "address port name" : "RW0_addr", - "address port polarity" : "active high", - "clock port name" : "RW0_clk", - "clock port polarity" : "active high", - "write enable port name" : "RW0_wmode", - "write enable port polarity" : "active high", - "chip enable port name" : "RW0_en", - "chip enable port polarity" : "active high", - "output port name" : "RW0_rdata", - "output port polarity" : "active high", - "input port name" : "RW0_wdata", - "input port polarity" : "active high", - "mask port name" : "RW0_wmask", - "mask port polarity" : "active high", - "mask granularity" : 20 - } ] -}, { - "type" : "sram", - "name" : "_T_886_ext", - "width" : 64, - "depth" : "512", - "ports" : [ { - "address port name" : "RW0_addr", - "address port polarity" : "active high", - "clock port name" : "RW0_clk", - "clock port polarity" : "active high", - "write enable port name" : "RW0_wmode", - "write enable port polarity" : "active high", - "chip enable port name" : "RW0_en", - "chip enable port polarity" : "active high", - "output port name" : "RW0_rdata", - "output port polarity" : "active high", - "input port name" : "RW0_wdata", - "input port polarity" : "active high" - } ] -}, { - "type" : "sram", - "name" : "entries_info_ext", - "width" : 40, - "depth" : "24", - "ports" : [ { - "address port name" : "R0_addr", - "address port polarity" : "active high", - "clock port name" : "R0_clk", - "clock port polarity" : "active high", - "chip enable port name" : "R0_en", - "chip enable port polarity" : "active high", - "output port name" : "R0_data", - "output port polarity" : "active high" - }, { - "address port name" : "W0_addr", - "address port polarity" : "active high", - "clock port name" : "W0_clk", - "clock port polarity" : "active high", - "chip enable port name" : "W0_en", - "chip enable port polarity" : "active high", - "input port name" : "W0_data", - "input port polarity" : "active high" - } ] -}, { - "type" : "sram", - "name" : "smem_ext", - "width" : 32, - "depth" : "32", - "ports" : [ { - "address port name" : "RW0_addr", - "address port polarity" : "active high", - "clock port name" : "RW0_clk", - "clock port polarity" : "active high", - "write enable port name" : "RW0_wmode", - "write enable port polarity" : "active high", - "chip enable port name" : "RW0_en", - "chip enable port polarity" : "active high", - "output port name" : "RW0_rdata", - "output port polarity" : "active high", - "input port name" : "RW0_wdata", - "input port polarity" : "active high", - "mask port 
name" : "RW0_wmask", - "mask port polarity" : "active high", - "mask granularity" : 1 - } ] -}, { - "type" : "sram", - "name" : "smem_0_ext", - "width" : 32, - "depth" : "64", - "ports" : [ { - "address port name" : "RW0_addr", - "address port polarity" : "active high", - "clock port name" : "RW0_clk", - "clock port polarity" : "active high", - "write enable port name" : "RW0_wmode", - "write enable port polarity" : "active high", - "chip enable port name" : "RW0_en", - "chip enable port polarity" : "active high", - "output port name" : "RW0_rdata", - "output port polarity" : "active high", - "input port name" : "RW0_wdata", - "input port polarity" : "active high", - "mask port name" : "RW0_wmask", - "mask port polarity" : "active high", - "mask granularity" : 1 - } ] -} ] -""").getOrElse(List()) - - writeToMem(mem, memSRAMs) - - val output = // TODO: check correctness... - """ -circuit smem_0_ext : - module _T_182_ext : - input R0_addr : UInt<6> - input R0_clk : Clock - output R0_data : UInt<88> - input R0_en : UInt<1> - input W0_addr : UInt<6> - input W0_clk : Clock - input W0_data : UInt<88> - input W0_en : UInt<1> - input W0_mask : UInt<4> - - node R0_addr_sel = bits(R0_addr, 5, 5) - reg R0_addr_sel_reg : UInt<1>, R0_clk with : - reset => (UInt<1>("h0"), R0_addr_sel_reg) - R0_addr_sel_reg <= mux(R0_en, R0_addr_sel, R0_addr_sel_reg) - node W0_addr_sel = bits(W0_addr, 5, 5) - inst mem_0_0 of my_sram_2rw_32x22 - inst mem_0_1 of my_sram_2rw_32x22 - inst mem_0_2 of my_sram_2rw_32x22 - inst mem_0_3 of my_sram_2rw_32x22 - mem_0_0.CE1 <= W0_clk - mem_0_0.A1 <= W0_addr - mem_0_0.I1 <= bits(W0_data, 21, 0) - mem_0_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h0")))) - mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<1>("h0")))) - mem_0_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h0")))) - mem_0_1.CE1 <= W0_clk - mem_0_1.A1 <= W0_addr - mem_0_1.I1 <= bits(W0_data, 43, 22) - mem_0_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h0")))) - mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 1, 1)), eq(W0_addr_sel, UInt<1>("h0")))) - mem_0_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h0")))) - mem_0_2.CE1 <= W0_clk - mem_0_2.A1 <= W0_addr - mem_0_2.I1 <= bits(W0_data, 65, 44) - mem_0_2.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h0")))) - mem_0_2.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 2, 2)), eq(W0_addr_sel, UInt<1>("h0")))) - mem_0_2.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h0")))) - mem_0_3.CE1 <= W0_clk - mem_0_3.A1 <= W0_addr - mem_0_3.I1 <= bits(W0_data, 87, 66) - mem_0_3.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h0")))) - mem_0_3.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 3, 3)), eq(W0_addr_sel, UInt<1>("h0")))) - mem_0_3.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h0")))) - mem_0_0.CE2 <= R0_clk - mem_0_0.A2 <= R0_addr - node R0_data_0_0 = bits(mem_0_0.O2, 21, 0) - mem_0_0.I2 is invalid - mem_0_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h0")))) - mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) - mem_0_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) - mem_0_1.CE2 <= R0_clk - mem_0_1.A2 <= R0_addr - node R0_data_0_1 = bits(mem_0_1.O2, 21, 0) - mem_0_1.I2 is invalid - mem_0_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h0")))) - mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), 
eq(R0_addr_sel, UInt<1>("h0")))) - mem_0_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) - mem_0_2.CE2 <= R0_clk - mem_0_2.A2 <= R0_addr - node R0_data_0_2 = bits(mem_0_2.O2, 21, 0) - mem_0_2.I2 is invalid - mem_0_2.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h0")))) - mem_0_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) - mem_0_2.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) - mem_0_3.CE2 <= R0_clk - mem_0_3.A2 <= R0_addr - node R0_data_0_3 = bits(mem_0_3.O2, 21, 0) - mem_0_3.I2 is invalid - mem_0_3.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h0")))) - mem_0_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h0")))) - mem_0_3.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h0")))) - node R0_data_0 = cat(R0_data_0_3, cat(R0_data_0_2, cat(R0_data_0_1, R0_data_0_0))) - inst mem_1_0 of my_sram_2rw_32x22 - inst mem_1_1 of my_sram_2rw_32x22 - inst mem_1_2 of my_sram_2rw_32x22 - inst mem_1_3 of my_sram_2rw_32x22 - mem_1_0.CE1 <= W0_clk - mem_1_0.A1 <= W0_addr - mem_1_0.I1 <= bits(W0_data, 21, 0) - mem_1_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h1")))) - mem_1_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<1>("h1")))) - mem_1_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h1")))) - mem_1_1.CE1 <= W0_clk - mem_1_1.A1 <= W0_addr - mem_1_1.I1 <= bits(W0_data, 43, 22) - mem_1_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h1")))) - mem_1_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 1, 1)), eq(W0_addr_sel, UInt<1>("h1")))) - mem_1_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h1")))) - mem_1_2.CE1 <= W0_clk - mem_1_2.A1 <= W0_addr - mem_1_2.I1 <= bits(W0_data, 65, 44) - mem_1_2.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h1")))) - mem_1_2.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 2, 2)), eq(W0_addr_sel, UInt<1>("h1")))) - mem_1_2.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h1")))) - mem_1_3.CE1 <= W0_clk - mem_1_3.A1 <= W0_addr - mem_1_3.I1 <= bits(W0_data, 87, 66) - mem_1_3.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<1>("h1")))) - mem_1_3.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 3, 3)), eq(W0_addr_sel, UInt<1>("h1")))) - mem_1_3.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<1>("h1")))) - mem_1_0.CE2 <= R0_clk - mem_1_0.A2 <= R0_addr - node R0_data_1_0 = bits(mem_1_0.O2, 21, 0) - mem_1_0.I2 is invalid - mem_1_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h1")))) - mem_1_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) - mem_1_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) - mem_1_1.CE2 <= R0_clk - mem_1_1.A2 <= R0_addr - node R0_data_1_1 = bits(mem_1_1.O2, 21, 0) - mem_1_1.I2 is invalid - mem_1_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h1")))) - mem_1_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) - mem_1_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) - mem_1_2.CE2 <= R0_clk - mem_1_2.A2 <= R0_addr - node R0_data_1_2 = bits(mem_1_2.O2, 21, 0) - mem_1_2.I2 is invalid - mem_1_2.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h1")))) - mem_1_2.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) - mem_1_2.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) - mem_1_3.CE2 
<= R0_clk - mem_1_3.A2 <= R0_addr - node R0_data_1_3 = bits(mem_1_3.O2, 21, 0) - mem_1_3.I2 is invalid - mem_1_3.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<1>("h1")))) - mem_1_3.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<1>("h1")))) - mem_1_3.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<1>("h1")))) - node R0_data_1 = cat(R0_data_1_3, cat(R0_data_1_2, cat(R0_data_1_1, R0_data_1_0))) - R0_data <= mux(eq(R0_addr_sel_reg, UInt<1>("h0")), R0_data_0, mux(eq(R0_addr_sel_reg, UInt<1>("h1")), R0_data_1, UInt<88>("h0"))) - - - module _T_84_ext : - input R0_addr : UInt<9> - input R0_clk : Clock - output R0_data : UInt<64> - input R0_en : UInt<1> - input W0_addr : UInt<9> - input W0_clk : Clock - input W0_data : UInt<64> - input W0_en : UInt<1> - input W0_mask : UInt<1> - - node R0_addr_sel = bits(R0_addr, 8, 7) - reg R0_addr_sel_reg : UInt<2>, R0_clk with : - reset => (UInt<1>("h0"), R0_addr_sel_reg) - R0_addr_sel_reg <= mux(R0_en, R0_addr_sel, R0_addr_sel_reg) - node W0_addr_sel = bits(W0_addr, 8, 7) - inst mem_0_0 of my_sram_2rw_128x32 - inst mem_0_1 of my_sram_2rw_128x32 - mem_0_0.CE1 <= W0_clk - mem_0_0.A1 <= W0_addr - mem_0_0.I1 <= bits(W0_data, 31, 0) - mem_0_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h0")))) - mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h0")))) - mem_0_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h0")))) - mem_0_1.CE1 <= W0_clk - mem_0_1.A1 <= W0_addr - mem_0_1.I1 <= bits(W0_data, 63, 32) - mem_0_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h0")))) - mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h0")))) - mem_0_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h0")))) - mem_0_0.CE2 <= R0_clk - mem_0_0.A2 <= R0_addr - node R0_data_0_0 = bits(mem_0_0.O2, 31, 0) - mem_0_0.I2 is invalid - mem_0_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h0")))) - mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h0")))) - mem_0_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h0")))) - mem_0_1.CE2 <= R0_clk - mem_0_1.A2 <= R0_addr - node R0_data_0_1 = bits(mem_0_1.O2, 31, 0) - mem_0_1.I2 is invalid - mem_0_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h0")))) - mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h0")))) - mem_0_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h0")))) - node R0_data_0 = cat(R0_data_0_1, R0_data_0_0) - inst mem_1_0 of my_sram_2rw_128x32 - inst mem_1_1 of my_sram_2rw_128x32 - mem_1_0.CE1 <= W0_clk - mem_1_0.A1 <= W0_addr - mem_1_0.I1 <= bits(W0_data, 31, 0) - mem_1_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h1")))) - mem_1_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h1")))) - mem_1_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h1")))) - mem_1_1.CE1 <= W0_clk - mem_1_1.A1 <= W0_addr - mem_1_1.I1 <= bits(W0_data, 63, 32) - mem_1_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h1")))) - mem_1_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h1")))) - mem_1_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h1")))) - mem_1_0.CE2 <= R0_clk - mem_1_0.A2 <= R0_addr - node R0_data_1_0 = bits(mem_1_0.O2, 31, 0) - mem_1_0.I2 is invalid - mem_1_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, 
UInt<2>("h1")))) - mem_1_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h1")))) - mem_1_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h1")))) - mem_1_1.CE2 <= R0_clk - mem_1_1.A2 <= R0_addr - node R0_data_1_1 = bits(mem_1_1.O2, 31, 0) - mem_1_1.I2 is invalid - mem_1_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h1")))) - mem_1_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h1")))) - mem_1_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h1")))) - node R0_data_1 = cat(R0_data_1_1, R0_data_1_0) - inst mem_2_0 of my_sram_2rw_128x32 - inst mem_2_1 of my_sram_2rw_128x32 - mem_2_0.CE1 <= W0_clk - mem_2_0.A1 <= W0_addr - mem_2_0.I1 <= bits(W0_data, 31, 0) - mem_2_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h2")))) - mem_2_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h2")))) - mem_2_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h2")))) - mem_2_1.CE1 <= W0_clk - mem_2_1.A1 <= W0_addr - mem_2_1.I1 <= bits(W0_data, 63, 32) - mem_2_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h2")))) - mem_2_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h2")))) - mem_2_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h2")))) - mem_2_0.CE2 <= R0_clk - mem_2_0.A2 <= R0_addr - node R0_data_2_0 = bits(mem_2_0.O2, 31, 0) - mem_2_0.I2 is invalid - mem_2_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h2")))) - mem_2_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h2")))) - mem_2_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h2")))) - mem_2_1.CE2 <= R0_clk - mem_2_1.A2 <= R0_addr - node R0_data_2_1 = bits(mem_2_1.O2, 31, 0) - mem_2_1.I2 is invalid - mem_2_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h2")))) - mem_2_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h2")))) - mem_2_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h2")))) - node R0_data_2 = cat(R0_data_2_1, R0_data_2_0) - inst mem_3_0 of my_sram_2rw_128x32 - inst mem_3_1 of my_sram_2rw_128x32 - mem_3_0.CE1 <= W0_clk - mem_3_0.A1 <= W0_addr - mem_3_0.I1 <= bits(W0_data, 31, 0) - mem_3_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h3")))) - mem_3_0.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h3")))) - mem_3_0.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h3")))) - mem_3_1.CE1 <= W0_clk - mem_3_1.A1 <= W0_addr - mem_3_1.I1 <= bits(W0_data, 63, 32) - mem_3_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), eq(W0_addr_sel, UInt<2>("h3")))) - mem_3_1.WEB1 <= not(and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), eq(W0_addr_sel, UInt<2>("h3")))) - mem_3_1.CSB1 <= not(and(W0_en, eq(W0_addr_sel, UInt<2>("h3")))) - mem_3_0.CE2 <= R0_clk - mem_3_0.A2 <= R0_addr - node R0_data_3_0 = bits(mem_3_0.O2, 31, 0) - mem_3_0.I2 is invalid - mem_3_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h3")))) - mem_3_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, UInt<2>("h3")))) - mem_3_0.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h3")))) - mem_3_1.CE2 <= R0_clk - mem_3_1.A2 <= R0_addr - node R0_data_3_1 = bits(mem_3_1.O2, 31, 0) - mem_3_1.I2 is invalid - mem_3_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), eq(R0_addr_sel, UInt<2>("h3")))) - mem_3_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), eq(R0_addr_sel, 
UInt<2>("h3")))) - mem_3_1.CSB2 <= not(and(R0_en, eq(R0_addr_sel, UInt<2>("h3")))) - node R0_data_3 = cat(R0_data_3_1, R0_data_3_0) - R0_data <= mux(eq(R0_addr_sel_reg, UInt<2>("h0")), R0_data_0, mux(eq(R0_addr_sel_reg, UInt<2>("h1")), R0_data_1, mux(eq(R0_addr_sel_reg, UInt<2>("h2")), R0_data_2, mux(eq(R0_addr_sel_reg, UInt<2>("h3")), R0_data_3, UInt<64>("h0"))))) - - extmodule my_sram_2rw_128x32 : - input A1 : UInt<7> - input CE1 : Clock - input I1 : UInt<32> - output O1 : UInt<32> - input CSB1 : UInt<1> - input OEB1 : UInt<1> - input WEB1 : UInt<1> - input A2 : UInt<7> - input CE2 : Clock - input I2 : UInt<32> - output O2 : UInt<32> - input CSB2 : UInt<1> - input OEB2 : UInt<1> - input WEB2 : UInt<1> - - defname = my_sram_2rw_128x32 - - - module tag_array_ext : - input RW0_addr : UInt<6> - input RW0_clk : Clock - input RW0_wdata : UInt<80> - output RW0_rdata : UInt<80> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - input RW0_wmask : UInt<4> - - inst mem_0_0 of my_sram_1rw_64x32 - inst mem_0_1 of my_sram_1rw_64x32 - inst mem_0_2 of my_sram_1rw_64x32 - inst mem_0_3 of my_sram_1rw_64x32 - mem_0_0.CE <= RW0_clk - mem_0_0.A <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.O, 19, 0) - mem_0_0.I <= bits(RW0_wdata, 19, 0) - mem_0_0.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) - mem_0_0.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_1.CE <= RW0_clk - mem_0_1.A <= RW0_addr - node RW0_rdata_0_1 = bits(mem_0_1.O, 19, 0) - mem_0_1.I <= bits(RW0_wdata, 39, 20) - mem_0_1.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) - mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_2.CE <= RW0_clk - mem_0_2.A <= RW0_addr - node RW0_rdata_0_2 = bits(mem_0_2.O, 19, 0) - mem_0_2.I <= bits(RW0_wdata, 59, 40) - mem_0_2.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) - mem_0_2.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_3.CE <= RW0_clk - mem_0_3.A <= RW0_addr - node RW0_rdata_0_3 = bits(mem_0_3.O, 19, 0) - mem_0_3.I <= bits(RW0_wdata, 79, 60) - mem_0_3.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) - mem_0_3.CSB <= not(and(RW0_en, UInt<1>("h1"))) - node RW0_rdata_0 = cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<80>("h0")) - - extmodule my_sram_1rw_64x32 : - input A : UInt<6> - input CE : Clock - input I : UInt<32> - output O : UInt<32> - input CSB : UInt<1> - input OEB : UInt<1> - input WEB : UInt<1> - - defname = my_sram_1rw_64x32 - - - module _T_886_ext : - input RW0_addr : UInt<9> - input RW0_clk : Clock - input RW0_wdata : UInt<64> - output RW0_rdata : UInt<64> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - - inst mem_0_0 of my_sram_1rw_512x32 - inst mem_0_1 of my_sram_1rw_512x32 - mem_0_0.CE <= RW0_clk - mem_0_0.A <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.O, 31, 0) - mem_0_0.I <= bits(RW0_wdata, 31, 0) - mem_0_0.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_0.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) - mem_0_0.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_1.CE <= RW0_clk - mem_0_1.A <= RW0_addr - node RW0_rdata_0_1 = bits(mem_0_1.O, 31, 0) - mem_0_1.I <= bits(RW0_wdata, 63, 32) - mem_0_1.OEB <= 
not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_1.WEB <= not(and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1"))) - mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) - node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<64>("h0")) - - extmodule my_sram_1rw_512x32 : - input A : UInt<9> - input CE : Clock - input I : UInt<32> - output O : UInt<32> - input CSB : UInt<1> - input OEB : UInt<1> - input WEB : UInt<1> - - defname = my_sram_1rw_512x32 - - - module entries_info_ext : - input R0_addr : UInt<5> - input R0_clk : Clock - output R0_data : UInt<40> - input R0_en : UInt<1> - input W0_addr : UInt<5> - input W0_clk : Clock - input W0_data : UInt<40> - input W0_en : UInt<1> - - inst mem_0_0 of my_sram_2rw_32x22 - inst mem_0_1 of my_sram_2rw_32x22 - mem_0_0.CE1 <= W0_clk - mem_0_0.A1 <= W0_addr - mem_0_0.I1 <= bits(W0_data, 21, 0) - mem_0_0.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), UInt<1>("h1"))) - mem_0_0.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_0.CSB1 <= not(and(W0_en, UInt<1>("h1"))) - mem_0_1.CE1 <= W0_clk - mem_0_1.A1 <= W0_addr - mem_0_1.I1 <= bits(W0_data, 39, 22) - mem_0_1.OEB1 <= not(and(and(not(UInt<1>("h1")), W0_en), UInt<1>("h1"))) - mem_0_1.WEB1 <= not(and(and(UInt<1>("h1"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_1.CSB1 <= not(and(W0_en, UInt<1>("h1"))) - mem_0_0.CE2 <= R0_clk - mem_0_0.A2 <= R0_addr - node R0_data_0_0 = bits(mem_0_0.O2, 21, 0) - mem_0_0.I2 is invalid - mem_0_0.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), UInt<1>("h1"))) - mem_0_0.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_0.CSB2 <= not(and(R0_en, UInt<1>("h1"))) - mem_0_1.CE2 <= R0_clk - mem_0_1.A2 <= R0_addr - node R0_data_0_1 = bits(mem_0_1.O2, 17, 0) - mem_0_1.I2 is invalid - mem_0_1.OEB2 <= not(and(and(not(UInt<1>("h0")), R0_en), UInt<1>("h1"))) - mem_0_1.WEB2 <= not(and(and(UInt<1>("h0"), UInt<1>("h1")), UInt<1>("h1"))) - mem_0_1.CSB2 <= not(and(R0_en, UInt<1>("h1"))) - node R0_data_0 = cat(R0_data_0_1, R0_data_0_0) - R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<40>("h0")) - - extmodule my_sram_2rw_32x22 : - input A1 : UInt<5> - input CE1 : Clock - input I1 : UInt<22> - output O1 : UInt<22> - input CSB1 : UInt<1> - input OEB1 : UInt<1> - input WEB1 : UInt<1> - input A2 : UInt<5> - input CE2 : Clock - input I2 : UInt<22> - output O2 : UInt<22> - input CSB2 : UInt<1> - input OEB2 : UInt<1> - input WEB2 : UInt<1> - - defname = my_sram_2rw_32x22 - - - module smem_ext : - input RW0_addr : UInt<5> - input RW0_clk : Clock - input RW0_wdata : UInt<32> - output RW0_rdata : UInt<32> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - input RW0_wmask : UInt<32> - - inst mem_0_0 of my_sram_1rw_64x8 - inst mem_0_1 of my_sram_1rw_64x8 - inst mem_0_2 of my_sram_1rw_64x8 - inst mem_0_3 of my_sram_1rw_64x8 - inst mem_0_4 of my_sram_1rw_64x8 - inst mem_0_5 of my_sram_1rw_64x8 - inst mem_0_6 of my_sram_1rw_64x8 - inst mem_0_7 of my_sram_1rw_64x8 - inst mem_0_8 of my_sram_1rw_64x8 - inst mem_0_9 of my_sram_1rw_64x8 - inst mem_0_10 of my_sram_1rw_64x8 - inst mem_0_11 of my_sram_1rw_64x8 - inst mem_0_12 of my_sram_1rw_64x8 - inst mem_0_13 of my_sram_1rw_64x8 - inst mem_0_14 of my_sram_1rw_64x8 - inst mem_0_15 of my_sram_1rw_64x8 - inst mem_0_16 of my_sram_1rw_64x8 - inst mem_0_17 of my_sram_1rw_64x8 - inst mem_0_18 of my_sram_1rw_64x8 - inst mem_0_19 of my_sram_1rw_64x8 - inst mem_0_20 of my_sram_1rw_64x8 - inst mem_0_21 of my_sram_1rw_64x8 - inst mem_0_22 of my_sram_1rw_64x8 - inst 
mem_0_23 of my_sram_1rw_64x8 - inst mem_0_24 of my_sram_1rw_64x8 - inst mem_0_25 of my_sram_1rw_64x8 - inst mem_0_26 of my_sram_1rw_64x8 - inst mem_0_27 of my_sram_1rw_64x8 - inst mem_0_28 of my_sram_1rw_64x8 - inst mem_0_29 of my_sram_1rw_64x8 - inst mem_0_30 of my_sram_1rw_64x8 - inst mem_0_31 of my_sram_1rw_64x8 - mem_0_0.CE <= RW0_clk - mem_0_0.A <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.O, 0, 0) - mem_0_0.I <= bits(RW0_wdata, 0, 0) - mem_0_0.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) - mem_0_0.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_1.CE <= RW0_clk - mem_0_1.A <= RW0_addr - node RW0_rdata_0_1 = bits(mem_0_1.O, 0, 0) - mem_0_1.I <= bits(RW0_wdata, 1, 1) - mem_0_1.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) - mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_2.CE <= RW0_clk - mem_0_2.A <= RW0_addr - node RW0_rdata_0_2 = bits(mem_0_2.O, 0, 0) - mem_0_2.I <= bits(RW0_wdata, 2, 2) - mem_0_2.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) - mem_0_2.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_3.CE <= RW0_clk - mem_0_3.A <= RW0_addr - node RW0_rdata_0_3 = bits(mem_0_3.O, 0, 0) - mem_0_3.I <= bits(RW0_wdata, 3, 3) - mem_0_3.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) - mem_0_3.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_4.CE <= RW0_clk - mem_0_4.A <= RW0_addr - node RW0_rdata_0_4 = bits(mem_0_4.O, 0, 0) - mem_0_4.I <= bits(RW0_wdata, 4, 4) - mem_0_4.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_4.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 4, 4)), UInt<1>("h1"))) - mem_0_4.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_5.CE <= RW0_clk - mem_0_5.A <= RW0_addr - node RW0_rdata_0_5 = bits(mem_0_5.O, 0, 0) - mem_0_5.I <= bits(RW0_wdata, 5, 5) - mem_0_5.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_5.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 5, 5)), UInt<1>("h1"))) - mem_0_5.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_6.CE <= RW0_clk - mem_0_6.A <= RW0_addr - node RW0_rdata_0_6 = bits(mem_0_6.O, 0, 0) - mem_0_6.I <= bits(RW0_wdata, 6, 6) - mem_0_6.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_6.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 6, 6)), UInt<1>("h1"))) - mem_0_6.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_7.CE <= RW0_clk - mem_0_7.A <= RW0_addr - node RW0_rdata_0_7 = bits(mem_0_7.O, 0, 0) - mem_0_7.I <= bits(RW0_wdata, 7, 7) - mem_0_7.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_7.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1"))) - mem_0_7.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_8.CE <= RW0_clk - mem_0_8.A <= RW0_addr - node RW0_rdata_0_8 = bits(mem_0_8.O, 0, 0) - mem_0_8.I <= bits(RW0_wdata, 8, 8) - mem_0_8.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_8.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 8, 8)), UInt<1>("h1"))) - mem_0_8.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_9.CE <= RW0_clk - mem_0_9.A <= RW0_addr - node RW0_rdata_0_9 = bits(mem_0_9.O, 0, 0) - mem_0_9.I <= bits(RW0_wdata, 9, 9) - mem_0_9.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_9.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 9, 9)), 
UInt<1>("h1"))) - mem_0_9.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_10.CE <= RW0_clk - mem_0_10.A <= RW0_addr - node RW0_rdata_0_10 = bits(mem_0_10.O, 0, 0) - mem_0_10.I <= bits(RW0_wdata, 10, 10) - mem_0_10.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_10.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 10, 10)), UInt<1>("h1"))) - mem_0_10.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_11.CE <= RW0_clk - mem_0_11.A <= RW0_addr - node RW0_rdata_0_11 = bits(mem_0_11.O, 0, 0) - mem_0_11.I <= bits(RW0_wdata, 11, 11) - mem_0_11.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_11.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 11, 11)), UInt<1>("h1"))) - mem_0_11.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_12.CE <= RW0_clk - mem_0_12.A <= RW0_addr - node RW0_rdata_0_12 = bits(mem_0_12.O, 0, 0) - mem_0_12.I <= bits(RW0_wdata, 12, 12) - mem_0_12.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_12.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 12, 12)), UInt<1>("h1"))) - mem_0_12.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_13.CE <= RW0_clk - mem_0_13.A <= RW0_addr - node RW0_rdata_0_13 = bits(mem_0_13.O, 0, 0) - mem_0_13.I <= bits(RW0_wdata, 13, 13) - mem_0_13.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_13.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 13, 13)), UInt<1>("h1"))) - mem_0_13.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_14.CE <= RW0_clk - mem_0_14.A <= RW0_addr - node RW0_rdata_0_14 = bits(mem_0_14.O, 0, 0) - mem_0_14.I <= bits(RW0_wdata, 14, 14) - mem_0_14.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_14.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 14, 14)), UInt<1>("h1"))) - mem_0_14.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_15.CE <= RW0_clk - mem_0_15.A <= RW0_addr - node RW0_rdata_0_15 = bits(mem_0_15.O, 0, 0) - mem_0_15.I <= bits(RW0_wdata, 15, 15) - mem_0_15.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_15.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 15, 15)), UInt<1>("h1"))) - mem_0_15.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_16.CE <= RW0_clk - mem_0_16.A <= RW0_addr - node RW0_rdata_0_16 = bits(mem_0_16.O, 0, 0) - mem_0_16.I <= bits(RW0_wdata, 16, 16) - mem_0_16.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_16.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 16, 16)), UInt<1>("h1"))) - mem_0_16.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_17.CE <= RW0_clk - mem_0_17.A <= RW0_addr - node RW0_rdata_0_17 = bits(mem_0_17.O, 0, 0) - mem_0_17.I <= bits(RW0_wdata, 17, 17) - mem_0_17.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_17.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 17, 17)), UInt<1>("h1"))) - mem_0_17.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_18.CE <= RW0_clk - mem_0_18.A <= RW0_addr - node RW0_rdata_0_18 = bits(mem_0_18.O, 0, 0) - mem_0_18.I <= bits(RW0_wdata, 18, 18) - mem_0_18.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_18.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 18, 18)), UInt<1>("h1"))) - mem_0_18.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_19.CE <= RW0_clk - mem_0_19.A <= RW0_addr - node RW0_rdata_0_19 = bits(mem_0_19.O, 0, 0) - mem_0_19.I <= bits(RW0_wdata, 19, 19) - mem_0_19.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_19.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 19, 19)), UInt<1>("h1"))) - mem_0_19.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_20.CE <= RW0_clk - mem_0_20.A <= RW0_addr - node RW0_rdata_0_20 = 
bits(mem_0_20.O, 0, 0) - mem_0_20.I <= bits(RW0_wdata, 20, 20) - mem_0_20.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_20.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 20, 20)), UInt<1>("h1"))) - mem_0_20.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_21.CE <= RW0_clk - mem_0_21.A <= RW0_addr - node RW0_rdata_0_21 = bits(mem_0_21.O, 0, 0) - mem_0_21.I <= bits(RW0_wdata, 21, 21) - mem_0_21.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_21.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 21, 21)), UInt<1>("h1"))) - mem_0_21.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_22.CE <= RW0_clk - mem_0_22.A <= RW0_addr - node RW0_rdata_0_22 = bits(mem_0_22.O, 0, 0) - mem_0_22.I <= bits(RW0_wdata, 22, 22) - mem_0_22.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_22.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 22, 22)), UInt<1>("h1"))) - mem_0_22.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_23.CE <= RW0_clk - mem_0_23.A <= RW0_addr - node RW0_rdata_0_23 = bits(mem_0_23.O, 0, 0) - mem_0_23.I <= bits(RW0_wdata, 23, 23) - mem_0_23.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_23.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 23, 23)), UInt<1>("h1"))) - mem_0_23.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_24.CE <= RW0_clk - mem_0_24.A <= RW0_addr - node RW0_rdata_0_24 = bits(mem_0_24.O, 0, 0) - mem_0_24.I <= bits(RW0_wdata, 24, 24) - mem_0_24.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_24.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 24, 24)), UInt<1>("h1"))) - mem_0_24.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_25.CE <= RW0_clk - mem_0_25.A <= RW0_addr - node RW0_rdata_0_25 = bits(mem_0_25.O, 0, 0) - mem_0_25.I <= bits(RW0_wdata, 25, 25) - mem_0_25.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_25.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 25, 25)), UInt<1>("h1"))) - mem_0_25.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_26.CE <= RW0_clk - mem_0_26.A <= RW0_addr - node RW0_rdata_0_26 = bits(mem_0_26.O, 0, 0) - mem_0_26.I <= bits(RW0_wdata, 26, 26) - mem_0_26.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_26.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 26, 26)), UInt<1>("h1"))) - mem_0_26.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_27.CE <= RW0_clk - mem_0_27.A <= RW0_addr - node RW0_rdata_0_27 = bits(mem_0_27.O, 0, 0) - mem_0_27.I <= bits(RW0_wdata, 27, 27) - mem_0_27.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_27.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 27, 27)), UInt<1>("h1"))) - mem_0_27.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_28.CE <= RW0_clk - mem_0_28.A <= RW0_addr - node RW0_rdata_0_28 = bits(mem_0_28.O, 0, 0) - mem_0_28.I <= bits(RW0_wdata, 28, 28) - mem_0_28.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_28.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 28, 28)), UInt<1>("h1"))) - mem_0_28.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_29.CE <= RW0_clk - mem_0_29.A <= RW0_addr - node RW0_rdata_0_29 = bits(mem_0_29.O, 0, 0) - mem_0_29.I <= bits(RW0_wdata, 29, 29) - mem_0_29.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_29.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 29, 29)), UInt<1>("h1"))) - mem_0_29.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_30.CE <= RW0_clk - mem_0_30.A <= RW0_addr - node RW0_rdata_0_30 = bits(mem_0_30.O, 0, 0) - mem_0_30.I <= bits(RW0_wdata, 30, 30) - mem_0_30.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - 
mem_0_30.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 30, 30)), UInt<1>("h1"))) - mem_0_30.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_31.CE <= RW0_clk - mem_0_31.A <= RW0_addr - node RW0_rdata_0_31 = bits(mem_0_31.O, 0, 0) - mem_0_31.I <= bits(RW0_wdata, 31, 31) - mem_0_31.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_31.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 31, 31)), UInt<1>("h1"))) - mem_0_31.CSB <= not(and(RW0_en, UInt<1>("h1"))) - node RW0_rdata_0 = cat(RW0_rdata_0_31, cat(RW0_rdata_0_30, cat(RW0_rdata_0_29, cat(RW0_rdata_0_28, cat(RW0_rdata_0_27, cat(RW0_rdata_0_26, cat(RW0_rdata_0_25, cat(RW0_rdata_0_24, cat(RW0_rdata_0_23, cat(RW0_rdata_0_22, cat(RW0_rdata_0_21, cat(RW0_rdata_0_20, cat(RW0_rdata_0_19, cat(RW0_rdata_0_18, cat(RW0_rdata_0_17, cat(RW0_rdata_0_16, cat(RW0_rdata_0_15, cat(RW0_rdata_0_14, cat(RW0_rdata_0_13, cat(RW0_rdata_0_12, cat(RW0_rdata_0_11, cat(RW0_rdata_0_10, cat(RW0_rdata_0_9, cat(RW0_rdata_0_8, cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))))))))))))))))))))))))))) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<32>("h0")) - - module smem_0_ext : - input RW0_addr : UInt<6> - input RW0_clk : Clock - input RW0_wdata : UInt<32> - output RW0_rdata : UInt<32> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - input RW0_wmask : UInt<32> - - inst mem_0_0 of my_sram_1rw_64x8 - inst mem_0_1 of my_sram_1rw_64x8 - inst mem_0_2 of my_sram_1rw_64x8 - inst mem_0_3 of my_sram_1rw_64x8 - inst mem_0_4 of my_sram_1rw_64x8 - inst mem_0_5 of my_sram_1rw_64x8 - inst mem_0_6 of my_sram_1rw_64x8 - inst mem_0_7 of my_sram_1rw_64x8 - inst mem_0_8 of my_sram_1rw_64x8 - inst mem_0_9 of my_sram_1rw_64x8 - inst mem_0_10 of my_sram_1rw_64x8 - inst mem_0_11 of my_sram_1rw_64x8 - inst mem_0_12 of my_sram_1rw_64x8 - inst mem_0_13 of my_sram_1rw_64x8 - inst mem_0_14 of my_sram_1rw_64x8 - inst mem_0_15 of my_sram_1rw_64x8 - inst mem_0_16 of my_sram_1rw_64x8 - inst mem_0_17 of my_sram_1rw_64x8 - inst mem_0_18 of my_sram_1rw_64x8 - inst mem_0_19 of my_sram_1rw_64x8 - inst mem_0_20 of my_sram_1rw_64x8 - inst mem_0_21 of my_sram_1rw_64x8 - inst mem_0_22 of my_sram_1rw_64x8 - inst mem_0_23 of my_sram_1rw_64x8 - inst mem_0_24 of my_sram_1rw_64x8 - inst mem_0_25 of my_sram_1rw_64x8 - inst mem_0_26 of my_sram_1rw_64x8 - inst mem_0_27 of my_sram_1rw_64x8 - inst mem_0_28 of my_sram_1rw_64x8 - inst mem_0_29 of my_sram_1rw_64x8 - inst mem_0_30 of my_sram_1rw_64x8 - inst mem_0_31 of my_sram_1rw_64x8 - mem_0_0.CE <= RW0_clk - mem_0_0.A <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.O, 0, 0) - mem_0_0.I <= bits(RW0_wdata, 0, 0) - mem_0_0.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_0.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1"))) - mem_0_0.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_1.CE <= RW0_clk - mem_0_1.A <= RW0_addr - node RW0_rdata_0_1 = bits(mem_0_1.O, 0, 0) - mem_0_1.I <= bits(RW0_wdata, 1, 1) - mem_0_1.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_1.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1"))) - mem_0_1.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_2.CE <= RW0_clk - mem_0_2.A <= RW0_addr - node RW0_rdata_0_2 = bits(mem_0_2.O, 0, 0) - mem_0_2.I <= bits(RW0_wdata, 2, 2) - mem_0_2.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_2.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1"))) - mem_0_2.CSB <= not(and(RW0_en, 
UInt<1>("h1"))) - mem_0_3.CE <= RW0_clk - mem_0_3.A <= RW0_addr - node RW0_rdata_0_3 = bits(mem_0_3.O, 0, 0) - mem_0_3.I <= bits(RW0_wdata, 3, 3) - mem_0_3.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_3.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1"))) - mem_0_3.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_4.CE <= RW0_clk - mem_0_4.A <= RW0_addr - node RW0_rdata_0_4 = bits(mem_0_4.O, 0, 0) - mem_0_4.I <= bits(RW0_wdata, 4, 4) - mem_0_4.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_4.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 4, 4)), UInt<1>("h1"))) - mem_0_4.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_5.CE <= RW0_clk - mem_0_5.A <= RW0_addr - node RW0_rdata_0_5 = bits(mem_0_5.O, 0, 0) - mem_0_5.I <= bits(RW0_wdata, 5, 5) - mem_0_5.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_5.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 5, 5)), UInt<1>("h1"))) - mem_0_5.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_6.CE <= RW0_clk - mem_0_6.A <= RW0_addr - node RW0_rdata_0_6 = bits(mem_0_6.O, 0, 0) - mem_0_6.I <= bits(RW0_wdata, 6, 6) - mem_0_6.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_6.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 6, 6)), UInt<1>("h1"))) - mem_0_6.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_7.CE <= RW0_clk - mem_0_7.A <= RW0_addr - node RW0_rdata_0_7 = bits(mem_0_7.O, 0, 0) - mem_0_7.I <= bits(RW0_wdata, 7, 7) - mem_0_7.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_7.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1"))) - mem_0_7.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_8.CE <= RW0_clk - mem_0_8.A <= RW0_addr - node RW0_rdata_0_8 = bits(mem_0_8.O, 0, 0) - mem_0_8.I <= bits(RW0_wdata, 8, 8) - mem_0_8.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_8.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 8, 8)), UInt<1>("h1"))) - mem_0_8.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_9.CE <= RW0_clk - mem_0_9.A <= RW0_addr - node RW0_rdata_0_9 = bits(mem_0_9.O, 0, 0) - mem_0_9.I <= bits(RW0_wdata, 9, 9) - mem_0_9.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_9.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 9, 9)), UInt<1>("h1"))) - mem_0_9.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_10.CE <= RW0_clk - mem_0_10.A <= RW0_addr - node RW0_rdata_0_10 = bits(mem_0_10.O, 0, 0) - mem_0_10.I <= bits(RW0_wdata, 10, 10) - mem_0_10.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_10.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 10, 10)), UInt<1>("h1"))) - mem_0_10.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_11.CE <= RW0_clk - mem_0_11.A <= RW0_addr - node RW0_rdata_0_11 = bits(mem_0_11.O, 0, 0) - mem_0_11.I <= bits(RW0_wdata, 11, 11) - mem_0_11.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_11.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 11, 11)), UInt<1>("h1"))) - mem_0_11.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_12.CE <= RW0_clk - mem_0_12.A <= RW0_addr - node RW0_rdata_0_12 = bits(mem_0_12.O, 0, 0) - mem_0_12.I <= bits(RW0_wdata, 12, 12) - mem_0_12.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_12.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 12, 12)), UInt<1>("h1"))) - mem_0_12.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_13.CE <= RW0_clk - mem_0_13.A <= RW0_addr - node RW0_rdata_0_13 = bits(mem_0_13.O, 0, 0) - mem_0_13.I <= bits(RW0_wdata, 13, 13) - mem_0_13.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - 
mem_0_13.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 13, 13)), UInt<1>("h1"))) - mem_0_13.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_14.CE <= RW0_clk - mem_0_14.A <= RW0_addr - node RW0_rdata_0_14 = bits(mem_0_14.O, 0, 0) - mem_0_14.I <= bits(RW0_wdata, 14, 14) - mem_0_14.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_14.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 14, 14)), UInt<1>("h1"))) - mem_0_14.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_15.CE <= RW0_clk - mem_0_15.A <= RW0_addr - node RW0_rdata_0_15 = bits(mem_0_15.O, 0, 0) - mem_0_15.I <= bits(RW0_wdata, 15, 15) - mem_0_15.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_15.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 15, 15)), UInt<1>("h1"))) - mem_0_15.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_16.CE <= RW0_clk - mem_0_16.A <= RW0_addr - node RW0_rdata_0_16 = bits(mem_0_16.O, 0, 0) - mem_0_16.I <= bits(RW0_wdata, 16, 16) - mem_0_16.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_16.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 16, 16)), UInt<1>("h1"))) - mem_0_16.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_17.CE <= RW0_clk - mem_0_17.A <= RW0_addr - node RW0_rdata_0_17 = bits(mem_0_17.O, 0, 0) - mem_0_17.I <= bits(RW0_wdata, 17, 17) - mem_0_17.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_17.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 17, 17)), UInt<1>("h1"))) - mem_0_17.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_18.CE <= RW0_clk - mem_0_18.A <= RW0_addr - node RW0_rdata_0_18 = bits(mem_0_18.O, 0, 0) - mem_0_18.I <= bits(RW0_wdata, 18, 18) - mem_0_18.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_18.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 18, 18)), UInt<1>("h1"))) - mem_0_18.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_19.CE <= RW0_clk - mem_0_19.A <= RW0_addr - node RW0_rdata_0_19 = bits(mem_0_19.O, 0, 0) - mem_0_19.I <= bits(RW0_wdata, 19, 19) - mem_0_19.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_19.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 19, 19)), UInt<1>("h1"))) - mem_0_19.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_20.CE <= RW0_clk - mem_0_20.A <= RW0_addr - node RW0_rdata_0_20 = bits(mem_0_20.O, 0, 0) - mem_0_20.I <= bits(RW0_wdata, 20, 20) - mem_0_20.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_20.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 20, 20)), UInt<1>("h1"))) - mem_0_20.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_21.CE <= RW0_clk - mem_0_21.A <= RW0_addr - node RW0_rdata_0_21 = bits(mem_0_21.O, 0, 0) - mem_0_21.I <= bits(RW0_wdata, 21, 21) - mem_0_21.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_21.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 21, 21)), UInt<1>("h1"))) - mem_0_21.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_22.CE <= RW0_clk - mem_0_22.A <= RW0_addr - node RW0_rdata_0_22 = bits(mem_0_22.O, 0, 0) - mem_0_22.I <= bits(RW0_wdata, 22, 22) - mem_0_22.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_22.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 22, 22)), UInt<1>("h1"))) - mem_0_22.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_23.CE <= RW0_clk - mem_0_23.A <= RW0_addr - node RW0_rdata_0_23 = bits(mem_0_23.O, 0, 0) - mem_0_23.I <= bits(RW0_wdata, 23, 23) - mem_0_23.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_23.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 23, 23)), UInt<1>("h1"))) - mem_0_23.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_24.CE 
<= RW0_clk - mem_0_24.A <= RW0_addr - node RW0_rdata_0_24 = bits(mem_0_24.O, 0, 0) - mem_0_24.I <= bits(RW0_wdata, 24, 24) - mem_0_24.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_24.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 24, 24)), UInt<1>("h1"))) - mem_0_24.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_25.CE <= RW0_clk - mem_0_25.A <= RW0_addr - node RW0_rdata_0_25 = bits(mem_0_25.O, 0, 0) - mem_0_25.I <= bits(RW0_wdata, 25, 25) - mem_0_25.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_25.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 25, 25)), UInt<1>("h1"))) - mem_0_25.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_26.CE <= RW0_clk - mem_0_26.A <= RW0_addr - node RW0_rdata_0_26 = bits(mem_0_26.O, 0, 0) - mem_0_26.I <= bits(RW0_wdata, 26, 26) - mem_0_26.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_26.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 26, 26)), UInt<1>("h1"))) - mem_0_26.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_27.CE <= RW0_clk - mem_0_27.A <= RW0_addr - node RW0_rdata_0_27 = bits(mem_0_27.O, 0, 0) - mem_0_27.I <= bits(RW0_wdata, 27, 27) - mem_0_27.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_27.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 27, 27)), UInt<1>("h1"))) - mem_0_27.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_28.CE <= RW0_clk - mem_0_28.A <= RW0_addr - node RW0_rdata_0_28 = bits(mem_0_28.O, 0, 0) - mem_0_28.I <= bits(RW0_wdata, 28, 28) - mem_0_28.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_28.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 28, 28)), UInt<1>("h1"))) - mem_0_28.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_29.CE <= RW0_clk - mem_0_29.A <= RW0_addr - node RW0_rdata_0_29 = bits(mem_0_29.O, 0, 0) - mem_0_29.I <= bits(RW0_wdata, 29, 29) - mem_0_29.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_29.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 29, 29)), UInt<1>("h1"))) - mem_0_29.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_30.CE <= RW0_clk - mem_0_30.A <= RW0_addr - node RW0_rdata_0_30 = bits(mem_0_30.O, 0, 0) - mem_0_30.I <= bits(RW0_wdata, 30, 30) - mem_0_30.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_30.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 30, 30)), UInt<1>("h1"))) - mem_0_30.CSB <= not(and(RW0_en, UInt<1>("h1"))) - mem_0_31.CE <= RW0_clk - mem_0_31.A <= RW0_addr - node RW0_rdata_0_31 = bits(mem_0_31.O, 0, 0) - mem_0_31.I <= bits(RW0_wdata, 31, 31) - mem_0_31.OEB <= not(and(and(not(RW0_wmode), RW0_en), UInt<1>("h1"))) - mem_0_31.WEB <= not(and(and(RW0_wmode, bits(RW0_wmask, 31, 31)), UInt<1>("h1"))) - mem_0_31.CSB <= not(and(RW0_en, UInt<1>("h1"))) - node RW0_rdata_0 = cat(RW0_rdata_0_31, cat(RW0_rdata_0_30, cat(RW0_rdata_0_29, cat(RW0_rdata_0_28, cat(RW0_rdata_0_27, cat(RW0_rdata_0_26, cat(RW0_rdata_0_25, cat(RW0_rdata_0_24, cat(RW0_rdata_0_23, cat(RW0_rdata_0_22, cat(RW0_rdata_0_21, cat(RW0_rdata_0_20, cat(RW0_rdata_0_19, cat(RW0_rdata_0_18, cat(RW0_rdata_0_17, cat(RW0_rdata_0_16, cat(RW0_rdata_0_15, cat(RW0_rdata_0_14, cat(RW0_rdata_0_13, cat(RW0_rdata_0_12, cat(RW0_rdata_0_11, cat(RW0_rdata_0_10, cat(RW0_rdata_0_9, cat(RW0_rdata_0_8, cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))))))))))))))))))))))))))) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<32>("h0")) - - extmodule my_sram_1rw_64x8 : - input A : UInt<6> - input CE : Clock - input I : UInt<8> - output 
O : UInt<8> - input CSB : UInt<1> - input OEB : UInt<1> - input WEB : UInt<1> - - defname = my_sram_1rw_64x8 -""" - - it should "compile, execute and test the boom test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class SmallTagArrayTest extends MacroCompilerSpec with HasSRAMGenerator with HasSimpleTestGenerator { - // Test that mapping a smaller memory using a larger lib can still work. - override def memWidth: Int = 26 - override def memDepth: BigInt = BigInt(2) - override def memMaskGran: Option[Int] = Some(26) - override def memPortPrefix: String = "" - - override def libWidth: Int = 32 - override def libDepth: BigInt = BigInt(64) - override def libMaskGran: Option[Int] = Some(1) - override def libPortPrefix: String = "" - - override def extraPorts: Seq[MacroExtraPort] = Seq( - MacroExtraPort(name = "must_be_one", portType = Constant, width = 1, value = 1) - ) - - override def generateBody(): String = - s""" - | inst mem_0_0 of $lib_name - | mem_0_0.must_be_one <= UInt<1>("h1") - | mem_0_0.clk <= clk - | mem_0_0.addr <= addr - | node dout_0_0 = bits(mem_0_0.dout, 25, 0) - | mem_0_0.din <= bits(din, 25, 0) - | mem_0_0.mask <= cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(UInt<1>("h0"), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), cat(bits(mask, 0, 0), bits(mask, 0, 0)))))))))))))))))))))))))))))))) - | mem_0_0.write_en <= and(and(write_en, UInt<1>("h1")), UInt<1>("h1")) - | node dout_0 = dout_0_0 - | dout <= mux(UInt<1>("h1"), dout_0, UInt<26>("h0")) - """.stripMargin - - it should "compile, execute, and test, the small tag array test" in { - compileExecuteAndTest(mem, lib, v, output) - } -} - -class RocketChipTest extends MacroCompilerSpec with HasSRAMGenerator { - val mem = s"mem-RocketChipTest.json" - val lib = s"lib-RocketChipTest.json" - val v = s"RocketChipTest.v" - - val libSRAMs = Seq( - SRAMMacro( - name = "SRAM1RW1024x8", - depth = 1024, - width = 8, - family = "1rw", - ports = Seq( - generateReadWritePort("", 8, BigInt(1024)) - ) - ), - SRAMMacro( - name = "SRAM1RW512x32", - depth = 512, - width = 32, - family = "1rw", - ports = Seq( - generateReadWritePort("", 32, BigInt(512)) - ) - ), - SRAMMacro( - name = "SRAM1RW64x128", - depth = 64, - width = 128, - family = "1rw", - ports = Seq( - generateReadWritePort("", 128, BigInt(64)) - ) - ), - SRAMMacro( - name = "SRAM1RW64x32", - depth = 64, - width = 32, - family = "1rw", - ports = Seq( - generateReadWritePort("", 32, BigInt(64)) - ) - ), - SRAMMacro( - name = "SRAM1RW64x8", - depth = 64, - width = 8, - family = "1rw", - ports = Seq( - generateReadWritePort("", 8, BigInt(64)) - ) - ), - SRAMMacro( - name = "SRAM1RW512x8", - depth = 512, - width = 8, - family = "1rw", - ports = Seq( - generateReadWritePort("", 8, BigInt(512)) - ) - ), - SRAMMacro( - name = "SRAM2RW64x32", - depth = 64, - width = 32, - family = "1r1w", - ports = Seq( - generateReadPort("portA", 32, BigInt(64)), - generateWritePort("portB", 32, BigInt(64)) - ) - ) - ) - - val memSRAMs: Seq[mdf.macrolib.Macro] = 
mdf.macrolib.Utils - .readMDFFromString(""" -[ - { - "type": "sram", - "name": "tag_array_ext", - "depth": 64, - "width": 80, - "ports": [ - { - "clock port name": "RW0_clk", - "mask granularity": 20, - "output port name": "RW0_rdata", - "input port name": "RW0_wdata", - "address port name": "RW0_addr", - "mask port name": "RW0_wmask", - "chip enable port name": "RW0_en", - "write enable port name": "RW0_wmode" - } - ] - }, - { - "type": "sram", - "name": "T_1090_ext", - "depth": 512, - "width": 64, - "ports": [ - { - "clock port name": "RW0_clk", - "output port name": "RW0_rdata", - "input port name": "RW0_wdata", - "address port name": "RW0_addr", - "chip enable port name": "RW0_en", - "write enable port name": "RW0_wmode" - } - ] - }, - { - "type": "sram", - "name": "T_406_ext", - "depth": 512, - "width": 64, - "ports": [ - { - "clock port name": "RW0_clk", - "mask granularity": 8, - "output port name": "RW0_rdata", - "input port name": "RW0_wdata", - "address port name": "RW0_addr", - "mask port name": "RW0_wmask", - "chip enable port name": "RW0_en", - "write enable port name": "RW0_wmode" - } - ] - }, - { - "type": "sram", - "name": "T_2172_ext", - "depth": 64, - "width": 88, - "ports": [ - { - "clock port name": "W0_clk", - "mask granularity": 22, - "input port name": "W0_data", - "address port name": "W0_addr", - "chip enable port name": "W0_en", - "mask port name": "W0_mask" - }, - { - "clock port name": "R0_clk", - "output port name": "R0_data", - "address port name": "R0_addr", - "chip enable port name": "R0_en" - } - ] - } -] -""").getOrElse(List()) - - writeToLib(lib, libSRAMs) - writeToMem(mem, memSRAMs) - - val output = // TODO: check correctness... - """ -circuit T_2172_ext : - module tag_array_ext : - input RW0_addr : UInt<6> - input RW0_clk : Clock - input RW0_wdata : UInt<80> - output RW0_rdata : UInt<80> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - input RW0_wmask : UInt<4> - - inst mem_0_0 of SRAM1RW64x32 - inst mem_0_1 of SRAM1RW64x32 - inst mem_0_2 of SRAM1RW64x32 - inst mem_0_3 of SRAM1RW64x32 - mem_0_0.clk <= RW0_clk - mem_0_0.addr <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.dout, 19, 0) - mem_0_0.din <= bits(RW0_wdata, 19, 0) - mem_0_0.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1")) - mem_0_1.clk <= RW0_clk - mem_0_1.addr <= RW0_addr - node RW0_rdata_0_1 = bits(mem_0_1.dout, 19, 0) - mem_0_1.din <= bits(RW0_wdata, 39, 20) - mem_0_1.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1")) - mem_0_2.clk <= RW0_clk - mem_0_2.addr <= RW0_addr - node RW0_rdata_0_2 = bits(mem_0_2.dout, 19, 0) - mem_0_2.din <= bits(RW0_wdata, 59, 40) - mem_0_2.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1")) - mem_0_3.clk <= RW0_clk - mem_0_3.addr <= RW0_addr - node RW0_rdata_0_3 = bits(mem_0_3.dout, 19, 0) - mem_0_3.din <= bits(RW0_wdata, 79, 60) - mem_0_3.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1")) - node RW0_rdata_0 = cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<80>("h0")) - - extmodule SRAM1RW64x32 : - input addr : UInt<6> - input clk : Clock - input din : UInt<32> - output dout : UInt<32> - input write_en : UInt<1> - - defname = SRAM1RW64x32 - - module T_1090_ext : - input RW0_addr : UInt<9> - input RW0_clk : Clock - input RW0_wdata : UInt<64> - output RW0_rdata : UInt<64> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - - inst mem_0_0 of SRAM1RW512x32 - inst mem_0_1 of SRAM1RW512x32 - mem_0_0.clk <= 
RW0_clk - mem_0_0.addr <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.dout, 31, 0) - mem_0_0.din <= bits(RW0_wdata, 31, 0) - mem_0_0.write_en <= and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1")) - mem_0_1.clk <= RW0_clk - mem_0_1.addr <= RW0_addr - node RW0_rdata_0_1 = bits(mem_0_1.dout, 31, 0) - mem_0_1.din <= bits(RW0_wdata, 63, 32) - mem_0_1.write_en <= and(and(RW0_wmode, UInt<1>("h1")), UInt<1>("h1")) - node RW0_rdata_0 = cat(RW0_rdata_0_1, RW0_rdata_0_0) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<64>("h0")) - - extmodule SRAM1RW512x32 : - input addr : UInt<9> - input clk : Clock - input din : UInt<32> - output dout : UInt<32> - input write_en : UInt<1> - - defname = SRAM1RW512x32 - - - module T_406_ext : - input RW0_addr : UInt<9> - input RW0_clk : Clock - input RW0_wdata : UInt<64> - output RW0_rdata : UInt<64> - input RW0_en : UInt<1> - input RW0_wmode : UInt<1> - input RW0_wmask : UInt<8> - - inst mem_0_0 of SRAM1RW512x8 - inst mem_0_1 of SRAM1RW512x8 - inst mem_0_2 of SRAM1RW512x8 - inst mem_0_3 of SRAM1RW512x8 - inst mem_0_4 of SRAM1RW512x8 - inst mem_0_5 of SRAM1RW512x8 - inst mem_0_6 of SRAM1RW512x8 - inst mem_0_7 of SRAM1RW512x8 - mem_0_0.clk <= RW0_clk - mem_0_0.addr <= RW0_addr - node RW0_rdata_0_0 = bits(mem_0_0.dout, 7, 0) - mem_0_0.din <= bits(RW0_wdata, 7, 0) - mem_0_0.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 0, 0)), UInt<1>("h1")) - mem_0_1.clk <= RW0_clk - mem_0_1.addr <= RW0_addr - node RW0_rdata_0_1 = bits(mem_0_1.dout, 7, 0) - mem_0_1.din <= bits(RW0_wdata, 15, 8) - mem_0_1.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 1, 1)), UInt<1>("h1")) - mem_0_2.clk <= RW0_clk - mem_0_2.addr <= RW0_addr - node RW0_rdata_0_2 = bits(mem_0_2.dout, 7, 0) - mem_0_2.din <= bits(RW0_wdata, 23, 16) - mem_0_2.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 2, 2)), UInt<1>("h1")) - mem_0_3.clk <= RW0_clk - mem_0_3.addr <= RW0_addr - node RW0_rdata_0_3 = bits(mem_0_3.dout, 7, 0) - mem_0_3.din <= bits(RW0_wdata, 31, 24) - mem_0_3.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 3, 3)), UInt<1>("h1")) - mem_0_4.clk <= RW0_clk - mem_0_4.addr <= RW0_addr - node RW0_rdata_0_4 = bits(mem_0_4.dout, 7, 0) - mem_0_4.din <= bits(RW0_wdata, 39, 32) - mem_0_4.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 4, 4)), UInt<1>("h1")) - mem_0_5.clk <= RW0_clk - mem_0_5.addr <= RW0_addr - node RW0_rdata_0_5 = bits(mem_0_5.dout, 7, 0) - mem_0_5.din <= bits(RW0_wdata, 47, 40) - mem_0_5.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 5, 5)), UInt<1>("h1")) - mem_0_6.clk <= RW0_clk - mem_0_6.addr <= RW0_addr - node RW0_rdata_0_6 = bits(mem_0_6.dout, 7, 0) - mem_0_6.din <= bits(RW0_wdata, 55, 48) - mem_0_6.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 6, 6)), UInt<1>("h1")) - mem_0_7.clk <= RW0_clk - mem_0_7.addr <= RW0_addr - node RW0_rdata_0_7 = bits(mem_0_7.dout, 7, 0) - mem_0_7.din <= bits(RW0_wdata, 63, 56) - mem_0_7.write_en <= and(and(RW0_wmode, bits(RW0_wmask, 7, 7)), UInt<1>("h1")) - node RW0_rdata_0 = cat(RW0_rdata_0_7, cat(RW0_rdata_0_6, cat(RW0_rdata_0_5, cat(RW0_rdata_0_4, cat(RW0_rdata_0_3, cat(RW0_rdata_0_2, cat(RW0_rdata_0_1, RW0_rdata_0_0))))))) - RW0_rdata <= mux(UInt<1>("h1"), RW0_rdata_0, UInt<64>("h0")) - - extmodule SRAM1RW512x8 : - input addr : UInt<9> - input clk : Clock - input din : UInt<8> - output dout : UInt<8> - input write_en : UInt<1> - - defname = SRAM1RW512x8 - - - module T_2172_ext : - input W0_addr : UInt<6> - input W0_clk : Clock - input W0_data : UInt<88> - input W0_en : UInt<1> - input W0_mask : UInt<4> - input R0_addr : UInt<6> - input R0_clk : Clock - output 
R0_data : UInt<88> - input R0_en : UInt<1> - - inst mem_0_0 of SRAM2RW64x32 - inst mem_0_1 of SRAM2RW64x32 - inst mem_0_2 of SRAM2RW64x32 - inst mem_0_3 of SRAM2RW64x32 - mem_0_0.portB_clk <= W0_clk - mem_0_0.portB_addr <= W0_addr - mem_0_0.portB_din <= bits(W0_data, 21, 0) - mem_0_0.portB_write_en <= and(and(UInt<1>("h1"), bits(W0_mask, 0, 0)), UInt<1>("h1")) - mem_0_1.portB_clk <= W0_clk - mem_0_1.portB_addr <= W0_addr - mem_0_1.portB_din <= bits(W0_data, 43, 22) - mem_0_1.portB_write_en <= and(and(UInt<1>("h1"), bits(W0_mask, 1, 1)), UInt<1>("h1")) - mem_0_2.portB_clk <= W0_clk - mem_0_2.portB_addr <= W0_addr - mem_0_2.portB_din <= bits(W0_data, 65, 44) - mem_0_2.portB_write_en <= and(and(UInt<1>("h1"), bits(W0_mask, 2, 2)), UInt<1>("h1")) - mem_0_3.portB_clk <= W0_clk - mem_0_3.portB_addr <= W0_addr - mem_0_3.portB_din <= bits(W0_data, 87, 66) - mem_0_3.portB_write_en <= and(and(UInt<1>("h1"), bits(W0_mask, 3, 3)), UInt<1>("h1")) - mem_0_0.portA_clk <= R0_clk - mem_0_0.portA_addr <= R0_addr - node R0_data_0_0 = bits(mem_0_0.portA_dout, 21, 0) - mem_0_1.portA_clk <= R0_clk - mem_0_1.portA_addr <= R0_addr - node R0_data_0_1 = bits(mem_0_1.portA_dout, 21, 0) - mem_0_2.portA_clk <= R0_clk - mem_0_2.portA_addr <= R0_addr - node R0_data_0_2 = bits(mem_0_2.portA_dout, 21, 0) - mem_0_3.portA_clk <= R0_clk - mem_0_3.portA_addr <= R0_addr - node R0_data_0_3 = bits(mem_0_3.portA_dout, 21, 0) - node R0_data_0 = cat(R0_data_0_3, cat(R0_data_0_2, cat(R0_data_0_1, R0_data_0_0))) - R0_data <= mux(UInt<1>("h1"), R0_data_0, UInt<88>("h0")) - - extmodule SRAM2RW64x32 : - input portA_addr : UInt<6> - input portA_clk : Clock - output portA_dout : UInt<32> - input portB_addr : UInt<6> - input portB_clk : Clock - input portB_din : UInt<32> - input portB_write_en : UInt<1> - - defname = SRAM2RW64x32 -""" - - // TODO FIXME: Enable this test when firrtl #644 https://github.com/freechipsproject/firrtl/issues/644 is fixed - "rocket example" should "work" in { - pending - } - //~ compileExecuteAndTest(mem, lib, v, output) -} diff --git a/src/test/scala/barstools/macros/SynFlops.scala b/src/test/scala/barstools/macros/SynFlops.scala deleted file mode 100644 index 16a3446c7..000000000 --- a/src/test/scala/barstools/macros/SynFlops.scala +++ /dev/null @@ -1,455 +0,0 @@ -package barstools.macros - -// Test flop synthesis of the memory compiler. 
- -trait HasSynFlopsTestGenerator extends HasSimpleTestGenerator { - this: MacroCompilerSpec with HasSRAMGenerator => - def generateFlops(): String = { - s""" - inst mem_0_0 of split_$lib_name - mem_0_0.${libPortPrefix}_clk <= ${libPortPrefix}_clk - mem_0_0.${libPortPrefix}_addr <= ${libPortPrefix}_addr - node ${libPortPrefix}_dout_0_0 = bits(mem_0_0.${libPortPrefix}_dout, ${libWidth - 1}, 0) - mem_0_0.${libPortPrefix}_din <= bits(${libPortPrefix}_din, ${libWidth - 1}, 0) - mem_0_0.${libPortPrefix}_write_en <= and(and(and(${libPortPrefix}_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - node ${libPortPrefix}_dout_0 = ${libPortPrefix}_dout_0_0 - ${libPortPrefix}_dout <= mux(UInt<1>("h1"), ${libPortPrefix}_dout_0, UInt<$libWidth>("h0")) - - module split_$lib_name : - input ${libPortPrefix}_addr : UInt<$lib_addr_width> - input ${libPortPrefix}_clk : Clock - input ${libPortPrefix}_din : UInt<$libWidth> - output ${libPortPrefix}_dout : UInt<$libWidth> - input ${libPortPrefix}_write_en : UInt<1> - - mem ram : - data-type => UInt<$libWidth> - depth => $libDepth - read-latency => 1 - write-latency => 1 - readwriter => RW_0 - read-under-write => undefined - ram.RW_0.clk <= ${libPortPrefix}_clk - ram.RW_0.addr <= ${libPortPrefix}_addr - ram.RW_0.en <= UInt<1>("h1") - ram.RW_0.wmode <= ${libPortPrefix}_write_en - ram.RW_0.wmask <= UInt<1>("h1") - ${libPortPrefix}_dout <= ram.RW_0.rdata - ram.RW_0.wdata <= ${libPortPrefix}_din -""" - } - - // If there is no lib, put the flops definition into the body. - abstract override def generateBody(): String = { - if (this.isInstanceOf[HasNoLibTestGenerator]) { - generateFlops() - } else { - super.generateBody() - } - } - - // If there is no lib, don't generate a footer, since the flops definition - // will be in the body. 
- override def generateFooter(): String = { - if (this.isInstanceOf[HasNoLibTestGenerator]) "" - else - s""" - module $lib_name : -${generateFooterPorts()} - -${generateFlops()} -""" - } - -} - -class Synflops2048x8_noLib - extends MacroCompilerSpec - with HasSRAMGenerator - with HasNoLibTestGenerator - with HasSynFlopsTestGenerator { - override lazy val memDepth = BigInt(2048) - override lazy val memWidth = 8 - - compileExecuteAndTest(mem, None, v, output, synflops = true) -} - -class Synflops2048x16_noLib - extends MacroCompilerSpec - with HasSRAMGenerator - with HasNoLibTestGenerator - with HasSynFlopsTestGenerator { - override lazy val memDepth = BigInt(2048) - override lazy val memWidth = 16 - - compileExecuteAndTest(mem, None, v, output, synflops = true) -} - -class Synflops8192x16_noLib - extends MacroCompilerSpec - with HasSRAMGenerator - with HasNoLibTestGenerator - with HasSynFlopsTestGenerator { - override lazy val memDepth = BigInt(8192) - override lazy val memWidth = 16 - - compileExecuteAndTest(mem, None, v, output, synflops = true) -} - -class Synflops2048x16_depth_Lib - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with HasSynFlopsTestGenerator { - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val width = 16 - - compileExecuteAndTest(mem, lib, v, output, synflops = true) -} - -class Synflops2048x64_width_Lib - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleWidthTestGenerator - with HasSynFlopsTestGenerator { - override lazy val memWidth = 64 - override lazy val libWidth = 8 - override lazy val depth = BigInt(1024) - - compileExecuteAndTest(mem, lib, v, output, synflops = true) -} - -class Synflops_SplitPorts_Read_Write - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with HasSynFlopsTestGenerator { - import mdf.macrolib._ - - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val width = 8 - - override def generateLibSRAM(): SRAMMacro = SRAMMacro( - name = lib_name, - width = width, - depth = libDepth, - family = "1r1w", - ports = Seq( - generateReadPort("innerA", width, libDepth), - generateWritePort("innerB", width, libDepth) - ) - ) - - override def generateMemSRAM(): SRAMMacro = SRAMMacro( - name = mem_name, - width = width, - depth = memDepth, - family = "1r1w", - ports = Seq( - generateReadPort("outerB", width, memDepth), - generateWritePort("outerA", width, memDepth) - ) - ) - - override def generateHeader() = - """ -circuit target_memory : - module target_memory : - input outerB_addr : UInt<11> - input outerB_clk : Clock - output outerB_dout : UInt<8> - input outerA_addr : UInt<11> - input outerA_clk : Clock - input outerA_din : UInt<8> - input outerA_write_en : UInt<1> -""" - - override def generateBody() = - """ - node outerB_addr_sel = bits(outerB_addr, 10, 10) - reg outerB_addr_sel_reg : UInt<1>, outerB_clk with : - reset => (UInt<1>("h0"), outerB_addr_sel_reg) - outerB_addr_sel_reg <= mux(UInt<1>("h1"), outerB_addr_sel, outerB_addr_sel_reg) - node outerA_addr_sel = bits(outerA_addr, 10, 10) - inst mem_0_0 of awesome_lib_mem - mem_0_0.innerB_clk <= outerA_clk - mem_0_0.innerB_addr <= outerA_addr - mem_0_0.innerB_din <= bits(outerA_din, 7, 0) - mem_0_0.innerB_write_en <= and(and(and(outerA_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) - mem_0_0.innerA_clk <= outerB_clk - mem_0_0.innerA_addr <= outerB_addr - node outerB_dout_0_0 
= bits(mem_0_0.innerA_dout, 7, 0) - node outerB_dout_0 = outerB_dout_0_0 - inst mem_1_0 of awesome_lib_mem - mem_1_0.innerB_clk <= outerA_clk - mem_1_0.innerB_addr <= outerA_addr - mem_1_0.innerB_din <= bits(outerA_din, 7, 0) - mem_1_0.innerB_write_en <= and(and(and(outerA_write_en, UInt<1>("h1")), UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) - mem_1_0.innerA_clk <= outerB_clk - mem_1_0.innerA_addr <= outerB_addr - node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) - node outerB_dout_1 = outerB_dout_1_0 - outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<8>("h0"))) -""" - - override def generateFooterPorts() = - """ - input innerA_addr : UInt<10> - input innerA_clk : Clock - output innerA_dout : UInt<8> - input innerB_addr : UInt<10> - input innerB_clk : Clock - input innerB_din : UInt<8> - input innerB_write_en : UInt<1> -""" - - override def generateFlops() = - """ - inst mem_0_0 of split_awesome_lib_mem - mem_0_0.innerB_clk <= innerB_clk - mem_0_0.innerB_addr <= innerB_addr - mem_0_0.innerB_din <= bits(innerB_din, 7, 0) - mem_0_0.innerB_write_en <= and(and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")), UInt<1>("h1")) - mem_0_0.innerA_clk <= innerA_clk - mem_0_0.innerA_addr <= innerA_addr - node innerA_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) - node innerA_dout_0 = innerA_dout_0_0 - innerA_dout <= mux(UInt<1>("h1"), innerA_dout_0, UInt<8>("h0")) - - module split_awesome_lib_mem : - input innerA_addr : UInt<10> - input innerA_clk : Clock - output innerA_dout : UInt<8> - input innerB_addr : UInt<10> - input innerB_clk : Clock - input innerB_din : UInt<8> - input innerB_write_en : UInt<1> - - mem ram : - data-type => UInt<8> - depth => 1024 - read-latency => 1 - write-latency => 1 - reader => R_0 - writer => W_0 - read-under-write => undefined - ram.R_0.clk <= innerA_clk - ram.R_0.addr <= innerA_addr - ram.R_0.en <= UInt<1>("h1") - innerA_dout <= ram.R_0.data - ram.W_0.clk <= innerB_clk - ram.W_0.addr <= innerB_addr - ram.W_0.en <= innerB_write_en - ram.W_0.mask <= UInt<1>("h1") - ram.W_0.data <= innerB_din -""" - - "Non-masked split lib; split mem" should "syn flops fine" in { - compileExecuteAndTest(mem, lib, v, output, synflops = true) - } -} - -class Synflops_SplitPorts_MaskedMem_Read_MaskedWrite - extends MacroCompilerSpec - with HasSRAMGenerator - with HasSimpleDepthTestGenerator - with HasSynFlopsTestGenerator { - import mdf.macrolib._ - - override lazy val memDepth = BigInt(2048) - override lazy val libDepth = BigInt(1024) - override lazy val width = 8 - override lazy val memMaskGran: Option[Int] = Some(8) - override lazy val libMaskGran: Option[Int] = Some(1) - - override def generateLibSRAM(): SRAMMacro = SRAMMacro( - name = lib_name, - width = width, - depth = libDepth, - family = "1r1w", - ports = Seq( - generateReadPort("innerA", width, libDepth), - generateWritePort("innerB", width, libDepth, libMaskGran) - ) - ) - - override def generateMemSRAM(): SRAMMacro = SRAMMacro( - name = mem_name, - width = width, - depth = memDepth, - family = "1r1w", - ports = Seq( - generateReadPort("outerB", width, memDepth), - generateWritePort("outerA", width, memDepth, memMaskGran) - ) - ) - - override def generateHeader() = - """ -circuit target_memory : - module target_memory : - input outerB_addr : UInt<11> - input outerB_clk : Clock - output outerB_dout : UInt<8> - input outerA_addr : UInt<11> - input outerA_clk : Clock - input outerA_din : UInt<8> - input outerA_write_en : UInt<1> - 
input outerA_mask : UInt<1> -""" - - override def generateBody() = - """ - node outerB_addr_sel = bits(outerB_addr, 10, 10) - reg outerB_addr_sel_reg : UInt<1>, outerB_clk with : - reset => (UInt<1>("h0"), outerB_addr_sel_reg) - outerB_addr_sel_reg <= mux(UInt<1>("h1"), outerB_addr_sel, outerB_addr_sel_reg) - node outerA_addr_sel = bits(outerA_addr, 10, 10) - inst mem_0_0 of awesome_lib_mem - mem_0_0.innerB_clk <= outerA_clk - mem_0_0.innerB_addr <= outerA_addr - mem_0_0.innerB_din <= bits(outerA_din, 7, 0) - mem_0_0.innerB_mask <= cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), bits(outerA_mask, 0, 0)))))))) - mem_0_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h0"))) - mem_0_0.innerA_clk <= outerB_clk - mem_0_0.innerA_addr <= outerB_addr - node outerB_dout_0_0 = bits(mem_0_0.innerA_dout, 7, 0) - node outerB_dout_0 = outerB_dout_0_0 - inst mem_1_0 of awesome_lib_mem - mem_1_0.innerB_clk <= outerA_clk - mem_1_0.innerB_addr <= outerA_addr - mem_1_0.innerB_din <= bits(outerA_din, 7, 0) - mem_1_0.innerB_mask <= cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), cat(bits(outerA_mask, 0, 0), bits(outerA_mask, 0, 0)))))))) - mem_1_0.innerB_write_en <= and(and(outerA_write_en, UInt<1>("h1")), eq(outerA_addr_sel, UInt<1>("h1"))) - mem_1_0.innerA_clk <= outerB_clk - mem_1_0.innerA_addr <= outerB_addr - node outerB_dout_1_0 = bits(mem_1_0.innerA_dout, 7, 0) - node outerB_dout_1 = outerB_dout_1_0 - outerB_dout <= mux(eq(outerB_addr_sel_reg, UInt<1>("h0")), outerB_dout_0, mux(eq(outerB_addr_sel_reg, UInt<1>("h1")), outerB_dout_1, UInt<8>("h0"))) -""" - - override def generateFooterPorts() = - """ - input innerA_addr : UInt<10> - input innerA_clk : Clock - output innerA_dout : UInt<8> - input innerB_addr : UInt<10> - input innerB_clk : Clock - input innerB_din : UInt<8> - input innerB_write_en : UInt<1> - input innerB_mask : UInt<8> -""" - - override def generateFlops() = - """ - inst mem_0_0 of split_awesome_lib_mem - inst mem_0_1 of split_awesome_lib_mem - inst mem_0_2 of split_awesome_lib_mem - inst mem_0_3 of split_awesome_lib_mem - inst mem_0_4 of split_awesome_lib_mem - inst mem_0_5 of split_awesome_lib_mem - inst mem_0_6 of split_awesome_lib_mem - inst mem_0_7 of split_awesome_lib_mem - mem_0_0.innerB_clk <= innerB_clk - mem_0_0.innerB_addr <= innerB_addr - mem_0_0.innerB_din <= bits(innerB_din, 0, 0) - mem_0_0.innerB_mask <= bits(innerB_mask, 0, 0) - mem_0_0.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) - mem_0_1.innerB_clk <= innerB_clk - mem_0_1.innerB_addr <= innerB_addr - mem_0_1.innerB_din <= bits(innerB_din, 1, 1) - mem_0_1.innerB_mask <= bits(innerB_mask, 1, 1) - mem_0_1.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) - mem_0_2.innerB_clk <= innerB_clk - mem_0_2.innerB_addr <= innerB_addr - mem_0_2.innerB_din <= bits(innerB_din, 2, 2) - mem_0_2.innerB_mask <= bits(innerB_mask, 2, 2) - mem_0_2.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) - mem_0_3.innerB_clk <= innerB_clk - mem_0_3.innerB_addr <= innerB_addr - mem_0_3.innerB_din <= bits(innerB_din, 3, 3) - mem_0_3.innerB_mask <= bits(innerB_mask, 3, 3) - mem_0_3.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) - 
mem_0_4.innerB_clk <= innerB_clk - mem_0_4.innerB_addr <= innerB_addr - mem_0_4.innerB_din <= bits(innerB_din, 4, 4) - mem_0_4.innerB_mask <= bits(innerB_mask, 4, 4) - mem_0_4.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) - mem_0_5.innerB_clk <= innerB_clk - mem_0_5.innerB_addr <= innerB_addr - mem_0_5.innerB_din <= bits(innerB_din, 5, 5) - mem_0_5.innerB_mask <= bits(innerB_mask, 5, 5) - mem_0_5.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) - mem_0_6.innerB_clk <= innerB_clk - mem_0_6.innerB_addr <= innerB_addr - mem_0_6.innerB_din <= bits(innerB_din, 6, 6) - mem_0_6.innerB_mask <= bits(innerB_mask, 6, 6) - mem_0_6.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) - mem_0_7.innerB_clk <= innerB_clk - mem_0_7.innerB_addr <= innerB_addr - mem_0_7.innerB_din <= bits(innerB_din, 7, 7) - mem_0_7.innerB_mask <= bits(innerB_mask, 7, 7) - mem_0_7.innerB_write_en <= and(and(innerB_write_en, UInt<1>("h1")), UInt<1>("h1")) - mem_0_0.innerA_clk <= innerA_clk - mem_0_0.innerA_addr <= innerA_addr - node innerA_dout_0_0 = bits(mem_0_0.innerA_dout, 0, 0) - mem_0_1.innerA_clk <= innerA_clk - mem_0_1.innerA_addr <= innerA_addr - node innerA_dout_0_1 = bits(mem_0_1.innerA_dout, 0, 0) - mem_0_2.innerA_clk <= innerA_clk - mem_0_2.innerA_addr <= innerA_addr - node innerA_dout_0_2 = bits(mem_0_2.innerA_dout, 0, 0) - mem_0_3.innerA_clk <= innerA_clk - mem_0_3.innerA_addr <= innerA_addr - node innerA_dout_0_3 = bits(mem_0_3.innerA_dout, 0, 0) - mem_0_4.innerA_clk <= innerA_clk - mem_0_4.innerA_addr <= innerA_addr - node innerA_dout_0_4 = bits(mem_0_4.innerA_dout, 0, 0) - mem_0_5.innerA_clk <= innerA_clk - mem_0_5.innerA_addr <= innerA_addr - node innerA_dout_0_5 = bits(mem_0_5.innerA_dout, 0, 0) - mem_0_6.innerA_clk <= innerA_clk - mem_0_6.innerA_addr <= innerA_addr - node innerA_dout_0_6 = bits(mem_0_6.innerA_dout, 0, 0) - mem_0_7.innerA_clk <= innerA_clk - mem_0_7.innerA_addr <= innerA_addr - node innerA_dout_0_7 = bits(mem_0_7.innerA_dout, 0, 0) - node innerA_dout_0 = cat(innerA_dout_0_7, cat(innerA_dout_0_6, cat(innerA_dout_0_5, cat(innerA_dout_0_4, cat(innerA_dout_0_3, cat(innerA_dout_0_2, cat(innerA_dout_0_1, innerA_dout_0_0))))))) - innerA_dout <= mux(UInt<1>("h1"), innerA_dout_0, UInt<8>("h0")) - - - module split_awesome_lib_mem : - input innerA_addr : UInt<10> - input innerA_clk : Clock - output innerA_dout : UInt<1> - input innerB_addr : UInt<10> - input innerB_clk : Clock - input innerB_din : UInt<1> - input innerB_write_en : UInt<1> - input innerB_mask : UInt<1> - - mem ram : - data-type => UInt<1> - depth => 1024 - read-latency => 1 - write-latency => 1 - reader => R_0 - writer => W_0 - read-under-write => undefined - ram.R_0.clk <= innerA_clk - ram.R_0.addr <= innerA_addr - ram.R_0.en <= UInt<1>("h1") - innerA_dout <= ram.R_0.data - ram.W_0.clk <= innerB_clk - ram.W_0.addr <= innerB_addr - ram.W_0.en <= innerB_write_en - ram.W_0.mask <= innerB_mask - ram.W_0.data <= innerB_din -""" - - "masked split lib; masked split mem" should "syn flops fine" in { - compileExecuteAndTest(mem, lib, v, output, synflops = true) - } -} diff --git a/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala b/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala deleted file mode 100644 index bb089de9a..000000000 --- a/src/test/scala/barstools/tapeout/transforms/GenerateSpec.scala +++ /dev/null @@ -1,104 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package barstools.tapeout.transforms - -import chisel3._ -import 
chisel3.experimental.ExtModule -import chisel3.stage.ChiselStage -import firrtl.FileUtils -import org.scalatest.freespec.AnyFreeSpec -import org.scalatest.matchers.must.Matchers.be -import org.scalatest.matchers.should.Matchers.convertToAnyShouldWrapper - -import java.io.{File, PrintWriter} - -class BlackBoxInverter extends ExtModule { - val in = IO(Input(Bool())) - val out = IO(Output(Bool())) -} - -class GenerateExampleModule extends Module { - val in = IO(Input(Bool())) - val out = IO(Output(Bool())) - - val inverter = Module(new BlackBoxInverter) - inverter.in := in - val inverted = inverter.out - - val reg = RegInit(0.U(8.W)) - reg := reg + inverted.asUInt - out := reg -} - -class ToBeMadeExternal extends Module { - val in = IO(Input(Bool())) - val out = IO(Output(Bool())) - - val reg = RegInit(0.U(8.W)) - reg := reg + in.asUInt + 2.U - out := reg -} - -class GenerateExampleTester extends Module { - val success = IO(Output(Bool())) - - val mod = Module(new GenerateExampleModule) - mod.in := 1.U - - val mod2 = Module(new ToBeMadeExternal) - mod2.in := 1.U - - val reg = RegInit(0.U(8.W)) - reg := reg + mod.out + mod2.out - - success := reg === 100.U - - when(reg === 100.U) { - stop() - } -} - -class GenerateSpec extends AnyFreeSpec { - - def generateTestData(targetDir: String): Unit = { - FileUtils.makeDirectory(targetDir) - - new ChiselStage().emitFirrtl(new GenerateExampleTester, Array("--target-dir", targetDir)) - - val blackBoxInverterText = - """ - |module BlackBoxInverter( - | input [0:0] in, - | output [0:0] out - |); - | assign out = !in; - |endmodule - |""".stripMargin - - val printWriter2 = new PrintWriter(new File(s"$targetDir/BlackBoxInverter.v")) - printWriter2.write(blackBoxInverterText) - printWriter2.close() - } - - "generate test data" in { - val targetDir = "test_run_dir/generate_spec_source" - generateTestData(targetDir) - - new File(s"$targetDir/GenerateExampleTester.fir").exists() should be(true) - } - - "generate top test" in { - val targetDir = "test_run_dir/generate_spec" - generateTestData(targetDir) - - GenerateModelStageMain.main( - Array( - "-i", - s"$targetDir/GenerateExampleTester.fir", - "-o", - s"$targetDir/GenerateExampleTester.v" - ) - ) - new File(s"$targetDir/GenerateExampleTester.v").exists() should be(true) - } -} diff --git a/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala b/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala deleted file mode 100644 index a086b0b95..000000000 --- a/src/test/scala/barstools/tapeout/transforms/retime/RetimeSpec.scala +++ /dev/null @@ -1,119 +0,0 @@ -// See LICENSE for license details. 
- -package barstools.tapeout.transforms.retime - -import chisel3._ -import chisel3.stage.{ChiselGeneratorAnnotation, ChiselStage} -import firrtl.{EmittedFirrtlCircuitAnnotation, EmittedFirrtlModuleAnnotation, FileUtils} -import logger.Logger -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers - -class RetimeSpec extends AnyFlatSpec with Matchers { - def normalized(s: String): String = { - require(!s.contains("\n")) - s.replaceAll("\\s+", " ").trim - } - def uniqueDirName[T](gen: => T, name: String): String = { - val genClassName = gen.getClass.getName - name + genClassName.hashCode.abs - } - def getLowFirrtl[T <: RawModule](gen: () => T, extraArgs: Array[String] = Array.empty): String = { - // generate low firrtl - (new ChiselStage) - .execute( - Array("-X", "low") ++ extraArgs, - Seq(ChiselGeneratorAnnotation(gen)) - ) - .collect { - case EmittedFirrtlCircuitAnnotation(a) => a - case EmittedFirrtlModuleAnnotation(a) => a - } - .map(_.value) - .mkString("") - } - - behavior.of("retime library") - - it should "pass simple retime module annotation" in { - val gen = () => new RetimeModule - val dir = uniqueDirName(gen, "RetimeModule") - - Logger.makeScope(Seq.empty) { - val captor = new Logger.OutputCaptor - Logger.setOutput(captor.printStream) - - // generate low firrtl - val firrtl = getLowFirrtl( - gen, - Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final", "--log-level", "info") - ) - - firrtl.nonEmpty should be(true) - //Make sure we got the RetimeTransform scheduled - captor.getOutputAsString should include("barstools.tapeout.transforms.retime.RetimeTransform") - } - - val lines = FileUtils - .getLines(s"test_run_dir/$dir/test_run_dir/$dir/final.anno.json") - .map(normalized) - .mkString("\n") - lines should include("barstools.tapeout.transforms.retime.RetimeAnnotation") - lines should include(""""target":"RetimeModule.RetimeModule"""") - } - - it should "pass simple retime instance annotation" in { - val gen = () => new RetimeInstance - val dir = uniqueDirName(gen, "RetimeInstance") - - Logger.makeScope(Seq.empty) { - val captor = new Logger.OutputCaptor - Logger.setOutput(captor.printStream) - - // generate low firrtl - val firrtl = getLowFirrtl( - gen, - Array("-td", s"test_run_dir/$dir", "-foaf", s"test_run_dir/$dir/final", "--log-level", "info") - ) - - firrtl.nonEmpty should be(true) - //Make sure we got the RetimeTransform scheduled - captor.getOutputAsString should include("barstools.tapeout.transforms.retime.RetimeTransform") - } - - val lines = FileUtils - .getLines(s"test_run_dir/$dir/test_run_dir/$dir/final.anno.json") - .map(normalized) - .mkString("\n") - lines should include("barstools.tapeout.transforms.retime.RetimeAnnotation") - lines should include(""""target":"RetimeInstance.MyModule"""") - } -} - -class RetimeModule extends Module with RetimeLib { - val io = IO(new Bundle { - val in = Input(UInt(15.W)) - val out = Output(UInt(15.W)) - }) - io.out := io.in - retime(this) -} - -class MyModule extends Module with RetimeLib { - val io = IO(new Bundle { - val in = Input(UInt(15.W)) - val out = Output(UInt(15.W)) - }) - io.out := io.in -} - -class RetimeInstance extends Module with RetimeLib { - val io = IO(new Bundle { - val in = Input(UInt(15.W)) - val out = Output(UInt(15.W)) - }) - val instance = Module(new MyModule) - retime(instance) - instance.io.in := io.in - io.out := instance.io.out -} diff --git a/src/test/scala/mdf/macrolib/ConfReaderSpec.scala b/src/test/scala/mdf/macrolib/ConfReaderSpec.scala deleted 
file mode 100644 index 58680cd78..000000000 --- a/src/test/scala/mdf/macrolib/ConfReaderSpec.scala +++ /dev/null @@ -1,101 +0,0 @@ -package mdf.macrolib - -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers - -class ConfReaderSpec extends AnyFlatSpec with Matchers { - - /** Generate a read port in accordance with RenameAnnotatedMemoryPorts. */ - def generateReadPort(num: Int, width: Int, depth: Int): MacroPort = { - MacroPort( - address = PolarizedPort(s"R${num}_addr", ActiveHigh), - clock = Some(PolarizedPort(s"R${num}_clk", PositiveEdge)), - output = Some(PolarizedPort(s"R${num}_data", ActiveHigh)), - width = Some(width), - depth = Some(depth) - ) - } - - /** Generate a write port in accordance with RenameAnnotatedMemoryPorts. */ - def generateWritePort(num: Int, width: Int, depth: Int, maskGran: Option[Int] = None): MacroPort = { - MacroPort( - address = PolarizedPort(s"W${num}_addr", ActiveHigh), - clock = Some(PolarizedPort(s"W${num}_clk", PositiveEdge)), - input = Some(PolarizedPort(s"W${num}_data", ActiveHigh)), - maskPort = if (maskGran.isDefined) Some(PolarizedPort(s"W${num}_mask", ActiveHigh)) else None, - maskGran = maskGran, - width = Some(184), - depth = Some(128) - ) - } - - "ConfReader" should "read a 1rw conf line" in { - val confStr = "name Foo_Bar_mem123_ext depth 128 width 184 ports mrw mask_gran 23" - ConfReader.readSingleLine(confStr) shouldBe SRAMMacro( - name = "Foo_Bar_mem123_ext", - width = 184, - depth = 128, - family = "1rw", - ports = List( - MacroPort( - address = PolarizedPort("RW0_addr", ActiveHigh), - clock = Some(PolarizedPort("RW0_clk", PositiveEdge)), - writeEnable = Some(PolarizedPort("RW0_wmode", ActiveHigh)), - output = Some(PolarizedPort("RW0_wdata", ActiveHigh)), - input = Some(PolarizedPort("RW0_rdata", ActiveHigh)), - maskPort = Some(PolarizedPort("RW0_wmask", ActiveHigh)), - maskGran = Some(23), - width = Some(184), - depth = Some(128) - ) - ), - extraPorts = List() - ) - } - - "ConfReader" should "read a 1r1w conf line" in { - val confStr = "name Foo_Bar_mem321_ext depth 128 width 184 ports read,mwrite mask_gran 23" - ConfReader.readSingleLine(confStr) shouldBe SRAMMacro( - name = "Foo_Bar_mem321_ext", - width = 184, - depth = 128, - family = "1r1w", - ports = List( - generateReadPort(0, 184, 128), - generateWritePort(0, 184, 128, Some(23)) - ), - extraPorts = List() - ) - } - - "ConfReader" should "read a mixed 1r2w conf line" in { - val confStr = "name Foo_Bar_mem321_ext depth 128 width 184 ports read,mwrite,write mask_gran 23" - ConfReader.readSingleLine(confStr) shouldBe SRAMMacro( - name = "Foo_Bar_mem321_ext", - width = 184, - depth = 128, - family = "1r2w", - ports = List( - generateReadPort(0, 184, 128), - generateWritePort(0, 184, 128, Some(23)), - generateWritePort(1, 184, 128) - ), - extraPorts = List() - ) - } - - "ConfReader" should "read a 42r29w conf line" in { - val confStr = - "name Foo_Bar_mem321_ext depth 128 width 184 ports " + (Seq.fill(42)("read") ++ Seq.fill(29)("mwrite")) - .mkString(",") + " mask_gran 23" - ConfReader.readSingleLine(confStr) shouldBe SRAMMacro( - name = "Foo_Bar_mem321_ext", - width = 184, - depth = 128, - family = "42r29w", - ports = ((0 to 41).map((num: Int) => generateReadPort(num, 184, 128))) ++ - ((0 to 28).map((num: Int) => generateWritePort(num, 184, 128, Some(23)))), - extraPorts = List() - ) - } -} diff --git a/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala b/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala deleted file mode 100644 index 
c6a9e7ce9..000000000 --- a/src/test/scala/mdf/macrolib/FlipChipMacroSpec.scala +++ /dev/null @@ -1,15 +0,0 @@ -package mdf.macrolib - -import firrtl.FileUtils -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers - -class FlipChipMacroSpec extends AnyFlatSpec with Matchers { - "Parsing flipchipmacros" should "work" in { - val stream = FileUtils.getLinesResource("/bumps.json") - val mdf = Utils.readMDFFromString(stream.mkString("\n")) - mdf match { - case Some(Seq(fcp: FlipChipMacro)) => println(fcp.visualize) - } - } -} diff --git a/src/test/scala/mdf/macrolib/IOMacroSpec.scala b/src/test/scala/mdf/macrolib/IOMacroSpec.scala deleted file mode 100644 index c6ab6e104..000000000 --- a/src/test/scala/mdf/macrolib/IOMacroSpec.scala +++ /dev/null @@ -1,67 +0,0 @@ -package mdf.macrolib - -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers - -class IOMacroSpec extends AnyFlatSpec with Matchers { - "Ground IOs" should "be detected" in { - val json = - """{ - | "name" : "GND", - | "type" : "ground" - |}""".stripMargin - val m = JSONUtils.readStringValueMap(json).get - IOMacro.parseJSON(m) shouldBe Some(IOMacro("GND", Ground)) - } - "Power IOs" should "be detected" in { - val json = - """{ - | "name" : "VDD0V8", - | "type" : "power" - |}""".stripMargin - val m = JSONUtils.readStringValueMap(json).get - IOMacro.parseJSON(m) shouldBe Some(IOMacro("VDD0V8", Power)) - } - "Digital IOs" should "be detected" in { - val json = - """{ - | "name" : "VDDC0_SEL[1:0]", - | "type" : "digital", - | "direction" : "output", - | "termination" : "CMOS" - |}""".stripMargin - val m = JSONUtils.readStringValueMap(json).get - IOMacro.parseJSON(m) shouldBe Some(IOMacro("VDDC0_SEL[1:0]", Digital, Some(Output), Some(CMOS))) - } - "Digital IOs with termination" should "be detected" in { - val json = - """{ - | "name" : "CCLK1", - | "type" : "digital", - | "direction" : "input", - | "termination" : 50, - | "terminationType" : "single", - | "terminationReference" : "GND" - |}""".stripMargin - val m = JSONUtils.readStringValueMap(json).get - IOMacro.parseJSON(m) shouldBe Some( - IOMacro("CCLK1", Digital, Some(Input), Some(Resistive(50)), Some(Single), Some("GND")) - ) - } - "Digital IOs with matching and termination" should "be detected" in { - val json = - """{ - | "name" : "REFCLK0P", - | "type" : "analog", - | "direction" : "input", - | "match" : ["REFCLK0N"], - | "termination" : 100, - | "terminationType" : "differential", - | "terminationReference" : "GND" - |}""".stripMargin - val m = JSONUtils.readStringValueMap(json).get - IOMacro.parseJSON(m) shouldBe Some( - IOMacro("REFCLK0P", Analog, Some(Input), Some(Resistive(100)), Some(Differential), Some("GND"), List("REFCLK0N")) - ) - } -} diff --git a/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala b/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala deleted file mode 100644 index b09422a74..000000000 --- a/src/test/scala/mdf/macrolib/IOPropertiesSpec.scala +++ /dev/null @@ -1,15 +0,0 @@ -package mdf.macrolib - -import firrtl.FileUtils -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers - -class IOPropertiesSpec extends AnyFlatSpec with Matchers { - "Parsing io_properties" should "work" in { - val stream = FileUtils.getLinesResource("/io_properties.json") - val mdf = Utils.readMDFFromString(stream.mkString("\n")) - mdf match { - case Some(Seq(fcp: IOProperties)) => - } - } -} diff --git a/src/test/scala/mdf/macrolib/MacroLibOutput.scala 
b/src/test/scala/mdf/macrolib/MacroLibOutput.scala deleted file mode 100644 index 85feaffaf..000000000 --- a/src/test/scala/mdf/macrolib/MacroLibOutput.scala +++ /dev/null @@ -1,270 +0,0 @@ -package mdf.macrolib - -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import play.api.libs.json._ - -import java.io.File - -// Output tests (Scala -> JSON). -// TODO: unify these tests with the input tests? - -trait HasAwesomeMemData { - def getAwesomeMem() = { - SRAMMacro( - name = "awesome_mem", - width = 32, - depth = 1024, - family = "1rw", - ports = Seq( - MacroPort( - address = PolarizedPort(name = "addr", polarity = ActiveHigh), - clock = Some(PolarizedPort(name = "clk", polarity = PositiveEdge)), - writeEnable = Some(PolarizedPort(name = "write_enable", polarity = ActiveHigh)), - readEnable = Some(PolarizedPort(name = "read_enable", polarity = ActiveHigh)), - chipEnable = Some(PolarizedPort(name = "chip_enable", polarity = ActiveHigh)), - output = Some(PolarizedPort(name = "data_out", polarity = ActiveHigh)), - input = Some(PolarizedPort(name = "data_in", polarity = ActiveHigh)), - maskPort = Some(PolarizedPort(name = "mask", polarity = ActiveHigh)), - maskGran = Some(8), - width = Some(32), - depth = Some(1024) // These numbers don't matter. - ) - ), - extraPorts = List() - ) - } - - def getAwesomeMemJSON(): String = { - """ - | { - | "type": "sram", - | "name": "awesome_mem", - | "width": 32, - | "depth": "1024", - | "mux": 1, - | "mask":true, - | "family": "1rw", - | "ports": [ - | { - | "address port name": "addr", - | "address port polarity": "active high", - | "clock port name": "clk", - | "clock port polarity": "positive edge", - | "write enable port name": "write_enable", - | "write enable port polarity": "active high", - | "read enable port name": "read_enable", - | "read enable port polarity": "active high", - | "chip enable port name": "chip_enable", - | "chip enable port polarity": "active high", - | "output port name": "data_out", - | "output port polarity": "active high", - | "input port name": "data_in", - | "input port polarity": "active high", - | "mask port name": "mask", - | "mask port polarity": "active high", - | "mask granularity": 8 - | } - | ] - | } - |""".stripMargin - } -} - -// Tests for filler macros. 
-class FillerMacroOutput extends AnyFlatSpec with Matchers { - "Valid lvt macro" should "be generated" in { - val expected = """ - | { - | "type": "filler cell", - | "name": "MY_FILLER_CELL", - | "vt": "lvt" - | } - |""".stripMargin - FillerMacro("MY_FILLER_CELL", "lvt").toJSON shouldBe Json.parse(expected) - } - - "Valid metal macro" should "be generated" in { - val expected = """ - | { - | "type": "metal filler cell", - | "name": "METAL_FILLER_CELL", - | "vt": "lvt" - | } - |""".stripMargin - MetalFillerMacro("METAL_FILLER_CELL", "lvt").toJSON shouldBe Json.parse(expected) - } - - "Valid hvt macro" should "be generated" in { - val expected = """ - | { - | "type": "filler cell", - | "name": "HVT_CELL_PROP", - | "vt": "hvt" - | } - |""".stripMargin - FillerMacro("HVT_CELL_PROP", "hvt").toJSON shouldBe Json.parse(expected) - } -} - -class SRAMPortOutput extends AnyFlatSpec with Matchers { - "Extra port" should "be generated" in { - val m = MacroExtraPort( - name = "TIE_HIGH", - width = 8, - portType = Constant, - value = ((1 << 8) - 1) - ) - val expected = """ - | { - | "type": "constant", - | "name": "TIE_HIGH", - | "width": 8, - | "value": 255 - | } - |""".stripMargin - m.toJSON shouldBe Json.parse(expected) - } - - "Minimal write port" should "be generated" in { - val m = MacroPort( - address = PolarizedPort(name = "addr", polarity = ActiveHigh), - clock = Some(PolarizedPort(name = "clk", polarity = PositiveEdge)), - writeEnable = Some(PolarizedPort(name = "write_enable", polarity = ActiveHigh)), - input = Some(PolarizedPort(name = "data_in", polarity = ActiveHigh)), - width = Some(32), - depth = Some(1024) // These numbers don't matter. - ) - val expected = """ - | { - | "address port name": "addr", - | "address port polarity": "active high", - | "clock port name": "clk", - | "clock port polarity": "positive edge", - | "write enable port name": "write_enable", - | "write enable port polarity": "active high", - | "input port name": "data_in", - | "input port polarity": "active high" - | } - |""".stripMargin - m.toJSON shouldBe Json.parse(expected) - } - - "Minimal read port" should "be generated" in { - val m = MacroPort( - address = PolarizedPort(name = "addr", polarity = ActiveHigh), - clock = Some(PolarizedPort(name = "clk", polarity = PositiveEdge)), - output = Some(PolarizedPort(name = "data_out", polarity = ActiveHigh)), - width = Some(32), - depth = Some(1024) // These numbers don't matter. - ) - val expected = """ - | { - | "address port name": "addr", - | "address port polarity": "active high", - | "clock port name": "clk", - | "clock port polarity": "positive edge", - | "output port name": "data_out", - | "output port polarity": "active high" - | } - |""".stripMargin - m.toJSON shouldBe Json.parse(expected) - } - - "Masked read port" should "be generated" in { - val m = MacroPort( - address = PolarizedPort(name = "addr", polarity = ActiveHigh), - clock = Some(PolarizedPort(name = "clk", polarity = PositiveEdge)), - output = Some(PolarizedPort(name = "data_out", polarity = ActiveHigh)), - maskPort = Some(PolarizedPort(name = "mask", polarity = ActiveHigh)), - maskGran = Some(8), - width = Some(32), - depth = Some(1024) // These numbers don't matter. 
- ) - val expected = """ - | { - | "address port name": "addr", - | "address port polarity": "active high", - | "clock port name": "clk", - | "clock port polarity": "positive edge", - | "output port name": "data_out", - | "output port polarity": "active high", - | "mask port name": "mask", - | "mask port polarity": "active high", - | "mask granularity": 8 - | } - |""".stripMargin - m.toJSON shouldBe Json.parse(expected) - } - - "Everything port" should "be generated" in { - val m = MacroPort( - address = PolarizedPort(name = "addr", polarity = ActiveHigh), - clock = Some(PolarizedPort(name = "clk", polarity = PositiveEdge)), - writeEnable = Some(PolarizedPort(name = "write_enable", polarity = ActiveHigh)), - readEnable = Some(PolarizedPort(name = "read_enable", polarity = ActiveHigh)), - chipEnable = Some(PolarizedPort(name = "chip_enable", polarity = ActiveHigh)), - output = Some(PolarizedPort(name = "data_out", polarity = ActiveHigh)), - input = Some(PolarizedPort(name = "data_in", polarity = ActiveHigh)), - maskPort = Some(PolarizedPort(name = "mask", polarity = ActiveHigh)), - maskGran = Some(8), - width = Some(32), - depth = Some(1024) // These numbers don't matter. - ) - val expected = """ - | { - | "address port name": "addr", - | "address port polarity": "active high", - | "clock port name": "clk", - | "clock port polarity": "positive edge", - | "write enable port name": "write_enable", - | "write enable port polarity": "active high", - | "read enable port name": "read_enable", - | "read enable port polarity": "active high", - | "chip enable port name": "chip_enable", - | "chip enable port polarity": "active high", - | "output port name": "data_out", - | "output port polarity": "active high", - | "input port name": "data_in", - | "input port polarity": "active high", - | "mask port name": "mask", - | "mask port polarity": "active high", - | "mask granularity": 8 - | } - |""".stripMargin - m.toJSON shouldBe Json.parse(expected) - } -} - -class SRAMMacroOutput extends AnyFlatSpec with Matchers with HasAwesomeMemData { - "SRAM macro" should "be generated" in { - val m = getAwesomeMem - val expected = getAwesomeMemJSON - m.toJSON shouldBe Json.parse(expected) - } -} - -class InputOutput extends AnyFlatSpec with Matchers with HasAwesomeMemData { - "Read-write string" should "preserve data" in { - val mdf = List( - FillerMacro("MY_FILLER_CELL", "lvt"), - MetalFillerMacro("METAL_GEAR_FILLER", "hvt"), - getAwesomeMem - ) - Utils.readMDFFromString(Utils.writeMDFToString(mdf)) shouldBe Some(mdf) - } - - val testDir: String = "test_run_dir" - new File(testDir).mkdirs // Make sure the testDir exists - - "Read-write file" should "preserve data" in { - val mdf = List( - FillerMacro("MY_FILLER_CELL", "lvt"), - MetalFillerMacro("METAL_GEAR_FILLER", "hvt"), - getAwesomeMem - ) - val filename = testDir + "/" + "mdf_read_write_test.json" - Utils.writeMDFToPath(Some(filename), mdf) shouldBe true - Utils.readMDFFromPath(Some(filename)) shouldBe Some(mdf) - } -} diff --git a/src/test/scala/mdf/macrolib/MacroLibSpec.scala b/src/test/scala/mdf/macrolib/MacroLibSpec.scala deleted file mode 100644 index fd3210bb2..000000000 --- a/src/test/scala/mdf/macrolib/MacroLibSpec.scala +++ /dev/null @@ -1,406 +0,0 @@ -package mdf.macrolib - -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import play.api.libs.json._ - -object JSONUtils { - def readStringValueMap(str: String): Option[Map[String, JsValue]] = { - Json.parse(str) match { - case x: JsObject => 
Some(x.as[Map[String, JsValue]]) - case _ => None - } - } -} - -// Tests for filler macros -class FillerMacroSpec extends AnyFlatSpec with Matchers { - "Valid lvt macros" should "be detected" in { - val m = JSONUtils - .readStringValueMap(""" - | { - | "type": "filler cell", - | "name": "MY_FILLER_CELL", - | "vt": "lvt" - | } - |""".stripMargin) - .get - FillerMacroBase.parseJSON(m) shouldBe Some(FillerMacro("MY_FILLER_CELL", "lvt")) - } - - "Valid metal macro" should "be detected" in { - val m = JSONUtils - .readStringValueMap(""" - | { - | "type": "metal filler cell", - | "name": "METAL_FILLER_CELL", - | "vt": "lvt" - | } - |""".stripMargin) - .get - FillerMacroBase.parseJSON(m) shouldBe Some(MetalFillerMacro("METAL_FILLER_CELL", "lvt")) - } - - "Valid hvt macros" should "be detected" in { - val m = JSONUtils - .readStringValueMap(""" - | { - | "type": "filler cell", - | "name": "HVT_CELL_PROP", - | "vt": "hvt" - | } - |""".stripMargin) - .get - FillerMacroBase.parseJSON(m) shouldBe Some(FillerMacro("HVT_CELL_PROP", "hvt")) - } - - "Empty name macros" should "be rejected" in { - val m = JSONUtils - .readStringValueMap(""" - | { - | "type": "filler cell", - | "name": "", - | "vt": "hvt" - | } - |""".stripMargin) - .get - FillerMacroBase.parseJSON(m) shouldBe None - } - - "Empty vt macros" should "be rejected" in { - val m = JSONUtils - .readStringValueMap(""" - | { - | "type": "metal filler cell", - | "name": "DEAD_CELL", - | "vt": "" - | } - |""".stripMargin) - .get - FillerMacroBase.parseJSON(m) shouldBe None - } - - "Missing vt macros" should "be rejected" in { - val m = JSONUtils - .readStringValueMap(""" - | { - | "type": "metal filler cell", - | "name": "DEAD_CELL" - | } - |""".stripMargin) - .get - FillerMacroBase.parseJSON(m) shouldBe None - } - - "Missing name macros" should "be rejected" in { - val m = JSONUtils - .readStringValueMap(""" - | { - | "type": "filler cell", - | "vt": "" - | } - |""".stripMargin) - .get - FillerMacroBase.parseJSON(m) shouldBe None - } -} - -// Tests for SRAM type and associates. -class SRAMMacroSpec extends AnyFlatSpec with Matchers { - // Simple port which can be reused in tests - // Note: assume width=depth=simplePortConstant. 
- val simplePortConstant = 1024 - def simplePort( - postfix: String = "", - width: Int = simplePortConstant, - depth: Int = simplePortConstant - ): (String, MacroPort) = { - val json = s""" - { - "address port name": "A_${postfix}", - "address port polarity": "active high", - "clock port name": "CLK_${postfix}", - "clock port polarity": "positive edge", - "write enable port name": "WEN_${postfix}", - "write enable port polarity": "active high", - "read enable port name": "REN_${postfix}", - "read enable port polarity": "active high", - "chip enable port name": "CEN_${postfix}", - "chip enable port polarity": "active high", - "output port name": "OUT_${postfix}", - "output port polarity": "active high", - "input port name": "IN_${postfix}", - "input port polarity": "active high", - "mask granularity": 1, - "mask port name": "MASK_${postfix}", - "mask port polarity": "active high" - } - """ - val port = MacroPort( - address = PolarizedPort(s"A_${postfix}", ActiveHigh), - clock = Some(PolarizedPort(s"CLK_${postfix}", PositiveEdge)), - writeEnable = Some(PolarizedPort(s"WEN_${postfix}", ActiveHigh)), - readEnable = Some(PolarizedPort(s"REN_${postfix}", ActiveHigh)), - chipEnable = Some(PolarizedPort(s"CEN_${postfix}", ActiveHigh)), - output = Some(PolarizedPort(s"OUT_${postfix}", ActiveHigh)), - input = Some(PolarizedPort(s"IN_${postfix}", ActiveHigh)), - maskPort = Some(PolarizedPort(s"MASK_${postfix}", ActiveHigh)), - maskGran = Some(1), - width = Some(width), - depth = Some(depth) - ) - (json, port) - } - "Simple port" should "be valid" in { - { - val (json, port) = simplePort("Simple1") - MacroPort.parseJSON(JSONUtils.readStringValueMap(json).get, simplePortConstant, simplePortConstant) shouldBe Some( - port - ) - } - { - val (json, port) = simplePort("Simple2") - MacroPort.parseJSON(JSONUtils.readStringValueMap(json).get, simplePortConstant, simplePortConstant) shouldBe Some( - port - ) - } - { - val (json, port) = simplePort("bar") - MacroPort.parseJSON(JSONUtils.readStringValueMap(json).get, simplePortConstant, simplePortConstant) shouldBe Some( - port - ) - } - { - val (json, port) = simplePort("") - MacroPort.parseJSON(JSONUtils.readStringValueMap(json).get, simplePortConstant, simplePortConstant) shouldBe Some( - port - ) - } - } - - "Simple SRAM macro" should "be detected" in { - val (json, port) = simplePort("", 2048, 4096) - val m = JSONUtils - .readStringValueMap(s""" -{ - "type": "sram", - "name": "SRAMS_R_US", - "width": 2048, - "depth": "4096", - "family": "1rw", - "ports": [ - ${json} - ] -} - """) - .get - SRAMMacro.parseJSON(m) shouldBe Some( - SRAMMacro("SRAMS_R_US", width = 2048, depth = 4096, family = "1rw", ports = List(port), extraPorts = List()) - ) - } - - "Non-power-of-two width & depth SRAM macro" should "be detected" in { - val (json, port) = simplePort("", 1234, 8888) - val m = JSONUtils - .readStringValueMap(s""" -{ - "type": "sram", - "name": "SRAMS_R_US", - "width": 1234, - "depth": "8888", - "family": "1rw", - "ports": [ - ${json} - ] -} - """) - .get - SRAMMacro.parseJSON(m) shouldBe Some( - SRAMMacro("SRAMS_R_US", width = 1234, depth = 8888, family = "1rw", ports = List(port), extraPorts = List()) - ) - } - - "Minimal memory port" should "be detected" in { - val (json, port) = simplePort("_A", 64, 1024) - val port2 = MacroPort( - address = PolarizedPort("A_B", ActiveHigh), - clock = Some(PolarizedPort("CLK_B", PositiveEdge)), - writeEnable = Some(PolarizedPort("WEN_B", ActiveHigh)), - readEnable = None, - chipEnable = None, - output = 
Some(PolarizedPort("OUT_B", ActiveHigh)), - input = Some(PolarizedPort("IN_B", ActiveHigh)), - maskPort = None, - maskGran = None, - width = Some(64), - depth = Some(1024) - ) - val m = JSONUtils - .readStringValueMap(s""" -{ - "type": "sram", - "name": "SRAMS_R_US", - "width": 64, - "depth": "1024", - "family": "2rw", - "ports": [ - ${json}, - { - "address port name": "A_B", - "address port polarity": "active high", - "clock port name": "CLK_B", - "clock port polarity": "positive edge", - "write enable port name": "WEN_B", - "write enable port polarity": "active high", - "output port name": "OUT_B", - "output port polarity": "active high", - "input port name": "IN_B", - "input port polarity": "active high" - } - ] -} - """) - .get - SRAMMacro.parseJSON(m) shouldBe Some( - SRAMMacro("SRAMS_R_US", width = 64, depth = 1024, family = "2rw", ports = List(port, port2), extraPorts = List()) - ) - } - - "Extra ports" should "be detected" in { - val (json, port) = simplePort("", 2048, 4096) - val m = JSONUtils - .readStringValueMap(s""" -{ - "type": "sram", - "name": "GOT_EXTRA", - "width": 2048, - "depth": "4096", - "family": "1rw", - "ports": [ - ${json} - ], - "extra ports": [ - { - "name": "TIE_DIE", - "width": 1, - "type": "constant", - "value": 1 - }, - { - "name": "TIE_MOO", - "width": 4, - "type": "constant", - "value": 0 - } - ] -} - """) - .get - SRAMMacro.parseJSON(m) shouldBe Some( - SRAMMacro( - "GOT_EXTRA", - width = 2048, - depth = 4096, - family = "1rw", - ports = List(port), - extraPorts = List( - MacroExtraPort( - name = "TIE_DIE", - width = 1, - portType = Constant, - value = 1 - ), - MacroExtraPort( - name = "TIE_MOO", - width = 4, - portType = Constant, - value = 0 - ) - ) - ) - ) - } - - "Invalid port" should "be rejected" in { - val (json, port) = simplePort("", 2048, 4096) - val m = JSONUtils - .readStringValueMap(s""" -{ - "type": "sram", - "name": "SRAMS_R_US", - "width": 2048, - "depth": "4096", - "family": "1rw", - "ports": [ - { - "address port name": "missing_polarity", - "output port name": "missing_clock" - } - ] -} - """) - .get - SRAMMacro.parseJSON(m) shouldBe None - } - - "No ports" should "be rejected" in { - val (json, port) = simplePort("", 2048, 4096) - val m = JSONUtils - .readStringValueMap(s""" -{ - "type": "sram", - "name": "SRAMS_R_US", - "width": 2048, - "depth": "4096", - "family": "1rw" -} - """) - .get - SRAMMacro.parseJSON(m) shouldBe None - } - - "No family and ports" should "be rejected" in { - val (json, port) = simplePort("", 2048, 4096) - val m = JSONUtils - .readStringValueMap(s""" -{ - "type": "sram", - "name": "SRAMS_R_US", - "width": 2048, - "depth": "4096" -} - """) - .get - SRAMMacro.parseJSON(m) shouldBe None - } - - "String width" should "be rejected" in { - val (json, port) = simplePort("", 2048, 4096) - val m = JSONUtils - .readStringValueMap(s""" -{ - "type": "sram", - "name": "BAD_BAD_SRAM", - "width": "wide", - "depth": "4096" -} - """) - .get - SRAMMacro.parseJSON(m) shouldBe None - } - - "String depth" should "be rejected" in { - val (json, port) = simplePort("", 2048, 4096) - val m = JSONUtils - .readStringValueMap(s""" -{ - "type": "sram", - "name": "BAD_BAD_SRAM", - "width": 512, - "depth": "octopus_under_the_sea" -} - """) - .get - SRAMMacro.parseJSON(m) shouldBe None - } -} From d7060f4b5a8022d3c2638648d50129136df553d4 Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Fri, 19 Apr 2024 11:07:45 -0700 Subject: [PATCH 271/273] Delete barstools submodule --- .gitmodules | 3 --- tools/barstools | 1 - 2 files changed, 4 
deletions(-) delete mode 160000 tools/barstools diff --git a/.gitmodules b/.gitmodules index 6ebbad59b..764a43819 100644 --- a/.gitmodules +++ b/.gitmodules @@ -127,9 +127,6 @@ [submodule "tools/axe"] path = tools/axe url = https://github.com/CTSRD-CHERI/axe.git -[submodule "tools/barstools"] - path = tools/barstools - url = https://github.com/ucb-bar/barstools.git [submodule "tools/cde"] path = tools/cde url = https://github.com/chipsalliance/cde.git diff --git a/tools/barstools b/tools/barstools deleted file mode 160000 index 60a1be9bf..000000000 --- a/tools/barstools +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 60a1be9bfe344fccbddd4874524accb3c9d2ade9 From 9436aea1e8020bf1854ae19804f3b7eb9d10f3d9 Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Fri, 19 Apr 2024 11:30:15 -0700 Subject: [PATCH 272/273] Fixes for in-tree barstools --- .github/scripts/check-commit.sh | 2 +- build.sbt | 9 +++------ common.mk | 16 ++++++++-------- docs/Customization/Custom-Chisel.rst | 2 +- generators/chipyard/src/main/scala/ChipTop.scala | 2 +- .../src/main/scala/clocking/ClockBinders.scala | 2 +- .../src/main/scala/example/CustomChipTop.scala | 2 +- .../src/main/scala/example/FlatChipTop.scala | 2 +- .../src/main/scala/harness/HarnessBinders.scala | 2 +- .../src/main/scala/iobinders/IOBinders.scala | 2 +- .../chipyard/src/main/scala/iocell/Analog.scala | 4 ++-- .../chipyard/src/main/scala/iocell/IOCell.scala | 2 +- .../firechip/src/main/scala/BridgeBinders.scala | 2 +- scripts/tutorial-patches/build.sbt.patch | 2 +- .../src/main/scala/macros/CostMetric.scala | 2 +- .../src/main/scala/macros/MacroCompiler.scala | 4 ++-- .../src/main/scala/macros/SynFlopsPass.scala | 4 ++-- tools/tapeout/src/main/scala/macros/Utils.scala | 2 +- .../main/scala/transforms/ExtraTransforms.scala | 2 +- .../transforms/GenerateModelStageMain.scala | 4 ++-- .../main/scala/transforms/retime/Retime.scala | 2 +- .../scala/transforms/stage/TapeoutStage.scala | 4 ++-- .../main/scala/transforms/utils/FileUtils.scala | 2 +- .../transforms/utils/LowerAnnotations.scala | 2 +- .../transforms/utils/ProgrammaticBundle.scala | 2 +- .../scala/transforms/utils/YamlHelpers.scala | 2 +- 26 files changed, 40 insertions(+), 43 deletions(-) diff --git a/.github/scripts/check-commit.sh b/.github/scripts/check-commit.sh index 8043c12fe..8fa9c24ce 100755 --- a/.github/scripts/check-commit.sh +++ b/.github/scripts/check-commit.sh @@ -92,7 +92,7 @@ dir="software" branches=("master" "dev") search -submodules=("DRAMSim2" "axe" "barstools" "dsptools" "rocket-dsp-utils" "torture" "fixedpoint" "cde") +submodules=("DRAMSim2" "axe" "dsptools" "rocket-dsp-utils" "torture" "fixedpoint" "cde") dir="tools" branches=("master" "dev" "main") search diff --git a/build.sbt b/build.sbt index d929b3ca2..03bfeaab5 100644 --- a/build.sbt +++ b/build.sbt @@ -158,7 +158,7 @@ lazy val testchipip = (project in file("generators/testchipip")) .settings(commonSettings) lazy val chipyard = (project in file("generators/chipyard")) - .dependsOn(testchipip, rocketchip, boom, hwacha, rocketchip_blocks, rocketchip_inclusive_cache, iocell, + .dependsOn(testchipip, rocketchip, boom, hwacha, rocketchip_blocks, rocketchip_inclusive_cache, sha3, // On separate line to allow for cleaner tutorial-setup patches dsptools, rocket_dsp_utils, gemmini, icenet, tracegen, cva6, nvdla, sodor, ibex, fft_generator, @@ -256,13 +256,10 @@ lazy val rocc_acc_utils = (project in file("generators/rocc-acc-utils")) .settings(libraryDependencies ++= rocketLibDeps.value) .settings(commonSettings) -lazy val iocell = 
Project(id = "iocell", base = file("./tools/barstools/") / "iocell") - .settings(chiselSettings) - .settings(commonSettings) - -lazy val tapeout = (project in file("./tools/barstools/")) +lazy val tapeout = (project in file("./tools/tapeout/")) .settings(chiselSettings) .settings(commonSettings) + .settings(libraryDependencies ++= Seq("com.typesafe.play" %% "play-json" % "2.9.2")) lazy val fixedpoint = (project in file("./tools/fixedpoint/")) .settings(chiselSettings) diff --git a/common.mk b/common.mk index a2e31b514..ea5811acd 100644 --- a/common.mk +++ b/common.mk @@ -91,9 +91,9 @@ VLOG_EXT = sv v CHIPYARD_SOURCE_DIRS = $(addprefix $(base_dir)/,generators sims/firesim/sim fpga/fpga-shells fpga/src) CHIPYARD_SCALA_SOURCES = $(call lookup_srcs_by_multiple_type,$(CHIPYARD_SOURCE_DIRS),$(SCALA_EXT)) CHIPYARD_VLOG_SOURCES = $(call lookup_srcs_by_multiple_type,$(CHIPYARD_SOURCE_DIRS),$(VLOG_EXT)) -BARSTOOLS_SOURCE_DIRS = $(addprefix $(base_dir)/,tools/barstools) -BARSTOOLS_SCALA_SOURCES = $(call lookup_srcs_by_multiple_type,$(BARSTOOLS_SOURCE_DIRS),$(SCALA_EXT)) -BARSTOOLS_VLOG_SOURCES = $(call lookup_srcs_by_multiple_type,$(BARSTOOLS_SOURCE_DIRS),$(VLOG_EXT)) +TAPEOUT_SOURCE_DIRS = $(addprefix $(base_dir)/,tools/tapeout) +TAPEOUT_SCALA_SOURCES = $(call lookup_srcs_by_multiple_type,$(TAPEOUT_SOURCE_DIRS),$(SCALA_EXT)) +TAPEOUT_VLOG_SOURCES = $(call lookup_srcs_by_multiple_type,$(TAPEOUT_SOURCE_DIRS),$(VLOG_EXT)) # This assumes no SBT meta-build sources SBT_SOURCE_DIRS = $(addprefix $(base_dir)/,generators sims/firesim/sim tools) SBT_SOURCES = $(call lookup_srcs,$(SBT_SOURCE_DIRS),sbt) $(base_dir)/build.sbt $(base_dir)/project/plugins.sbt $(base_dir)/project/build.properties @@ -127,7 +127,7 @@ $(CHIPYARD_CLASSPATH_TARGETS) &: $(CHIPYARD_SCALA_SOURCES) $(SCALA_BUILDTOOL_DEP $(call run_sbt_assembly,$(SBT_PROJECT),$(CHIPYARD_CLASSPATH)) # order only dependency between sbt runs needed to avoid concurrent sbt runs -$(TAPEOUT_CLASSPATH_TARGETS) &: $(BARSTOOLS_SCALA_SOURCES) $(SCALA_BUILDTOOL_DEPS) $(BARSTOOLS_VLOG_SOURCES) | $(CHIPYARD_CLASSPATH_TARGETS) +$(TAPEOUT_CLASSPATH_TARGETS) &: $(TAPEOUT_SCALA_SOURCES) $(SCALA_BUILDTOOL_DEPS) $(TAPEOUT_VLOG_SOURCES) | $(CHIPYARD_CLASSPATH_TARGETS) mkdir -p $(dir $@) $(call run_sbt_assembly,tapeout,$(TAPEOUT_CLASSPATH)) @@ -165,7 +165,7 @@ define sfc_extra_low_transforms_anno_contents [ { "class": "firrtl.stage.RunFirrtlTransformAnnotation", - "transform": "barstools.tapeout.transforms.ExtraLowTransforms" + "transform": "tapeout.transforms.ExtraLowTransforms" } ] endef @@ -232,7 +232,7 @@ $(FINAL_ANNO_FILE): $(EXTRA_ANNO_FILE) $(SFC_EXTRA_ANNO_FILE) $(SFC_LEVEL) $(SFC_MFC_TARGETS) &: private TMP_DIR := $(shell mktemp -d -t cy-XXXXXXXX) $(SFC_MFC_TARGETS) &: $(TAPEOUT_CLASSPATH_TARGETS) $(FIRRTL_FILE) $(FINAL_ANNO_FILE) $(SFC_LEVEL) $(EXTRA_FIRRTL_OPTIONS) $(MFC_LOWERING_OPTIONS) rm -rf $(GEN_COLLATERAL_DIR) - $(call run_jar_scala_main,$(TAPEOUT_CLASSPATH),barstools.tapeout.transforms.GenerateModelStageMain,\ + $(call run_jar_scala_main,$(TAPEOUT_CLASSPATH),tapeout.transforms.GenerateModelStageMain,\ --no-dedup \ --output-file $(SFC_FIRRTL_BASENAME) \ --output-annotation-file $(SFC_ANNO_FILE) \ @@ -297,12 +297,12 @@ $(TOP_SMEMS_CONF) $(MODEL_SMEMS_CONF) &: $(MFC_SMEMS_CONF) $(MFC_MODEL_HRCHY_JS # This file is for simulation only. 
VLSI flows should replace this file with one containing hard SRAMs TOP_MACROCOMPILER_MODE ?= --mode synflops $(TOP_SMEMS_FILE) $(TOP_SMEMS_FIR) &: $(TAPEOUT_CLASSPATH_TARGETS) $(TOP_SMEMS_CONF) - $(call run_jar_scala_main,$(TAPEOUT_CLASSPATH),barstools.macros.MacroCompiler,-n $(TOP_SMEMS_CONF) -v $(TOP_SMEMS_FILE) -f $(TOP_SMEMS_FIR) $(TOP_MACROCOMPILER_MODE)) + $(call run_jar_scala_main,$(TAPEOUT_CLASSPATH),tapeout.macros.MacroCompiler,-n $(TOP_SMEMS_CONF) -v $(TOP_SMEMS_FILE) -f $(TOP_SMEMS_FIR) $(TOP_MACROCOMPILER_MODE)) touch $(TOP_SMEMS_FILE) $(TOP_SMEMS_FIR) MODEL_MACROCOMPILER_MODE = --mode synflops $(MODEL_SMEMS_FILE) $(MODEL_SMEMS_FIR) &: $(TAPEOUT_CLASSPATH_TARGETS) $(MODEL_SMEMS_CONF) - $(call run_jar_scala_main,$(TAPEOUT_CLASSPATH),barstools.macros.MacroCompiler, -n $(MODEL_SMEMS_CONF) -v $(MODEL_SMEMS_FILE) -f $(MODEL_SMEMS_FIR) $(MODEL_MACROCOMPILER_MODE)) + $(call run_jar_scala_main,$(TAPEOUT_CLASSPATH),tapeout.macros.MacroCompiler, -n $(MODEL_SMEMS_CONF) -v $(MODEL_SMEMS_FILE) -f $(MODEL_SMEMS_FIR) $(MODEL_MACROCOMPILER_MODE)) touch $(MODEL_SMEMS_FILE) $(MODEL_SMEMS_FIR) ######################################################################################## diff --git a/docs/Customization/Custom-Chisel.rst b/docs/Customization/Custom-Chisel.rst index a3b4ef72c..19a7bde8c 100644 --- a/docs/Customization/Custom-Chisel.rst +++ b/docs/Customization/Custom-Chisel.rst @@ -59,7 +59,7 @@ should look something like this: .. code-block:: scala lazy val chipyard = (project in file("generators/chipyard")) - .dependsOn(testchipip, rocketchip, boom, hwacha, rocketchip_blocks, rocketchip_inclusive_cache, iocell, + .dependsOn(testchipip, rocketchip, boom, hwacha, rocketchip_blocks, rocketchip_inclusive_cache, sha3, dsptools, `rocket-dsp-utils`, gemmini, icenet, tracegen, cva6, nvdla, sodor, ibex, fft_generator, yourproject, // <- added to the middle of the list for simplicity diff --git a/generators/chipyard/src/main/scala/ChipTop.scala b/generators/chipyard/src/main/scala/ChipTop.scala index d80d71af6..518afa7d1 100644 --- a/generators/chipyard/src/main/scala/ChipTop.scala +++ b/generators/chipyard/src/main/scala/ChipTop.scala @@ -10,7 +10,7 @@ import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp, LazyRawModuleI import freechips.rocketchip.util.{DontTouch} import chipyard.iobinders._ -import barstools.iocell.chisel._ +import chipyard.iocell._ case object BuildSystem extends Field[Parameters => LazyModule]((p: Parameters) => new DigitalTop()(p)) diff --git a/generators/chipyard/src/main/scala/clocking/ClockBinders.scala b/generators/chipyard/src/main/scala/clocking/ClockBinders.scala index fdb2ec9e5..d075fcbbe 100644 --- a/generators/chipyard/src/main/scala/clocking/ClockBinders.scala +++ b/generators/chipyard/src/main/scala/clocking/ClockBinders.scala @@ -7,7 +7,7 @@ import freechips.rocketchip.prci._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.subsystem._ import freechips.rocketchip.tilelink._ -import barstools.iocell.chisel._ +import chipyard.iocell._ // This uses the FakePLL, which uses a ClockAtFreq Verilog blackbox to generate // the requested clocks. 
This also adds TileLink ClockDivider and ClockSelector diff --git a/generators/chipyard/src/main/scala/example/CustomChipTop.scala b/generators/chipyard/src/main/scala/example/CustomChipTop.scala index eb0565ce1..5958d1b27 100644 --- a/generators/chipyard/src/main/scala/example/CustomChipTop.scala +++ b/generators/chipyard/src/main/scala/example/CustomChipTop.scala @@ -6,7 +6,7 @@ import chipyard.iobinders._ import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy.{InModuleBody} import freechips.rocketchip.subsystem.{PBUS, HasTileLinkLocations} -import barstools.iocell.chisel._ +import chipyard.iocell._ import chipyard._ import chipyard.harness.{BuildTop} import sifive.blocks.devices.uart._ diff --git a/generators/chipyard/src/main/scala/example/FlatChipTop.scala b/generators/chipyard/src/main/scala/example/FlatChipTop.scala index 6b249286a..d8f659f40 100644 --- a/generators/chipyard/src/main/scala/example/FlatChipTop.scala +++ b/generators/chipyard/src/main/scala/example/FlatChipTop.scala @@ -13,7 +13,7 @@ import chipyard.{BuildSystem, DigitalTop} import chipyard.harness.{BuildTop} import chipyard.clocking._ import chipyard.iobinders._ -import barstools.iocell.chisel._ +import chipyard.iocell._ import testchipip.serdes.{SerialTLKey} class WithFlatChipTop extends Config((site, here, up) => { diff --git a/generators/chipyard/src/main/scala/harness/HarnessBinders.scala b/generators/chipyard/src/main/scala/harness/HarnessBinders.scala index f357f357c..9bf97b184 100644 --- a/generators/chipyard/src/main/scala/harness/HarnessBinders.scala +++ b/generators/chipyard/src/main/scala/harness/HarnessBinders.scala @@ -12,7 +12,7 @@ import freechips.rocketchip.subsystem._ import freechips.rocketchip.util._ import freechips.rocketchip.jtag.{JTAGIO} import freechips.rocketchip.devices.debug.{SimJTAG} -import barstools.iocell.chisel._ +import chipyard.iocell._ import testchipip.dram.{SimDRAM} import testchipip.tsi.{SimTSI, SerialRAM, TSI, TSIIO} import testchipip.soc.{TestchipSimDTM} diff --git a/generators/chipyard/src/main/scala/iobinders/IOBinders.scala b/generators/chipyard/src/main/scala/iobinders/IOBinders.scala index b4f116d25..e00f43a4a 100644 --- a/generators/chipyard/src/main/scala/iobinders/IOBinders.scala +++ b/generators/chipyard/src/main/scala/iobinders/IOBinders.scala @@ -27,7 +27,7 @@ import sifive.blocks.devices.spi._ import sifive.blocks.devices.i2c._ import tracegen.{TraceGenSystemModuleImp} -import barstools.iocell.chisel._ +import chipyard.iocell._ import testchipip.serdes.{CanHavePeripheryTLSerial, SerialTLKey} import testchipip.spi.{SPIChipIO} diff --git a/generators/chipyard/src/main/scala/iocell/Analog.scala b/generators/chipyard/src/main/scala/iocell/Analog.scala index 0cdfc493b..78e1cfe76 100644 --- a/generators/chipyard/src/main/scala/iocell/Analog.scala +++ b/generators/chipyard/src/main/scala/iocell/Analog.scala @@ -1,6 +1,6 @@ // See LICENSE for license details -package barstools.iocell.chisel +package chipyard.iocell import chisel3._ import chisel3.util.{HasBlackBoxResource} @@ -10,7 +10,7 @@ class AnalogConst(value: Int, width: Int = 1) extends BlackBox(Map("CONST" -> IntParam(value), "WIDTH" -> IntParam(width))) with HasBlackBoxResource { val io = IO(new Bundle { val io = Analog(width.W) }) - addResource("/barstools/iocell/vsrc/Analog.v") + addResource("/vsrc/Analog.v") } object AnalogConst { diff --git a/generators/chipyard/src/main/scala/iocell/IOCell.scala b/generators/chipyard/src/main/scala/iocell/IOCell.scala index d38f8406e..5f0129b87 100644 --- 
a/generators/chipyard/src/main/scala/iocell/IOCell.scala +++ b/generators/chipyard/src/main/scala/iocell/IOCell.scala @@ -1,6 +1,6 @@ // See LICENSE for license details -package barstools.iocell.chisel +package chipyard.iocell import chisel3._ import chisel3.util.{Cat, HasBlackBoxInline} diff --git a/generators/firechip/src/main/scala/BridgeBinders.scala b/generators/firechip/src/main/scala/BridgeBinders.scala index 550893589..48ea9bb30 100644 --- a/generators/firechip/src/main/scala/BridgeBinders.scala +++ b/generators/firechip/src/main/scala/BridgeBinders.scala @@ -26,7 +26,7 @@ import firesim.configs.MemModelKey import tracegen.{TraceGenSystemModuleImp} import cva6.CVA6Tile -import barstools.iocell.chisel._ +import chipyard.iocell._ import chipyard.iobinders._ import chipyard._ import chipyard.harness._ diff --git a/scripts/tutorial-patches/build.sbt.patch b/scripts/tutorial-patches/build.sbt.patch index b64d7a439..613790c9e 100644 --- a/scripts/tutorial-patches/build.sbt.patch +++ b/scripts/tutorial-patches/build.sbt.patch @@ -5,7 +5,7 @@ index c3be6161..2a6d7160 100644 @@ -147,7 +147,7 @@ lazy val testchipip = (project in file("generators/testchipip")) lazy val chipyard = (project in file("generators/chipyard")) - .dependsOn(testchipip, rocketchip, boom, hwacha, rocketchip_blocks, rocketchip_inclusive_cache, iocell, + .dependsOn(testchipip, rocketchip, boom, hwacha, rocketchip_blocks, rocketchip_inclusive_cache, - sha3, // On separate line to allow for cleaner tutorial-setup patches + //sha3, // On separate line to allow for cleaner tutorial-setup patches dsptools, rocket_dsp_utils, diff --git a/tools/tapeout/src/main/scala/macros/CostMetric.scala b/tools/tapeout/src/main/scala/macros/CostMetric.scala index 647889d34..1be339670 100644 --- a/tools/tapeout/src/main/scala/macros/CostMetric.scala +++ b/tools/tapeout/src/main/scala/macros/CostMetric.scala @@ -1,6 +1,6 @@ // See LICENSE for license details. -package barstools.macros +package tapeout.macros /** Trait which can calculate the cost of compiling a memory against a certain * library memory macro using a cost function. diff --git a/tools/tapeout/src/main/scala/macros/MacroCompiler.scala b/tools/tapeout/src/main/scala/macros/MacroCompiler.scala index 459992f2d..fbf857fea 100644 --- a/tools/tapeout/src/main/scala/macros/MacroCompiler.scala +++ b/tools/tapeout/src/main/scala/macros/MacroCompiler.scala @@ -5,9 +5,9 @@ * lib - technology SRAM(s) to use to compile mem */ -package barstools.macros +package tapeout.macros -import barstools.macros.Utils._ +import tapeout.macros.Utils._ import firrtl.Utils.{one, zero, BoolType} import firrtl.annotations._ import firrtl.ir._ diff --git a/tools/tapeout/src/main/scala/macros/SynFlopsPass.scala b/tools/tapeout/src/main/scala/macros/SynFlopsPass.scala index 5dda0476a..0c1dd8043 100644 --- a/tools/tapeout/src/main/scala/macros/SynFlopsPass.scala +++ b/tools/tapeout/src/main/scala/macros/SynFlopsPass.scala @@ -1,8 +1,8 @@ // See LICENSE for license details. -package barstools.macros +package tapeout.macros -import barstools.macros.Utils._ +import tapeout.macros.Utils._ import firrtl.Utils.{one, zero} import firrtl._ import firrtl.ir._ diff --git a/tools/tapeout/src/main/scala/macros/Utils.scala b/tools/tapeout/src/main/scala/macros/Utils.scala index 2bcd116fb..8715ec105 100644 --- a/tools/tapeout/src/main/scala/macros/Utils.scala +++ b/tools/tapeout/src/main/scala/macros/Utils.scala @@ -1,6 +1,6 @@ // See LICENSE for license details. 
-package barstools.macros +package tapeout.macros import firrtl.Utils.BoolType import firrtl.ir._ diff --git a/tools/tapeout/src/main/scala/transforms/ExtraTransforms.scala b/tools/tapeout/src/main/scala/transforms/ExtraTransforms.scala index f7ef25c6c..4b427653b 100644 --- a/tools/tapeout/src/main/scala/transforms/ExtraTransforms.scala +++ b/tools/tapeout/src/main/scala/transforms/ExtraTransforms.scala @@ -1,6 +1,6 @@ // See LICENSE for license details. -package barstools.tapeout.transforms +package tapeout.transforms import firrtl.Mappers._ import firrtl._ diff --git a/tools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala b/tools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala index 08cd8d04a..5a1a3fdbb 100644 --- a/tools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala +++ b/tools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala @@ -1,6 +1,6 @@ -package barstools.tapeout.transforms +package tapeout.transforms -import barstools.tapeout.transforms.stage._ +import tapeout.transforms.stage._ import firrtl._ import firrtl.annotations._ import firrtl.ir._ diff --git a/tools/tapeout/src/main/scala/transforms/retime/Retime.scala b/tools/tapeout/src/main/scala/transforms/retime/Retime.scala index 1a9d66685..321f3d424 100644 --- a/tools/tapeout/src/main/scala/transforms/retime/Retime.scala +++ b/tools/tapeout/src/main/scala/transforms/retime/Retime.scala @@ -1,6 +1,6 @@ // See LICENSE for license details. -package barstools.tapeout.transforms.retime +package tapeout.transforms.retime import chisel3.experimental.RunFirrtlTransform import firrtl.annotations._ diff --git a/tools/tapeout/src/main/scala/transforms/stage/TapeoutStage.scala b/tools/tapeout/src/main/scala/transforms/stage/TapeoutStage.scala index cdae1bfd7..5fdadf236 100644 --- a/tools/tapeout/src/main/scala/transforms/stage/TapeoutStage.scala +++ b/tools/tapeout/src/main/scala/transforms/stage/TapeoutStage.scala @@ -1,8 +1,8 @@ // See LICENSE for license details. -package barstools.tapeout.transforms.stage +package tapeout.transforms.stage -import barstools.tapeout.transforms.GenerateModelStageMain +import tapeout.transforms.GenerateModelStageMain import chisel3.stage.ChiselCli import firrtl.stage.{RunFirrtlTransformAnnotation} import firrtl.AnnotationSeq diff --git a/tools/tapeout/src/main/scala/transforms/utils/FileUtils.scala b/tools/tapeout/src/main/scala/transforms/utils/FileUtils.scala index 78d33e103..5c18aa18f 100644 --- a/tools/tapeout/src/main/scala/transforms/utils/FileUtils.scala +++ b/tools/tapeout/src/main/scala/transforms/utils/FileUtils.scala @@ -1,6 +1,6 @@ // See LICENSE for license details. 
-package barstools.tapeout.transforms.utils +package tapeout.transforms.utils import chisel3.experimental.{annotate, ChiselAnnotation} import firrtl._ diff --git a/tools/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala b/tools/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala index 45502d6d4..ef417e2c2 100644 --- a/tools/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala +++ b/tools/tapeout/src/main/scala/transforms/utils/LowerAnnotations.scala @@ -1,4 +1,4 @@ -package barstools.tapeout.transforms.utils +package tapeout.transforms.utils object LowerName { def apply(s: String): String = s.replace(".", "_").replace("[", "_").replace("]", "") diff --git a/tools/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala b/tools/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala index ef98b294e..6c277fea6 100644 --- a/tools/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala +++ b/tools/tapeout/src/main/scala/transforms/utils/ProgrammaticBundle.scala @@ -1,4 +1,4 @@ -package barstools.tapeout.transforms.utils +package tapeout.transforms.utils import chisel3._ diff --git a/tools/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala b/tools/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala index 9b58e083b..0dae7ffa5 100644 --- a/tools/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala +++ b/tools/tapeout/src/main/scala/transforms/utils/YamlHelpers.scala @@ -1,4 +1,4 @@ -package barstools.tapeout.transforms.utils +package tapeout.transforms.utils import firrtl.FileUtils import net.jcazevedo.moultingyaml._ From 088460f26636841387de2ce3980df7b0f4d3cdad Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Fri, 19 Apr 2024 11:38:00 -0700 Subject: [PATCH 273/273] Update docs to reflect in-tree barstools --- docs/Advanced-Concepts/Resources.rst | 4 ---- docs/Chipyard-Basics/Chipyard-Components.rst | 4 ++-- docs/Customization/Firrtl-Transforms.rst | 6 +++--- .../Customization/Incorporating-Verilog-Blocks.rst | 13 ------------- docs/Tools/{Barstools.rst => Tapeout-Tools.rst} | 14 +++++++------- docs/Tools/index.rst | 2 +- docs/VLSI/Basic-Flow.rst | 2 +- docs/VLSI/Building-A-Chip.rst | 2 +- 8 files changed, 15 insertions(+), 32 deletions(-) rename docs/Tools/{Barstools.rst => Tapeout-Tools.rst} (95%) diff --git a/docs/Advanced-Concepts/Resources.rst b/docs/Advanced-Concepts/Resources.rst index fbfaad60a..99125194e 100644 --- a/docs/Advanced-Concepts/Resources.rst +++ b/docs/Advanced-Concepts/Resources.rst @@ -33,7 +33,3 @@ For example: lazy val myAwesomeAccel = (project in file("generators/myAwesomeAccelFolder")) .dependsOn(rocketchip) .settings(commonSettings) - - lazy val tapeout = conditionalDependsOn(project in file("./tools/barstools/tapeout/")) - .dependsOn(myAwesomeAccel) - .settings(commonSettings) diff --git a/docs/Chipyard-Basics/Chipyard-Components.rst b/docs/Chipyard-Basics/Chipyard-Components.rst index 669bfdfc5..7fdbe26da 100644 --- a/docs/Chipyard-Basics/Chipyard-Components.rst +++ b/docs/Chipyard-Basics/Chipyard-Components.rst @@ -79,9 +79,9 @@ Tools FIRRTL enables digital circuits manipulation between Chisel elaboration and Verilog generation. See :ref:`Tools/FIRRTL:FIRRTL` for more information. -**Barstools** +**Tapeout-Tools (Formerly Barstools)** A collection of common FIRRTL transformations used to manipulate a digital circuit without changing the generator source RTL. - See :ref:`Tools/Barstools:Barstools` for more information. + See :ref:`Tools/Tapeout-Tools:Tapeout-Tools` for more information. 
**Dsptools** A Chisel library for writing custom signal processing hardware, as well as integrating custom signal processing hardware into an SoC (especially a Rocket-based SoC). diff --git a/docs/Customization/Firrtl-Transforms.rst b/docs/Customization/Firrtl-Transforms.rst index 1113bde63..7de7aadbd 100644 --- a/docs/Customization/Firrtl-Transforms.rst +++ b/docs/Customization/Firrtl-Transforms.rst @@ -22,18 +22,18 @@ Where to add transforms In Chipyard, the FIRRTL compiler is called multiple times to create a "Top" file that contains the DUT and a "Model" file containing the test harness, which instantiates the DUT. The "Model" file does not contain the DUT's module definition or any of its submodules. -This is done by the ``tapeout`` SBT project (located in ``tools/barstools/tapeout``) which calls ``GenerateModelStageMain`` (a function that wraps the multiple FIRRTL compiler calls and extra transforms). +This is done by the ``tapeout`` SBT project (located in ``tools/tapeout``) which calls ``GenerateModelStageMain`` (a function that wraps the multiple FIRRTL compiler calls and extra transforms). .. literalinclude:: ../../common.mk :language: make :start-after: DOC include start: FirrtlCompiler :end-before: DOC include end: FirrtlCompiler -If you look inside of the `tools/barstools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala `__ file, +If you look inside of the ``tools/tapeout/src/main/scala/transforms/GenerateModelStageMain.scala`` file, you can see that FIRRTL is invoked for "Model". Currently, the FIRRTL compiler is agnostic to the ``TOP`` and ``MODEL`` differentiation, and the user is responsible for providing annotations that will inform the compiler where(``TOP`` vs ``MODEL``) to perform the custom FIRRTL transformations. -For more information on Barstools, please visit the :ref:`Tools/Barstools:Barstools` section. +For more information on the Tapeout sub-project, please visit the :ref:`Tools/Tapeout-Tools:Tapeout-Tools` section. Examples of transforms ---------------------- diff --git a/docs/Customization/Incorporating-Verilog-Blocks.rst b/docs/Customization/Incorporating-Verilog-Blocks.rst index fde44411f..9d7ceb814 100644 --- a/docs/Customization/Incorporating-Verilog-Blocks.rst +++ b/docs/Customization/Incorporating-Verilog-Blocks.rst @@ -33,19 +33,6 @@ different directory from Chisel (Scala) sources. vsrc/ YourFile.v -In addition to the steps outlined in the previous section on adding a -project to the ``build.sbt`` at the top level, it is also necessary to -add any projects that contain Verilog IP as dependencies to the -``tapeout`` project. This ensures that the Verilog sources are visible -to the downstream FIRRTL passes that provide utilities for integrating -Verilog files into the build process, which are part of the -``tapeout`` package in ``barstools/tapeout``. - -.. code-block:: scala - - lazy val tapeout = conditionalDependsOn(project in file("./tools/barstools/tapeout/")) - .dependsOn(chisel_testers, example, yourproject) - .settings(commonSettings) For this concrete GCD example, we will be using a ``GCDMMIOBlackBox`` Verilog module that is defined in the ``chipyard`` project. 
The Scala diff --git a/docs/Tools/Barstools.rst b/docs/Tools/Tapeout-Tools.rst similarity index 95% rename from docs/Tools/Barstools.rst rename to docs/Tools/Tapeout-Tools.rst index fa4176c45..b522c46d0 100644 --- a/docs/Tools/Barstools.rst +++ b/docs/Tools/Tapeout-Tools.rst @@ -1,7 +1,7 @@ -Barstools +Tapeout-Tools =============================== -Barstools is a collection of useful FIRRTL transformations and compilers to help the build process. +Tapeout-Tools is a collection of useful FIRRTL transformations and compilers to help the build process. Included in the tools are a MacroCompiler (used to map Chisel memory constructs to vendor SRAMs), FIRRTL transforms (to separate harness and top-level SoC files), and more. Mapping technology SRAMs (MacroCompiler) @@ -23,16 +23,16 @@ An external module reference is a FIRRTL construct that enables a design to refe A list of unique SRAM configurations is output to a ``.conf`` file by FIRRTL, which is used to map technology SRAMs. Without this transform, FIRRTL will map all ``SeqMem`` s to flip-flop arrays with equivalent behavior, which may lead to a design that is difficult to route. -The ``.conf`` file is consumed by a tool called MacroCompiler, which is part of the :ref:`Tools/Barstools:Barstools` scala package. +The ``.conf`` file is consumed by a tool called MacroCompiler, which is part of the :ref:`Tools/Tapeout-Tools:Tapeout-Tools` scala package. MacroCompiler is also passed an ``.mdf`` file that describes the available list of technology SRAMs or the capabilities of the SRAM compiler, if one is provided by the foundry. -Typically a foundry SRAM compiler will be able to generate a set of different SRAMs collateral based on some requirements on size, aspect ratio, etc. (see :ref:`Tools/Barstools:SRAM MDF Fields`). +Typically a foundry SRAM compiler will be able to generate a set of different SRAMs collateral based on some requirements on size, aspect ratio, etc. (see :ref:`Tools/Tapeout-Tools:SRAM MDF Fields`). Using a user-customizable cost function, MacroCompiler will select the SRAMs that are the best fit for each dimensionality in the ``.conf`` file. This may include over provisioning (e.g. using a 64x1024 SRAM for a requested 60x1024, if the latter is not available) or arraying. Arraying can be done in both width and depth, as well as to solve masking constraints. For example, a 128x2048 array could be composed of four 64x1024 arrays, with two macros in parallel to create two 128x1024 virtual SRAMs which are combinationally muxed to add depth. If this macro requires byte-granularity write masking, but no technology SRAMs support masking, then the tool may choose to use thirty-two 8x1024 arrays in a similar configuration. You may wish to create a cache of your available SRAM macros either manually, or via a script. A reference script for creating a JSON of your SRAM macros is in the `asap7 technology library folder `__. -For information on writing ``.mdf`` files, look at `MDF on github `__ and a brief description in :ref:`Tools/Barstools:SRAM MDF Fields` section. +For information on writing ``.mdf`` files, look at `MDF on github `__ and a brief description in :ref:`Tools/Tapeout-Tools:SRAM MDF Fields` section. The output of MacroCompiler is a Verilog file containing modules that wrap the technology SRAMs into the specified interface names from the ``.conf``. If the technology supports an SRAM compiler, then MacroCompiler will also emit HammerIR that can be passed to Hammer to run the compiler itself and generate design collateral. 
@@ -105,7 +105,7 @@ This is necessary to facilitate post-synthesis and post-place-and-route simulati Simulations, after your design goes through a VLSI flow, will use the verilog netlist generated from the flow and will need an untouched test harness to drive it. Separating these components into separate files makes this straightforward. Without the separation the file that included the test harness would also redefine the DUT which is often disallowed in simulation tools. -To do this, there is a FIRRTL ``App`` in :ref:`Tools/Barstools:Barstools` called ``GenerateTopAndHarness``, which runs the appropriate transforms to elaborate the modules separately. +To do this, there is a FIRRTL ``App`` in :ref:`Tools/Tapeout-Tools:Tapeout-Tools` called ``GenerateTopAndHarness``, which runs the appropriate transforms to elaborate the modules separately. This also renames modules in the test harness so that any modules that are instantiated in both the test harness and the chip are uniquified. .. Note:: For VLSI projects, this ``App`` is run instead of the normal FIRRTL ``App`` to elaborate Verilog. @@ -133,5 +133,5 @@ This, unfortunately, breaks the process-agnostic RTL abstraction, so it is recom The simplest way to do this is to have a config fragment that when included updates instantiates the IO cells and connects them in the test harness. When simulating chip-specific designs, it is important to include the IO cells. The IO cell behavioral models will often assert if they are connected incorrectly, which is a useful runtime check. -They also keep the IO interface at the chip and test harness boundary (see :ref:`Tools/Barstools:Separating the Top module from the TestHarness module`) consistent after synthesis and place-and-route, +They also keep the IO interface at the chip and test harness boundary (see :ref:`Tools/Tapeout-Tools:Separating the Top module from the TestHarness module`) consistent after synthesis and place-and-route, which allows the RTL simulation test harness to be reused. diff --git a/docs/Tools/index.rst b/docs/Tools/index.rst index f8d0be95a..491bd60cd 100644 --- a/docs/Tools/index.rst +++ b/docs/Tools/index.rst @@ -12,4 +12,4 @@ The following pages will introduce them, and how we can use them in order to gen FIRRTL Treadle Dsptools - Barstools + Tapeout-Tools diff --git a/docs/VLSI/Basic-Flow.rst b/docs/VLSI/Basic-Flow.rst index 10a7af2f6..de1acc74f 100644 --- a/docs/VLSI/Basic-Flow.rst +++ b/docs/VLSI/Basic-Flow.rst @@ -56,7 +56,7 @@ We will do so by calling ``make buildfile`` with appropriate Chipyard configurat As in the rest of the Chipyard flows, we specify our SoC configuration using the ``CONFIG`` make variable. However, unlike the rest of the Chipyard flows, in the case of physical design we might be interested in working in a hierarchical fashion and therefore we would like to work on a single module. Therefore, we can also specify a ``VLSI_TOP`` make variable with the same of a specific Verilog module (which should also match the name of the equivalent Chisel module) which we would like to work on. -The makefile will automatically call tools such as Barstools and the MacroCompiler (:ref:`Tools/Barstools:barstools`) in order to make the generated Verilog more VLSI friendly. +The makefile will automatically call tools such as Tapeout-Tools and the MacroCompiler (:ref:`Tools/Tapeout-Tools:Tapeout-Tools`) in order to make the generated Verilog more VLSI friendly. 
By default, the MacroCompiler will attempt to map memories into the SRAM options within the Hammer technology plugin. However, if you are working with a new process technology and prefer to work with flip-flop arrays, you can configure the MacroCompiler using the ``TOP_MACROCOMPILER_MODE`` make variable. For example, if your technology plugin does not have an SRAM compiler ready, you can use the ``TOP_MACROCOMPILER_MODE='--mode synflops'`` option (Note that synthesizing a design with only flipflops is very slow and will often may not meet constraints). We call the ``make buildfile`` command while also specifying the name of the process technology we are working with (same ``tech_name`` for the configuration files and plugin name) and the configuration files we created. Note, in the ASAP7 tutorial ((:ref:`tutorial`)) these configuration files are merged into a single file called ``example-asap7.yml``. diff --git a/docs/VLSI/Building-A-Chip.rst b/docs/VLSI/Building-A-Chip.rst index adc798021..c635bc5f8 100644 --- a/docs/VLSI/Building-A-Chip.rst +++ b/docs/VLSI/Building-A-Chip.rst @@ -10,7 +10,7 @@ Transforming the RTL -------------------- Building a chip requires specializing the generic verilog emitted by FIRRTL to adhere to the constraints imposed by the technology used for fabrication. -This includes mapping Chisel memories to available technology macros such as SRAMs, mapping the input and output of your chip to connect to technology IO cells, see :ref:`Tools/Barstools:Barstools`. +This includes mapping Chisel memories to available technology macros such as SRAMs, mapping the input and output of your chip to connect to technology IO cells, see :ref:`Tools/Tapeout-Tools:Tapeout-Tools`. In addition to these required transformations, it may also be beneficial to transform the RTL to make it more amenable to hierarchical physical design easier. This often includes modifying the logical hierarchy to match the physical hierarchy through grouping components together or flattening components into a single larger module.
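
With barstools merged in-tree, the MacroCompiler main class referenced by the $(TOP_SMEMS_FILE) rule in common.mk becomes tapeout.macros.MacroCompiler. A minimal stand-alone sketch of that invocation follows; it assumes the run_jar_scala_main helper reduces to a plain "java -cp" call on the tapeout assembly jar, which is not shown in these patches, and the jar path and file names are illustrative only. The flags themselves mirror the common.mk rule in PATCH 272.

    # Sketch only, not taken verbatim from these patches: assumes run_jar_scala_main
    # expands to "java -cp <assembly jar> <main class> <args>" and that tapeout.jar is
    # the sbt-assembly output of the in-tree tools/tapeout project (paths illustrative).
    # -n: memory configuration (.conf) emitted by FIRRTL
    # -v: output Verilog wrapping the mapped memories
    # -f: output FIRRTL for the compiled memories
    # --mode synflops: the default TOP_MACROCOMPILER_MODE for simulation-only builds
    java -cp tapeout.jar tapeout.macros.MacroCompiler \
      -n ChipTop.mems.conf \
      -v ChipTop.mems.v \
      -f ChipTop.mems.fir \
      --mode synflops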