From 1d91c11daabf63aa6e1f8703d6a157f4a1a31a40 Mon Sep 17 00:00:00 2001 From: Stephen Kiely Date: Sat, 2 Nov 2019 12:32:44 -0500 Subject: [PATCH 1/6] Add spellcheck script using aspell. Also corrected all the spelling issues in the current labs. Signed-off-by: Stephen Kiely --- .aspell.en.pws | 242 ++++++++++++++++++ .travis.yml | 35 ++- check-spelling.sh | 69 +++++ .../lesson-16-jinja/stage3/guide.md | 2 +- .../lesson-16-jinja/stage4/guide.md | 2 +- .../lesson-16-jinja/stage5/guide.md | 4 +- .../lesson-50-bash/stage2/guide.md | 4 +- .../lesson-50-bash/stage3/guide.md | 2 +- .../lesson-50-bash/stage4/guide.md | 2 +- .../lesson-15-stackstorm/stage3/guide.md | 2 +- lessons/tools/lesson-24-pyez/stage2/guide.md | 2 +- .../tools/lesson-25-junosjet/stage1/guide.md | 4 +- .../tools/lesson-25-junosjet/stage3/guide.md | 2 +- .../tools/lesson-25-junosjet/stage4/guide.md | 6 +- .../tools/lesson-25-junosjet/stage5/guide.md | 2 +- .../lesson-26-openconfig/stage2/guide.md | 2 +- .../lesson-26-openconfig/stage3/guide.md | 2 +- .../lesson-26-openconfig/stage4/guide.md | 2 +- lessons/tools/lesson-30-salt/stage1/guide.md | 2 +- .../tools/lesson-31-terraform/stage1/guide.md | 2 +- .../tools/lesson-31-terraform/stage3/guide.md | 2 +- .../lesson-32-stigcompliance/stage1/guide.md | 4 +- .../lesson-32-stigcompliance/stage2/guide.md | 2 +- .../lesson-32-stigcompliance/stage3/guide.md | 26 +- .../stage1/guide.md | 2 +- .../stage2/guide.md | 2 +- .../lesson-34-configbackup/stage1/guide.md | 2 +- .../stage1/guide.md | 8 +- .../stage2/guide.md | 8 +- .../stage3/guide.md | 4 +- .../stage4/guide.md | 4 +- 31 files changed, 389 insertions(+), 65 deletions(-) create mode 100644 .aspell.en.pws create mode 100644 check-spelling.sh diff --git a/.aspell.en.pws b/.aspell.en.pws new file mode 100644 index 00000000..b9d5b663 --- /dev/null +++ b/.aspell.en.pws @@ -0,0 +1,242 @@ +personal_ws-1.1 en 236 +ACL +APIs +ActionChain +ActionChain's +ActionChains +Ansible +Arista +Arista's +BGP +CMDB +ConfigTable 
+ConfigTables +DateTime +DevOps +Eg +FRR +FactoryLoader +GRPC +GW +Getter +Git's +HCL +Hashicorp +Hostname +hostname +hostnames +html +ICMP +IDL +IFA +IFD +IOS +Idempotency +Init +JSON +JSNAPy +Javascript +Jinja +Junos +JunosDevice +MGMT +MPLS +MQTT +MTU +Mistral +NAPALM's +NRE +NRELabs +NetConf +NetOps +NX +OpTable +OpTables +OperatingSystem +OpenConfig +OpenStack +Orquesta +OSPF +Ok +PFE +PNG +POSIX +Peasy +PyCharm +PyEAPI +PyEZ +PyEZ's +PyEz +QFX +RPC +RPC's +RPCs +RPD +SLAX +SNMP +SSL +STIG +STIGs +StackStorm +StackStorm's +Stackstorm +Sudhishna +TBD +TechLibrary +Terraform +Terraform's +ToDD +Vinayak +VLAN +VLANs +VR +XPath +XSLT +YAML +YAML's +antidotepassword +asynchronously +backend +bgp +boolean +codebase +configs +cURL +cli +cmd +codemirror +com +conf +config +de +declaratively +dev +dict +diff +distro +eAPI +eBGP +echofoo +echos +eg +endpoint +endpoints +env +etree +eventd +exportvar +extensibility +facto +filesize +filesystem +findtext +focussed +foos +fu +getopts +getter +getters +github +greenfield +gRPC +iBGP +ifl +inet +init +influxdb +ing +integrations +intf +io +ip +ipython +jinja +jnpr +jq +jsd +junos +jupyter +jweidley +kernelspec +linux +markdown +mayeates +mimetype +mtu +namespace +nbconvert +nbformat +netconf +nexthop +nonnative +openconfig +parseable +pprint +pre +preinstalled +preloaded +prepopulated +programmability +programmatically +proto +protoc +provisioner +py +pygments +qfx +raylam +readonly +repeatable +repo +rpc +rpc's +runtime +scalable +sh +shahbhoomi +sipphone +situ +sls +snmp +sourcing +squeezy +stackstorm +stdout +stp +str +subcommand +subcommands +subexecutions +substring +switchport +syntaxes +tangentially +teardown +templating +terraform +tf +tmp +toddproject +tradeoffs +txt +undercase +unordered +untracked +uplink +uplinks +uptime +vlans +vpn +vQFX +vqfx +walkthrough +xe +xml +xpath +yaml +yml +reconfiguring +VM +TCP +reachability +instantiation \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 
c9ca84ff..17790ba1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,11 +1,24 @@ -language: generic -notifications: - email: false -services: - - docker -before_install: - - docker pull antidotelabs/syringe:latest - - docker run -v $(pwd):/antidote antidotelabs/syringe syrctl validate /antidote -install: - - ./check-changelog.sh -sudo: false +matrix: + include: + - language: generic + notifications: + email: false + addons: + apt: + packages: + - aspell + - aspell-en + script: + - ./check-spelling.sh + + - language: generic + notifications: + email: false + services: + - docker + before_install: + - docker pull antidotelabs/syringe:latest + - docker run -v $(pwd):/antidote antidotelabs/syringe syrctl validate /antidote + install: + - ./check-changelog.sh + sudo: false \ No newline at end of file diff --git a/check-spelling.sh b/check-spelling.sh new file mode 100644 index 00000000..7fbe52f9 --- /dev/null +++ b/check-spelling.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# requires apt packages: aspell, aspell-en + +[[ "$TRAVIS_PULL_REQUEST" == "false" ]] && exit 0 # bypass script if not a pull request + +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;36m' +NC='\033[0m' # No Color + +FILES_CHANGED=`(git diff --name-only $TRAVIS_COMMIT_RANGE || true) | grep guide.md && (git diff --name-only $TRAVIS_COMMIT_RANGE || true) | grep notebook.ipynb` +# FILES_CHANGED=`git ls-tree -r spellcheck --name-only | grep guide.md && git ls-tree -r spellcheck --name-only | grep notebook.ipynb` + +if [ -z "$FILES_CHANGED" ] +then + echo -e "$GREEN>> No markdown file to check $NC" + + exit 0; +fi + +echo -e "$BLUE>> Following guide or notebook files were changed in this pull request (commit range: $TRAVIS_COMMIT_RANGE):$NC" +echo "$FILES_CHANGED" + +TOTAL_NB_MISSPELLED=0 +echo -e "$BLUE>> Running spellchecker...$NC" +for FILE in $FILES_CHANGED; do + CONTENTS=`cat $(echo "$FILE" | sed -E ':a;N;$!ba;s/\n/ /g')` + # delete markdown code blocks + CONTENTS=$(echo "$CONTENTS" | sed '/^```/,/^```/d') + 
+    # delete html pre blocks
+    CONTENTS=$(echo "$CONTENTS" | sed '/^<pre>/,/^<\/pre>/d')
+    # convert markdown inline code to html code
+    CONTENTS=$(echo "$CONTENTS" | sed -E 's/(^|[^\\`])`([^`]+)`([^`]|$)/\1<code>\2<\/code>\3/g')
+    # delete html code blocks
+    CONTENTS=$(echo "$CONTENTS" | sed -r 's/<code>[^<]+<\/code>//g')
+    # delete html tags
+    CONTENTS=`echo "$CONTENTS" | sed -E 's/<([^<]+)>//g'`
+    # delete markdown robot code blocks
+    CONTENTS=$(echo "$CONTENTS" | sed '/^>```/,/^>```/d')
+
+    #echo -e "$BLUE>> Content that will be checked:$NC"
+    #echo "$CONTENTS"
+
+    #echo -e "$BLUE>> Running spellchecker...$NC"
+    MISSPELLED=`echo "$CONTENTS" | aspell --lang=en --encoding=utf-8 --personal=./.aspell.en.pws list | sort -u`
+
+    NB_MISSPELLED=`echo "$MISSPELLED" | wc -w`
+
+    if [ "$NB_MISSPELLED" -gt 0 ]
+    then
+        TOTAL_NB_MISSPELLED=$((TOTAL_NB_MISSPELLED+NB_MISSPELLED))
+        # echo -e "$RED>> Words that might be misspelled, please check:$NC"
+        MISSPELLED=`echo "$MISSPELLED" | sed -E ':a;N;$!ba;s/\n/, /g'`
+        # echo "$MISSPELLED"
+        echo -e "\n$RED>> $FILE $NC"
+        echo -e "$RED>> $NB_MISSPELLED words might be misspelled, please check them:$NC"
+        echo -e "$MISSPELLED"
+    fi
+done
+
+if [ "$TOTAL_NB_MISSPELLED" -gt 0 ]
+then
+    echo -e "\nTotal Misspelled words: $TOTAL_NB_MISSPELLED"
+    exit 1
+else
+        COMMENT="No spelling errors, congratulations!"
+        echo -e "$GREEN>> $COMMENT $NC"
+fi
+exit 0
\ No newline at end of file
diff --git a/lessons/fundamentals/lesson-16-jinja/stage3/guide.md b/lessons/fundamentals/lesson-16-jinja/stage3/guide.md
index e283ceac..c9b6b5cc 100644
--- a/lessons/fundamentals/lesson-16-jinja/stage3/guide.md
+++ b/lessons/fundamentals/lesson-16-jinja/stage3/guide.md
@@ -26,7 +26,7 @@ interfaces = [{'interface': 'ge-0/0/0', 'ip_address': '192.168.1.1'},
 ```
 
 
-In Part2, we generated configurations for all interfaces that were passed to the template. Wwhat if you are only interested in generating the  configuration for the management interface? For this particular example we will be using the `if` condition. It is similar to the python `if` condition, except for some slight syntax differences:
+In Part2, we generated configurations for all interfaces that were passed to the template. What if you are only interested in generating the  configuration for the management interface? For this particular example we will be using the `if` condition. It is similar to the python `if` condition, except for some slight syntax differences:
 
 ```
 {% if condition %}
diff --git a/lessons/fundamentals/lesson-16-jinja/stage4/guide.md b/lessons/fundamentals/lesson-16-jinja/stage4/guide.md
index 7dfee0fa..82c6a573 100644
--- a/lessons/fundamentals/lesson-16-jinja/stage4/guide.md
+++ b/lessons/fundamentals/lesson-16-jinja/stage4/guide.md
@@ -73,7 +73,7 @@ Now that the template is defined we will render the config template for each dev
 The Python `enumerate()` function keeps a count of loop index so that we can print the number of device we are looping over, so we can keep track of the configs in our output.
 
 `%s` in the first print statement will take the value of `device_number` for the devices in `all_devices`.
-In case you are wondering what is `print('-'*30)`, it is just to make the output more presenatable, you will see when you run the below snippet!!
+In case you are wondering what is `print('-'*30)`, it is just to make the output more presentable, you will see when you run the below snippet!!
 
 
 for dev_number, device in enumerate(all_devices, 1):
diff --git a/lessons/fundamentals/lesson-16-jinja/stage5/guide.md b/lessons/fundamentals/lesson-16-jinja/stage5/guide.md
index d003be0f..3edf453f 100644
--- a/lessons/fundamentals/lesson-16-jinja/stage5/guide.md
+++ b/lessons/fundamentals/lesson-16-jinja/stage5/guide.md
@@ -16,7 +16,7 @@ cat dir1/static_route.j2
 ```
 
 
-Start the python shell and import the `FileSystemLoader` and `Environment` for loading the Jinja template. The `env` instance allows you to use an external Jinja template using FileSystemLoader:
+Start the python shell and import the `FileSystemLoader` and `Environment` for loading the Jinja template. The `env` instance allows you to use an external Jinja template using `FileSystemLoader`:
 
 ```
 python
@@ -44,7 +44,7 @@ quit()
 
 
 ### Example: 2
-Below is the `l3_interface.j2` template stored in dir2 sub-directory.
+Below is the `l3_interface.j2` template stored in `dir2` sub-directory.
 
 ```
 cat dir2/l3_interface.j2
diff --git a/lessons/fundamentals/lesson-50-bash/stage2/guide.md b/lessons/fundamentals/lesson-50-bash/stage2/guide.md
index f92cd6df..54eca1c7 100644
--- a/lessons/fundamentals/lesson-50-bash/stage2/guide.md
+++ b/lessons/fundamentals/lesson-50-bash/stage2/guide.md
@@ -40,7 +40,7 @@ export foo
 The export command sets the "export" attribute of the variable.  This makes 'foo' an *environment variable*.  This just means that child processes of the current shell can access these variables.  The shell that is created to execute 'echofoo.sh' is a child process.  Without the export attribute being set, 'foo' is just a *shell variable.*
 
 ---
-> **_NOTE:_**  Environment variable names should be all uppercase.  Shell variables should be all undercase.  It is possible to start a variable name with something other than a letter, but don't do that.  It's not recommended.  In either case, you should use underscores to seperate words in a variable's name.
+> **_NOTE:_**  Environment variable names should be all uppercase.  Shell variables should be all undercase.  It is possible to start a variable name with something other than a letter, but don't do that.  It's not recommended.  In either case, you should use underscores to separate words in a variable's name.
 
 ---
 
@@ -56,7 +56,7 @@ cat /antidote/stage2/exportvar.sh
 ```
 
 
-Ok, so we're going to create 'bar' then call a script to echo it's value back to us.  We know this won't work.  Then we export 'bar' and call that script again and of course we should now see the value of 'bar' echo'd back.
+Ok, so we're going to create 'bar' then call a script to echo its value back to us.  We know this won't work.  Then we export 'bar' and call that script again and of course we should now see the value of 'bar' echoed back.
 
 ```
 /antidote/stage2/exportvar.sh
diff --git a/lessons/fundamentals/lesson-50-bash/stage3/guide.md b/lessons/fundamentals/lesson-50-bash/stage3/guide.md
index f250c233..c913aabd 100644
--- a/lessons/fundamentals/lesson-50-bash/stage3/guide.md
+++ b/lessons/fundamentals/lesson-50-bash/stage3/guide.md
@@ -57,5 +57,5 @@ Now let's run it with just three parameters to see what happens.
 
 
 
-Excellent!  Now let's try another way you can pass parameters to a BASH script with the 'getopts' builtin BASH utility.
+Excellent!  Now let's try another way you can pass parameters to a BASH script with the 'getopts' built-in BASH utility.
 
diff --git a/lessons/fundamentals/lesson-50-bash/stage4/guide.md b/lessons/fundamentals/lesson-50-bash/stage4/guide.md
index f250c233..c913aabd 100644
--- a/lessons/fundamentals/lesson-50-bash/stage4/guide.md
+++ b/lessons/fundamentals/lesson-50-bash/stage4/guide.md
@@ -57,5 +57,5 @@ Now let's run it with just three parameters to see what happens.
 
 
 
-Excellent!  Now let's try another way you can pass parameters to a BASH script with the 'getopts' builtin BASH utility.
+Excellent!  Now let's try another way you can pass parameters to a BASH script with the 'getopts' built-in BASH utility.
 
diff --git a/lessons/tools/lesson-15-stackstorm/stage3/guide.md b/lessons/tools/lesson-15-stackstorm/stage3/guide.md
index 3146d4a7..96fe273e 100644
--- a/lessons/tools/lesson-15-stackstorm/stage3/guide.md
+++ b/lessons/tools/lesson-15-stackstorm/stage3/guide.md
@@ -26,7 +26,7 @@ The `examples` pack, which is one of the few built-in StackStorm packs, is prelo
 
 [ActionChains](https://docs.stackstorm.com/actionchain.html) are the simplest (but also the least robust) workflow option in StackStorm. If you want to run a sequence of actions, with some minimal error-handling, ActionChains are probably sufficient for your purposes. They're the "bare minimum" workflows option in StackStorm.
 
-The "hello world" example for ActionChains has to be the "echochain" - in particular, `examples.echochain_param`. This is a simple chain that takes a few parameters, and uses them as variables, inserted into a set of "echo" commands, resulting in some text in stdout:
+The "hello world" example for ActionChains has to be the `echochain` - in particular, `examples.echochain_param`. This is a simple chain that takes a few parameters, and uses them as variables, inserted into a set of "echo" commands, resulting in some text in stdout:
 
 ```
 cat /opt/stackstorm/packs/examples/actions/chains/echochain_param.yaml
diff --git a/lessons/tools/lesson-24-pyez/stage2/guide.md b/lessons/tools/lesson-24-pyez/stage2/guide.md
index 7c55bd07..543c1e6c 100644
--- a/lessons/tools/lesson-24-pyez/stage2/guide.md
+++ b/lessons/tools/lesson-24-pyez/stage2/guide.md
@@ -33,7 +33,7 @@ dev.open()
 
 This is a much simpler way to accomplish the same thing we did manually in the previous section!
 
-You should see `Device(vqfx)` in the terminal; that means PyEZ is successfully connect and authenticate to the Junos device, and return the Device object. If you're trying to implement this in your own environment, you may see an Exception raised, like a ConnectTimeoutError or ConnectAuthError. These mean that PyEZ cannot reach your device's NETCONF API, or that your credentials are invalid, respectively.
+You should see `Device(vqfx)` in the terminal; that means PyEZ successfully connected and authenticated to the Junos device, and returned the Device object. If you're trying to implement this in your own environment, you may see an Exception raised, like a `ConnectTimeoutError` or `ConnectAuthError`. These mean that PyEZ cannot reach your device's NETCONF API, or that your credentials are invalid, respectively.
 
 Next, we'll use a special `pprint()` function to print this device's basic information in a way that's easy for us to read.
 
diff --git a/lessons/tools/lesson-25-junosjet/stage1/guide.md b/lessons/tools/lesson-25-junosjet/stage1/guide.md
index 5ac45e48..06153fba 100644
--- a/lessons/tools/lesson-25-junosjet/stage1/guide.md
+++ b/lessons/tools/lesson-25-junosjet/stage1/guide.md
@@ -19,7 +19,7 @@ There are two types of services JET provides:
 * Request-response - An application can issue a request and wait for the response from Junos OS. (RPC model, gRPC based)
 * Notification - An application can receive asynchronous notification of events happening on Junos OS. (publish-subscribe model. MQTT based)
 
-For more informations about the JET can be found here.
+More information about JET can be found here.
 
 In this lab we are going to explore a off-box python JET application.
 
@@ -45,4 +45,4 @@ show system connections | match LISTEN | match "\.1883|\.32767"
 ```
 
 
-Now the Junos OS device is ready for off-box JET applications and it's time to get some action!  In the next chapter, we'll go through the notifiaction mechanism and collect some events from the MQTT event bus.
+Now the Junos OS device is ready for off-box JET applications and it's time to get some action!  In the next chapter, we'll go through the notification mechanism and collect some events from the MQTT event bus.
diff --git a/lessons/tools/lesson-25-junosjet/stage3/guide.md b/lessons/tools/lesson-25-junosjet/stage3/guide.md
index cfaabccd..8dc0944e 100644
--- a/lessons/tools/lesson-25-junosjet/stage3/guide.md
+++ b/lessons/tools/lesson-25-junosjet/stage3/guide.md
@@ -22,7 +22,7 @@ We will use the protoc compiler to compile the IDL file. You can download the ID
 
 It's time to start the lab!
 
-To save time, the IDL file is pre-downloaded already. First we need to unarchive it:
+To save time, the IDL file is pre-downloaded already. First we need to extract it:
 
 ```
 cd /antidote
diff --git a/lessons/tools/lesson-25-junosjet/stage4/guide.md b/lessons/tools/lesson-25-junosjet/stage4/guide.md
index 33004212..84729b0c 100644
--- a/lessons/tools/lesson-25-junosjet/stage4/guide.md
+++ b/lessons/tools/lesson-25-junosjet/stage4/guide.md
@@ -9,8 +9,8 @@
 In this stage, we demonstrate additional JET API capability by using JET firewall API to insert a new firewall filter to vQFX.
 
 
-#### Preperation
-Firstly, we repeat what we have done in previous stage - compile the IDL package, go to Python interactive prompt, import the JET GPRC module, and then login to the vQFX.
+#### Preparation
+Firstly, we repeat what we have done in previous stage - compile the IDL package, go to Python interactive prompt, import the JET GRPC module, and then login to the vQFX.
 
 ```
 cd /antidote
@@ -129,4 +129,4 @@ show firewall log
 ```
 
 
-This concludes our Junos JET gRPC demostration. In the next lesson are we going explore closed loop automation by employing both JET Notification Service and JET RPC.
+This concludes our Junos JET gRPC demonstration. In the next lesson we are going to explore closed loop automation by employing both JET Notification Service and JET RPC.
diff --git a/lessons/tools/lesson-25-junosjet/stage5/guide.md b/lessons/tools/lesson-25-junosjet/stage5/guide.md
index 9d680396..dcb7ba22 100644
--- a/lessons/tools/lesson-25-junosjet/stage5/guide.md
+++ b/lessons/tools/lesson-25-junosjet/stage5/guide.md
@@ -91,6 +91,6 @@ show firewall log
 
 The firewall log should capture the ping traffic. This verifies a firewall filter can be automatically provisioned to the interface dynamically without modifying the Junos configuration.
 
-This JET API automation capability opens up a whole new world to design and develop business logics within the network, such as customized traffic engineering, dynamic network protection, and so on.
+This JET API automation capability opens up a whole new world to design and develop business logic within the network, such as customized traffic engineering, dynamic network protection, and so on.
 
 This concludes our introduction to closed-loop automation using JET services and we hope you enjoy this course!
diff --git a/lessons/tools/lesson-26-openconfig/stage2/guide.md b/lessons/tools/lesson-26-openconfig/stage2/guide.md
index 8daa3dc4..f9ae56cb 100644
--- a/lessons/tools/lesson-26-openconfig/stage2/guide.md
+++ b/lessons/tools/lesson-26-openconfig/stage2/guide.md
@@ -48,7 +48,7 @@ cat openconfig-bgp.conf
 
 As you can see, the OpenConfig BGP schema contains common data that BGP requires. This configuration should be able to be provisioned to any network devices that support netconf with OpenConfig.
 
-Here we are using PyEZ (a python module for Junos Netconf connecitity) as a netconf client.
+Here we are using PyEZ (a python module for Junos Netconf connectivity) as a netconf client.
 Start a Python interactive prompt, then load the PyEZ module and create a Junos device object.
 
 _(If you're not familiar with PyEZ, here is the course [Junos Automation with PyEZ](https://labs.networkreliability.engineering/labs/?lessonId=24&lessonStage=1)!)_
diff --git a/lessons/tools/lesson-26-openconfig/stage3/guide.md b/lessons/tools/lesson-26-openconfig/stage3/guide.md
index a5eba690..5d2c4cc1 100644
--- a/lessons/tools/lesson-26-openconfig/stage3/guide.md
+++ b/lessons/tools/lesson-26-openconfig/stage3/guide.md
@@ -10,7 +10,7 @@ OpenConfig supports a variety of data models including BGP, interfaces, routing,
 
 Yang is a data modeling language for the Netconf protocol. For information about Yang, see [RFC 6020](https://tools.ietf.org/html/rfc6020).
 
-As discussed in the beginning of the course, the YANG configuration will be converted to device specific configuration via a translation mechanism. In Junos this translation mechansim is implemented by translation scripts.
+As discussed in the beginning of the course, the YANG configuration will be converted to device specific configuration via a translation mechanism. In Junos this translation mechanism is implemented by translation scripts.
 
 #### Custom YANG Modules with Junos
 Juniper offers OpenConfig translation scripts to convert OpenConfig based configuration data into Junos.
diff --git a/lessons/tools/lesson-26-openconfig/stage4/guide.md b/lessons/tools/lesson-26-openconfig/stage4/guide.md
index 8a2e3ffd..007ef9cc 100644
--- a/lessons/tools/lesson-26-openconfig/stage4/guide.md
+++ b/lessons/tools/lesson-26-openconfig/stage4/guide.md
@@ -71,6 +71,6 @@ show configuration | display translation-script translated-config | no-more
 ```
 
 
-To conclude we demostrated how to provision Junos device using custom YANG modules via CLI and NETCONF. Custom YANG modules is flexible that it can be used to define custom service oriented configration models to suit your business needs. Please feel free to to modify the configuration by yourself and see how the translation script helps you to provision the corresponding Junos configuration.
+To conclude, we demonstrated how to provision a Junos device using custom YANG modules via CLI and NETCONF. Custom YANG modules are flexible in that they can be used to define custom service-oriented configuration models to suit your business needs. Please feel free to modify the configuration by yourself and see how the translation script helps you to provision the corresponding Junos configuration.
 
 For further information about custom YANG on Junos please visit Juniper TechLibrary "[Understanding the Management of Nonnative YANG Modules on Devices Running Junos OS](https://www.juniper.net/documentation/en_US/junos/topics/concept/netconf-yang-modules-custom-managing-overview.html)"
diff --git a/lessons/tools/lesson-30-salt/stage1/guide.md b/lessons/tools/lesson-30-salt/stage1/guide.md
index dc7d795f..618b1032 100644
--- a/lessons/tools/lesson-30-salt/stage1/guide.md
+++ b/lessons/tools/lesson-30-salt/stage1/guide.md
@@ -11,7 +11,7 @@ infrastructure, including networks. Salt is open source and it comes packaged wi
 
 Salt has a server-agent architecture where the Salt Master is the server and the agent is installed in the Salt Minions. The role of the Salt Master is to manage the state of the infrastructure.
 
-The Salt Master and the Salt Minion can run on seprate machines or can run on the same machine itself. In our case, the Salt Master and Minion are running on the same machine. We'll begin by making sure the `salt-minion` and `salt-master` services are started:
+The Salt Master and the Salt Minion can run on separate machines or can run on the same machine itself. In our case, the Salt Master and Minion are running on the same machine. We'll begin by making sure the `salt-minion` and `salt-master` services are started:
 
 ```
 service salt-master restart
diff --git a/lessons/tools/lesson-31-terraform/stage1/guide.md b/lessons/tools/lesson-31-terraform/stage1/guide.md
index c5104a31..69d67c6e 100644
--- a/lessons/tools/lesson-31-terraform/stage1/guide.md
+++ b/lessons/tools/lesson-31-terraform/stage1/guide.md
@@ -13,7 +13,7 @@ Don't forget your network chops in this lesson. You'll configure an interface, a
 
 *Image borrowed from the Terraform website: terraform.io*
 
-Terraform for traditional networking is relatively unchartered ground because we don't think about switches, routers or firewalls as a set of immutable resources, or put in another way, things that can be created and destroyed easily. Imagine cutting out the bits you don't use and soldering them back in? Mentally it feels like a step, but in reality we can deal with this as an abstracted construct. This makes us think about things tangentially from what we're used to, but with little effort and thanks to NRE Labs, you can get a feel for how this works!
+Terraform for traditional networking is relatively uncharted ground because we don't think about switches, routers or firewalls as a set of immutable resources, or put in another way, things that can be created and destroyed easily. Imagine cutting out the bits you don't use and soldering them back in? Mentally it feels like a step, but in reality we can deal with this as an abstracted construct. This makes us think about things tangentially from what we're used to, but with little effort and thanks to NRE Labs, you can get a feel for how this works!
 
 Before we proceed, we need to move to the right directory. Terraform is ran in a separate directory so we can move between stages within this lesson and knowledge build.
 
diff --git a/lessons/tools/lesson-31-terraform/stage3/guide.md b/lessons/tools/lesson-31-terraform/stage3/guide.md
index 3a4d1cb0..361d0ca3 100644
--- a/lessons/tools/lesson-31-terraform/stage3/guide.md
+++ b/lessons/tools/lesson-31-terraform/stage3/guide.md
@@ -108,7 +108,7 @@ terraform apply -auto-approve
 ```
 
 
-Now the BGP session remote peer address has been changed and you're free to check the configuration group configuration entry on `vqfx` to gain evidence of this wizardy.
+Now the BGP session remote peer address has been changed and you're free to check the configuration group configuration entry on `vqfx` to gain evidence of this wizardry.
 
 Peaking under the hood, the Junos Terraform provider destroys the group by NETCONF then re-creates it with the same ID. In the grand scheme of things, it looks like an edit, but it's actually a full resource re-build.
 
diff --git a/lessons/workflows/lesson-32-stigcompliance/stage1/guide.md b/lessons/workflows/lesson-32-stigcompliance/stage1/guide.md
index 0b5b1181..405bb011 100644
--- a/lessons/workflows/lesson-32-stigcompliance/stage1/guide.md
+++ b/lessons/workflows/lesson-32-stigcompliance/stage1/guide.md
@@ -38,7 +38,7 @@ napalm --user=antidote --password=antidotepassword --vendor=junos vqfx1 validate
 
 
 There's a lot to go through in the resulting JSON output, but the important thing is the high-level key "complies" has a value of `false`.
-This means that the test we wrote to assert that the SNMP community string adheres to V-3969 is showing noncompliance on this device.
+This means that the test we wrote to assert that the SNMP community string adheres to V-3969 is showing non-compliance on this device.
 
 We can get this test to pass by reconfiguring `vqfx1` to comply with V-3969 by setting the community string to read only:
 
@@ -88,7 +88,7 @@ napalm --user=antidote --password=antidotepassword --vendor=junos vqfx1 validate
 
 
 This lab has intentionally left a few things out:
-- It doesn't ensure compliance. It just detects noncompliance in an automated way. This is very valuable, but even more valuable is the ability to couple these tests with something like a Python script or Ansible playbook to perform the necessary compliance changes automatically when a violation is detected.
+- It doesn't ensure compliance. It just detects non-compliance in an automated way. This is very valuable, but even more valuable is the ability to couple these tests with something like a Python script or Ansible playbook to perform the necessary compliance changes automatically when a violation is detected.
 - Obviously, there are many other findings you can write tests for that we haven't included here. You should [take a look](https://stigviewer.com/stig/infrastructure_router__juniper/) at the other findings, and consider writing a test for them here as practice.
 
 In the next lab, we'll look at doing the same thing with another tool we've looked at before - JSNAPy.
diff --git a/lessons/workflows/lesson-32-stigcompliance/stage2/guide.md b/lessons/workflows/lesson-32-stigcompliance/stage2/guide.md
index def81e85..b2e4eaf2 100644
--- a/lessons/workflows/lesson-32-stigcompliance/stage2/guide.md
+++ b/lessons/workflows/lesson-32-stigcompliance/stage2/guide.md
@@ -78,7 +78,7 @@ Another advantage of using JSNAPy is, due to the fact that it uses XPath to loca
 
 The XPath expression `//bgp-peer` matches all BGP peers configured on the device. Once these are located, the actual test being run is that the node `bgp-option-information/authentication-configured` exists. This is what Junos places in the body of the response to the RPC `get-bgp-neighbor-information` when a certain peer is configured with authentication.
 
-Since our BGP peers are not configured with authentication, our previous testrun failed. Let's first get our device compliant once more:
+Since our BGP peers are not configured with authentication, our previous test run failed. Let's first get our device compliant once more:
 
 ```
 configure
diff --git a/lessons/workflows/lesson-32-stigcompliance/stage3/guide.md b/lessons/workflows/lesson-32-stigcompliance/stage3/guide.md
index 9cf00367..5c300aa9 100644
--- a/lessons/workflows/lesson-32-stigcompliance/stage3/guide.md
+++ b/lessons/workflows/lesson-32-stigcompliance/stage3/guide.md
@@ -1,13 +1,13 @@
 # Automated STIG Compliance Validation
 ## Part 3  - STIG Compliance Validation with custom Python scripts
 
-In the previous labs, we used NAPALM and JSNAPy to check the [STIG for Juniper devices](https://stigviewer.com/stig/infrastructure_router__juniper/) were found to be in compliance for the V-3969 finding.  NAPALM and JSNAPy are great for many compliance checks like looking for the existence of a configuration setting, but they may fall short when the check requires more detailed analysis of the network devices configuration and operational state or we need some "glue" to bind mutiple compliance checks together or report back findings in a specific manner.  
+In the previous labs, we used NAPALM and JSNAPy to check that devices were in compliance with the V-3969 finding of the [STIG for Juniper devices](https://stigviewer.com/stig/infrastructure_router__juniper/).  NAPALM and JSNAPy are great for many compliance checks like looking for the existence of a configuration setting, but they may fall short when the check requires more detailed analysis of the network device's configuration and operational state or we need some "glue" to bind multiple compliance checks together or report back findings in a specific manner.  
 
 In this lab, we'll look at what it takes to automate a STIG compliance check using python scripts and leveraging the [PyEZ framework](https://labs.networkreliability.engineering/labs/?lessonId=24&lessonStage=1) and [PyEZ Tables and Views](https://labs.networkreliability.engineering/labs/?lessonId=24&lessonStage=5). We'll write our own custom table to retrieve specific configuration items to make it easier to deal with XML formatted data.  
 
-Custom Op and Config tables are written in [YAML](https://labs.networkreliability.engineering/labs/?lessonId=14&lessonStage=1), their usage wth PyEZ is documented [here](https://pyez.readthedocs.io/en/latest/TableView.html).
+Custom Op and Config tables are written in [YAML](https://labs.networkreliability.engineering/labs/?lessonId=14&lessonStage=1), their usage with PyEZ is documented [here](https://pyez.readthedocs.io/en/latest/TableView.html).
 
-We'll begin by starting up the python interpretter, defining a PyEZ device and connecting to 'vqfx1'.
+We'll begin by starting up the python interpreter, defining a PyEZ device and connecting to 'vqfx1'.
 
 ```
 python -Wi
@@ -27,7 +27,7 @@ show configuration snmp | display xml
 ``` 
 
 
-Since we are examining the configuration, we need to use a ConfigTable, which maps XML paths, elements and attributes into easier to understand and parse YAML syntax.  We need a list of communities, and their authorization level.  All of the relevant configuration we need to check is located under the XML element `community` with a parent of `snmp`.  We can translate this into an XPATH of `snmp/community` to use in our queries.  The communities are all listed at the XPATH `snmp/community`, so we will define a table called `SNMPTable`, and instruct it to fetch the configuration that matches this XPATH statment with a `get` instruction.  This will create a nested dictionary of element names to their values starting at our XPATH.
+Since we are examining the configuration, we need to use a ConfigTable, which maps XML paths, elements and attributes into easier to understand and parse YAML syntax.  We need a list of communities, and their authorization level.  All of the relevant configuration we need to check is located under the XML element `community` with a parent of `snmp`.  We can translate this into an XPATH of `snmp/community` to use in our queries.  The communities are all listed at the XPATH `snmp/community`, so we will define a table called `SNMPTable`, and instruct it to fetch the configuration that matches this XPATH statement with a `get` instruction.  This will create a nested dictionary of element names to their values starting at our XPATH.
 
 We can save this in python to a variable we'll call `SNMPYAML`.
 
@@ -63,7 +63,7 @@ globals().update(FactoryLoader().load(yaml.load(SNMPYAML)))
 ```
 
 
-We can then fetch the configuraiton from the device. After the following snippet is run, you should see that we successfully retrieved 1 item, matching the number of communities that we have defined on vqfx1.
+We can then fetch the configuration from the device. After the following snippet is run, you should see that we successfully retrieved 1 item, matching the number of communities that we have defined on vqfx1.
 
 ```
 SNMPTable(dev).get()
@@ -79,7 +79,7 @@ type(SNMPTable(dev).get())
 
 
 
-Using the builtin python `dir` function, we can take a quick peek at all of the attributes and objects that are part of our SNMPTable.
+Using the built-in python `dir` function, we can take a quick peek at all of the attributes and objects that are part of our `SNMPTable`.
 
 ```
 dir(SNMPTable(dev).get())
@@ -145,7 +145,7 @@ EOF
 
 
 
-We'll create an accompaning python file to allow us to import this YAML file as a module, doing the work of `FactoryLoader` above.  
+We'll create an accompanying python file to allow us to import this YAML file as a module, doing the work of `FactoryLoader` above.  
 
 ```
 cat > tables/config_tables.py << EOF
@@ -175,7 +175,7 @@ EOF
 ```
 
 
-Next, we'll import all of the python modules needed to run our script, starting with the `Device` module from the `jnpr.junos` package.  We'll also import our `SNMPTable` ConfitTable we created earlier and placed in the `tables` directory, as well as a `warnings` module which we'll use to clean up some of our output at runtime.
+Next, we'll import all of the python modules needed to run our script, starting with the `Device` module from the `jnpr.junos` package.  We'll also import our `SNMPTable` Config Table we created earlier and placed in the `tables` directory, as well as a `warnings` module which we'll use to clean up some of our output at runtime.
 
 ```
 cat >> V_3969.py << EOF
@@ -188,7 +188,7 @@ EOF
 ```
 
 
-We'll turn the brunt of our code into a Python function for the checking done above.  This check is for a STIG Rule called `NET0984`, which is a component of the STIG vulnerability `V-3969`.  A STIG vulnerability can consist of multiple rules, however in this case the only rule that we need to check is `NET0984`.  It will operate on any PyEZ `junpr.junos` Device which we'll pass to the function as an argument.  We'll add in a variable `check_pass` to keep track if we had any communties that violated our check for an overall pass/fail grade, some comments, and print statements that give some more information about what we've found, and what we need to do to fix any security vulnerabilities encountered.
+We'll turn the brunt of our code into a Python function for the checking done above.  This check is for a STIG Rule called `NET0894`, which is a component of the STIG vulnerability `V-3969`.  A STIG vulnerability can consist of multiple rules, however in this case the only rule that we need to check is `NET0894`.  It will operate on any PyEZ `jnpr.junos` Device which we'll pass to the function as an argument.  We'll add in a variable `check_pass` to keep track if we had any communities that violated our check for an overall pass/fail grade, some comments, and print statements that give some more information about what we've found, and what we need to do to fix any security vulnerabilities encountered.
 
 At the end of our function, we'll return our pass/fail grade.
 
@@ -206,12 +206,12 @@ def NET0894(device):
 
     # Some extra information on what the script is doing
     print "CHECKING NET0894: This examines the configuration for",
-    print "SNMPv2 communties with write access."
+    print "SNMPv2 communities with write access."
 
     # Retrieve the SNMP configuration table
     snmp = SNMPTable(device).get()
 
-    # Loop through all the communties configured on the device
+    # Loop through all the communities configured on the device
     for mydev in snmp:
         # check that the authorization is 'read-only'
         if mydev.authorization != "read-only":
@@ -239,7 +239,7 @@ EOF
 ```
 
 
-Then we'll add the main loop.  First it will define our PyEZ Device for `vqfx1`, then call our function using this device as the argument.  Then depending on what we receive back from our function, we'll print an overall pass/fail grade, and finallly nicely close the connection to `vqfx1`.
+Then we'll add the main loop.  First it will define our PyEZ Device for `vqfx1`, then call our function using this device as the argument.  Then depending on what we receive back from our function, we'll print an overall pass/fail grade, and finally nicely close the connection to `vqfx1`.
 
 ```python
 cat >> V_3969.py << EOF
@@ -282,7 +282,7 @@ chmod a+x V_3969.py
 ```
 
 
-To make things interesting, we'll add in some new SNMP community vulnerabilies onto `vqfx1`.
+To make things interesting, we'll add in some new SNMP community vulnerabilities onto `vqfx1`.
 ```
 configure
 set snmp community public 
diff --git a/lessons/workflows/lesson-33-quickdeviceinventory/stage1/guide.md b/lessons/workflows/lesson-33-quickdeviceinventory/stage1/guide.md
index e739272c..317cc84a 100644
--- a/lessons/workflows/lesson-33-quickdeviceinventory/stage1/guide.md
+++ b/lessons/workflows/lesson-33-quickdeviceinventory/stage1/guide.md
@@ -52,7 +52,7 @@ print dev.facts['hostname'] + ";" + dev.facts['model']
 
-Thats it....Easy Peasy Lemon squeezy!! :) +That's it....Easy Peasy Lemon squeezy!! :) In the next lab, we'll take these same concepts to support gathering information from multiple devices. diff --git a/lessons/workflows/lesson-33-quickdeviceinventory/stage2/guide.md b/lessons/workflows/lesson-33-quickdeviceinventory/stage2/guide.md index 3ec1d279..c5b31d4b 100644 --- a/lessons/workflows/lesson-33-quickdeviceinventory/stage2/guide.md +++ b/lessons/workflows/lesson-33-quickdeviceinventory/stage2/guide.md @@ -50,7 +50,7 @@ print("HOSTNAME;MODEL;SERIAL-NUMBER;JUNOS-VERSION") Now this is where the magic happens! We are going to create a `for` loop so we can perform a series of actions on each device in the list (i.e. YAML file). The first thing we do is create the `dev` variable that includes the device hostname and login credentials. Next we `open` a NETCONF connection to the device so we can query the `facts`. -Then we `print` the specific keys from the facts dictionary on the same line separated by a semicolon. Thats it and all that is left is to `close` the connection to the device. +Then we `print` the specific keys from the facts dictionary on the same line separated by a semicolon. That's it and all that is left is to `close` the connection to the device.
 for device in deviceList:
diff --git a/lessons/workflows/lesson-34-configbackup/stage1/guide.md b/lessons/workflows/lesson-34-configbackup/stage1/guide.md
index b35e9c25..d7f17840 100644
--- a/lessons/workflows/lesson-34-configbackup/stage1/guide.md
+++ b/lessons/workflows/lesson-34-configbackup/stage1/guide.md
@@ -6,7 +6,7 @@
 
 ### Part 1  - Single Device Backup
 
-Having an up to date device configuration is essential for disater recovery in the event of device failure or natural disaster. Being able to automate backup configurations makes recovery of the device that much easier (especially if it's a mission critical device). In this lesson we will pull the configuration from a single device and store it in **"display set"** format. 
+Having an up to date device configuration is essential for disaster recovery in the event of device failure or natural disaster. Being able to automate backup configurations makes recovery of the device that much easier (especially if it's a mission critical device). In this lesson we will pull the configuration from a single device and store it in **"display set"** format. 
 
 First we will start the Python interactive shell, load the PyEz module so we can communicate with the Junos devices and load the `lxml` module to format the XML data returned from the Junos device.
 
diff --git a/lessons/workflows/lesson-35-devicespecifictemplate/stage1/guide.md b/lessons/workflows/lesson-35-devicespecifictemplate/stage1/guide.md
index bcc34338..07bed2c2 100644
--- a/lessons/workflows/lesson-35-devicespecifictemplate/stage1/guide.md
+++ b/lessons/workflows/lesson-35-devicespecifictemplate/stage1/guide.md
@@ -8,10 +8,10 @@
 
 Having the ability to easily generate a device specific configuration from an approved template is a **HUGE** time saver and it provides the consistency that you need to avoid common configuration errors.
 
-In this section, we will do a quick review of the key concepts required to accomplish this task; specificially YAML and Jinja2. View the Lesson Diagram to see a visual representation of the process. For a complete review of [YAML](https://labs.networkreliability.engineering/labs/?lessonId=14&lessonStage=1) and [Jinja2](https://labs.networkreliability.engineering/labs/?lessonId=16&lessonStage=1) please go through those lessons in NRE Labs.
+In this section, we will do a quick review of the key concepts required to accomplish this task; specifically YAML and Jinja2. View the Lesson Diagram to see a visual representation of the process. For a complete review of [YAML](https://labs.networkreliability.engineering/labs/?lessonId=14&lessonStage=1) and [Jinja2](https://labs.networkreliability.engineering/labs/?lessonId=16&lessonStage=1) please go through those lessons in NRE Labs.
 
 #### Device Template File
-The most important part of the template generation process is to have a device template with a known good configuration. The configuration in this template usually has to be approved by a configuration control board and the security team. The approved template is then modified to include variables using  Jinja syntax so the substitions can be done by the script.
+The most important part of the template generation process is to have a device template with a known good configuration. The configuration in this template usually has to be approved by a configuration control board and the security team. The approved template is then modified to include variables using  Jinja syntax so the substitutions can be done by the script.
 
 Let's take a look at our sample configuration template that has already had the Jinja syntax added.
 
@@ -20,7 +20,7 @@ more template.j2
 
-Jinja2 variables can be almost any combinations of numbers or lower case or upper case letters but in this example we will use all captial letters so they stand out better. The variables are surrounded by double curly brackets, for example **{{ HOSTNAME }}**. In the output look for the three template variables (**HOSTNAME, MGMT\_IP and DEFAULT\_GW**). Pay attention to these variable names because they will be set to specific values in the next section using YAML. +Jinja2 variables can be almost any combinations of numbers or lower case or upper case letters but in this example we will use all capital letters so they stand out better. The variables are surrounded by double curly brackets, for example **{{ HOSTNAME }}**. In the output look for the three template variables (**HOSTNAME, MGMT\_IP and DEFAULT\_GW**). Pay attention to these variable names because they will be set to specific values in the next section using YAML. #### YAML Review YAML is a human friendly data serialization standard but what does that mean? It means that it is a way to format data so that it is easy for humans to read and edit. The data in the YAML file will be used to make substitutions in the device template. We have a sample YAML file with prepopulated data. @@ -55,7 +55,7 @@ print my_vars
-In the output you can see the upper case keys and their coresponding values, for example **'HOSTNAME': 'ex4300-3'**. +In the output you can see the upper case keys and their corresponding values, for example **'HOSTNAME': 'ex4300-3'**. #### Template Generation Now we will generate a configuration based on the device template and YAML data. This is done using Jinja2 so we have to import the Jinja2 module. diff --git a/lessons/workflows/lesson-35-devicespecifictemplate/stage2/guide.md b/lessons/workflows/lesson-35-devicespecifictemplate/stage2/guide.md index bbe99d9f..d5145a42 100644 --- a/lessons/workflows/lesson-35-devicespecifictemplate/stage2/guide.md +++ b/lessons/workflows/lesson-35-devicespecifictemplate/stage2/guide.md @@ -6,12 +6,12 @@ ### Part 2 - Multiple Devices -The examples in the previous lesson were purposefully simple so the basic concepts could be described in a way that is easy to understand. In this section we will create a template generator that creates device specific configurations for mulitple devices. +The examples in the previous lesson were purposefully simple so the basic concepts could be described in a way that is easy to understand. In this section we will create a template generator that creates device specific configurations for multiple devices. The sample project we will use is deploying a large number of access switches. When deploying devices it is common for them to have a base configuration to include hostname, management IP address and a default gateway. #### Device Template File -In this section we will use the same template file that we used in the previous lesson. Look for the template variables in all captial letters surrounded by double curly brackets, for example **{{ HOSTNAME }}**. Pay attention to these variables because they will be set to specific values in the next section using YAML. +In this section we will use the same template file that we used in the previous lesson. 
Look for the template variables in all capital letters surrounded by double curly brackets, for example **{{ HOSTNAME }}**. Pay attention to these variables because they will be set to specific values in the next section using YAML.
 cd /antidote/stage2
 more template.j2
@@ -69,7 +69,7 @@ template = Template(template_data)
 
-Lastly we will render the template based on the data from the YAML file. Since we are dealing with multiple devices we have to use a loop to process each device defined in the YAML file. The loop will run through the `my_vars` list processing each element one at a time, making the substitutions of the key/value pairs until there arent any elements left. +Lastly we will render the template based on the data from the YAML file. Since we are dealing with multiple devices we have to use a loop to process each device defined in the YAML file. The loop will run through the `my_vars` list processing each element one at a time, making the substitutions of the key/value pairs until there aren't any elements left.
 for device in my_vars:
@@ -115,7 +115,7 @@ cat ex4300-3.conf
 The session is complete but if you want to play around on your own, here are a couple of things to try.
 
 1. Add another device to variables.yml file and regenerate the configuration files
-2. Add a line to the template.j2 file with a new variable, add the coresponding variable to variables.yml and regenerate the configuration files
+2. Add a line to the template.j2 file with a new variable, add the corresponding variable to variables.yml and regenerate the configuration files
 
 Regenerating the configuration files can be done using the `build-configs.py` script.
 
diff --git a/lessons/workflows/lesson-35-devicespecifictemplate/stage3/guide.md b/lessons/workflows/lesson-35-devicespecifictemplate/stage3/guide.md
index 686b475c..70ae5dd7 100644
--- a/lessons/workflows/lesson-35-devicespecifictemplate/stage3/guide.md
+++ b/lessons/workflows/lesson-35-devicespecifictemplate/stage3/guide.md
@@ -8,7 +8,7 @@
 
 The examples in the previous lesson were a little more realistic and in this section we'll take it to the next level. 
 
-There are times that you need to be able to create multiples of the same configuration line using different data sets. We will continue with the sample project of deploying a number of access switches. To explain this concept we will use uplink ports on Access switches. Access switches usually have two connections, one to each upstream distribution switch. In most situations those uplink ports would be the same on every access switch, but not always. In this session we will show you how to add those uplinks in a quick and consistant way but still allowing flexibility to change them as necessary.
+There are times that you need to be able to create multiples of the same configuration line using different data sets. We will continue with the sample project of deploying a number of access switches. To explain this concept we will use uplink ports on Access switches. Access switches usually have two connections, one to each upstream distribution switch. In most situations those uplink ports would be the same on every access switch, but not always. In this session we will show you how to add those uplinks in a quick and consistent way but still allowing flexibility to change them as necessary.
 
 
 #### YAML Variables File 
@@ -73,7 +73,7 @@ template = Template(template_data)
 
-Lastly we will render the template based on the data. Since we are dealing with multiple devices we have to use a loop to process each device defined in the YAML directionary. +Lastly we will render the template based on the data. Since we are dealing with multiple devices we have to use a loop to process each device defined in the YAML dictionary.
 for device in my_vars:
diff --git a/lessons/workflows/lesson-35-devicespecifictemplate/stage4/guide.md b/lessons/workflows/lesson-35-devicespecifictemplate/stage4/guide.md
index e60c0bff..58d5f244 100644
--- a/lessons/workflows/lesson-35-devicespecifictemplate/stage4/guide.md
+++ b/lessons/workflows/lesson-35-devicespecifictemplate/stage4/guide.md
@@ -6,7 +6,7 @@
 
 ### Part 4  - Push Template to Device
 
-After you generate a template configuration the next step is to push the configuration to a device or a series for devices so thats what we will do in this lesson. The sample project we will use is we have to generate a number of VLANs and then push them to a few QFX switches.
+After you generate a template configuration the next step is to push the configuration to a device or a series of devices so that's what we will do in this lesson. The sample project we will use is we have to generate a number of VLANs and then push them to a few QFX switches.
 
 First let's see what VLANs are already configured on the QFX switches, starting with vqfx1:
 
@@ -105,7 +105,7 @@ for device in deviceList:
   cfg=Config(device)
   cfg.load(path='new-vlans.conf', format='text')
   if cfg.commit() == True:
-     print ('configuration commited on ' + device.facts["hostname"])
+     print ('configuration committed on ' + device.facts["hostname"])
   else:
      print ('commit failed on ' + device.facts["hostname"])
      device.close()

From 20a2ac277e7318a43ba1619f1fe6e7ba313c521b Mon Sep 17 00:00:00 2001
From: Stephen Kiely 
Date: Sat, 2 Nov 2019 12:43:58 -0500
Subject: [PATCH 2/6] Make check-spelling.sh executable.

Signed-off-by: Stephen Kiely 
---
 check-spelling.sh | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 mode change 100644 => 100755 check-spelling.sh

diff --git a/check-spelling.sh b/check-spelling.sh
old mode 100644
new mode 100755

From 9e4fda2f80073b0740123817a1f58609d9527cbb Mon Sep 17 00:00:00 2001
From: Stephen Kiely 
Date: Sat, 2 Nov 2019 12:55:29 -0500
Subject: [PATCH 3/6] Add additional words to the personal dictionary due to
 Travis CI pulling a different aspell dictionary.

Signed-off-by: Stephen Kiely 
---
 .aspell.en.pws | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/.aspell.en.pws b/.aspell.en.pws
index b9d5b663..6a2daaf0 100644
--- a/.aspell.en.pws
+++ b/.aspell.en.pws
@@ -239,4 +239,18 @@ reconfiguring
 VM
 TCP
 reachability
-instantiation
\ No newline at end of file
+instantiation
+learnt
+login
+timestamps
+workflow
+Workflow
+workflows
+workflow's
+Workflows
+API
+username
+grpc
+multi
+amongst
+voip
\ No newline at end of file

From 16b0f6b054da9d13a16620f5ad00b86b8723b92d Mon Sep 17 00:00:00 2001
From: Stephen Kiely 
Date: Sat, 2 Nov 2019 12:58:47 -0500
Subject: [PATCH 4/6] Added timestamp to the dictionary.

Signed-off-by: Stephen Kiely 
---
 .aspell.en.pws | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.aspell.en.pws b/.aspell.en.pws
index 6a2daaf0..bfd5c8ad 100644
--- a/.aspell.en.pws
+++ b/.aspell.en.pws
@@ -253,4 +253,5 @@ username
 grpc
 multi
 amongst
-voip
\ No newline at end of file
+voip
+timestamp
\ No newline at end of file

From 464d8949c8779fcdec8c9970fd2d61e9bb38d80d Mon Sep 17 00:00:00 2001
From: Stephen Kiely 
Date: Sat, 2 Nov 2019 13:32:41 -0500
Subject: [PATCH 5/6] Update Changelog.

Signed-off-by: Stephen Kiely 
---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8a85bba2..87b25db2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@
 
 ## In development
 
+- Add spelling checks in CI pipeline [#276](https://github.com/nre-learning/nrelabs-curriculum/pull/276)
 - Set tshoot lesson images to use centos7 [#275](https://github.com/nre-learning/nrelabs-curriculum/pull/275)
 - Added FRR PTR demo [#273](https://github.com/nre-learning/nrelabs-curriculum/pull/273)
 - Fixed Cumulus PTR demo [#271](https://github.com/nre-learning/nrelabs-curriculum/pull/271)

From 145d10a2608556f84ad4b62ac10b2df53ba5a250 Mon Sep 17 00:00:00 2001
From: Stephen Kiely 
Date: Tue, 5 Nov 2019 10:50:09 -0600
Subject: [PATCH 6/6] Add additional dictionaries to the spell check.

Signed-off-by: Stephen Kiely 
---
 .travis.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.travis.yml b/.travis.yml
index 17790ba1..3da52680 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,6 +8,7 @@ matrix:
           packages:
             - aspell
             - aspell-en
+            - dictionaries-common
       script:
         - ./check-spelling.sh