Merge pull request #22 from alexandruavadanii/fabric-from-cidr

Module: Obtain fabric ID from CIDR
diff --git a/Makefile b/Makefile
index 1043fbe..d166862 100644
--- a/Makefile
+++ b/Makefile
@@ -27,6 +27,7 @@
 
 all:
 	@echo "make install - Install into DESTDIR"
+	@echo "make lint    - Run lint tests"
 	@echo "make test    - Run tests"
 	@echo "make kitchen - Run Kitchen CI tests (create, converge, verify)"
 	@echo "make clean   - Cleanup after tests run"
@@ -45,6 +46,9 @@
 	[ -d $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME) ] || mkdir -p $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
 	cp -a metadata/service/* $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
 
+lint:
+	[ ! -d tests ] || (cd tests; ./run_tests.sh lint)
+
 test:
 	[ ! -d tests ] || (cd tests; ./run_tests.sh)
 
@@ -65,7 +69,7 @@
 	[ ! -f debian/changelog ] || dch -v $(VERSION_MAJOR).$(NEW_MINOR_VERSION) -m --force-distribution -D `dpkg-parsechangelog -S Distribution` "New version"
 	make genchangelog-$(VERSION_MAJOR).$(NEW_MINOR_VERSION)
 	(git add -u; git commit -m "Version $(VERSION_MAJOR).$(NEW_MINOR_VERSION)")
-	git tag -s -m $(NEW_MAJOR_VERSION) $(VERSION_MAJOR).$(NEW_MINOR_VERSION)
+	git tag -s -m $(VERSION_MAJOR).$(NEW_MINOR_VERSION) $(VERSION_MAJOR).$(NEW_MINOR_VERSION)
 
 check-changes:
 	@git log --pretty=oneline --decorate $(VERSION)..HEAD | grep -Eqc '.*' || (echo "No new changes since version $(VERSION)"; exit 1)
diff --git a/README.rst b/README.rst
index f6c1f9f..ae261c8 100644
--- a/README.rst
+++ b/README.rst
@@ -46,14 +46,16 @@
       user: mirantis
       token: "89EgtWkX45ddjMYpuL:SqVjxFG87Dr6kVf4Wp:5WLfbUgmm9XQtJxm3V2LUUy7bpCmqmnk"
       fabrics:
-        test-fabric:
-          description: Test fabric
+        test-fabric1:
+          description: "Test fabric"
+        test-fabric2:
+          description: "Test fabric2"
       subnets:
         subnet1:
-          fabric: test-fabric
+          fabric: test-fabric1
           cidr: 2.2.3.0/24
           gateway_ip: 2.2.3.2
-          iprange:
+          iprange: # reserved range for DHCP/auto mapping
             start: 2.2.3.20
             end: 2.2.3.250
       dhcp_snippets:
@@ -62,6 +64,13 @@
           description: Test snippet
           enabled: true
           subnet: subnet1
+      boot_sources:
+        maas_mirror:
+          url: http://images.maas.io/ephemeral-v3/daily/
+          keyring_file: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
+        local_mirror:
+          url: http://127.0.0.1/maas/images/ephemeral-v3/daily
+          keyring_file: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
       boot_resources:
         bootscript1:
           title: bootscript
@@ -80,38 +89,29 @@
                Version: GnuPG v2
 
                mQENBFOpvpgBCADkP656H41i8fpplEEB8IeLhugyC2rTEwwSclb8tQNYtUiGdna9
-               m38kb0OS2DDrEdtdQb2hWCnswxaAkUunb2qq18vd3dBvlnI+C4/xu5ksZZkRj+fW
-               tArNR18V+2jkwcG26m8AxIrT+m4M6/bgnSfHTBtT5adNfVcTHqiT1JtCbQcXmwVw
-               WbqS6v/LhcsBE//SHne4uBCK/GHxZHhQ5jz5h+3vWeV4gvxS3Xu6v1IlIpLDwUts
-               kT1DumfynYnnZmWTGc6SYyIFXTPJLtnoWDb9OBdWgZxXfHEcBsKGha+bXO+m2tHA
-               gNneN9i5f8oNxo5njrL8jkCckOpNpng18BKXABEBAAG0MlNhbHRTdGFjayBQYWNr
-               YWdpbmcgVGVhbSA8cGFja2FnaW5nQHNhbHRzdGFjay5jb20+iQE4BBMBAgAiBQJT
-               qb6YAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAOCKFJ3le/vhkqB/0Q
-               WzELZf4d87WApzolLG+zpsJKtt/ueXL1W1KA7JILhXB1uyvVORt8uA9FjmE083o1
-               yE66wCya7V8hjNn2lkLXboOUd1UTErlRg1GYbIt++VPscTxHxwpjDGxDB1/fiX2o
-               nK5SEpuj4IeIPJVE/uLNAwZyfX8DArLVJ5h8lknwiHlQLGlnOu9ulEAejwAKt9CU
-               4oYTszYM4xrbtjB/fR+mPnYh2fBoQO4d/NQiejIEyd9IEEMd/03AJQBuMux62tjA
-               /NwvQ9eqNgLw9NisFNHRWtP4jhAOsshv1WW+zPzu3ozoO+lLHixUIz7fqRk38q8Q
-               9oNR31KvrkSNrFbA3D89uQENBFOpvpgBCADJ79iH10AfAfpTBEQwa6vzUI3Eltqb
-               9aZ0xbZV8V/8pnuU7rqM7Z+nJgldibFk4gFG2bHCG1C5aEH/FmcOMvTKDhJSFQUx
-               uhgxttMArXm2c22OSy1hpsnVG68G32Nag/QFEJ++3hNnbyGZpHnPiYgej3FrerQJ
-               zv456wIsxRDMvJ1NZQB3twoCqwapC6FJE2hukSdWB5yCYpWlZJXBKzlYz/gwD/Fr
-               GL578WrLhKw3UvnJmlpqQaDKwmV2s7MsoZogC6wkHE92kGPG2GmoRD3ALjmCvN1E
-               PsIsQGnwpcXsRpYVCoW7e2nW4wUf7IkFZ94yOCmUq6WreWI4NggRcFC5ABEBAAGJ
-               AR8EGAECAAkFAlOpvpgCGwwACgkQDgihSd5Xv74/NggA08kEdBkiWWwJZUZEy7cK
-               WWcgjnRuOHd4rPeT+vQbOWGu6x4bxuVf9aTiYkf7ZjVF2lPn97EXOEGFWPZeZbH4
-               vdRFH9jMtP+rrLt6+3c9j0M8SIJYwBL1+CNpEC/BuHj/Ra/cmnG5ZNhYebm76h5f
-               T9iPW9fFww36FzFka4VPlvA4oB7ebBtquFg3sdQNU/MmTVV4jPFWXxh4oRDDR+8N
-               1bcPnbB11b5ary99F/mqr7RgQ+YFF0uKRE3SKa7a+6cIuHEZ7Za+zhPaQlzAOZlx
+                ......
                fuBmScum8uQTrEF5+Um5zkwC7EXTdH1co/+/V/fpOtxIg4XO4kcugZefVm5ERfVS
                MA==
                =dtMN
                -----END PGP PUBLIC KEY BLOCK-----"
           enabled: true
       machines:
-        machine1:
-          interface:
-            mac: "11:22:33:44:55:66"
+        machine1_new_schema:
+          pxe_interface_mac: "11:22:33:44:55:66" # The node will be identified by this MAC
+          interfaces:
+            nic01: # key name can be arbitrary; used only for iteration
+              type: eth # not implemented
+              name: eth0 # Override the default NIC name; the interface to rename is identified by MAC
+              mac: "11:22:33:44:55:66"
+              mode: "static"
+              ip: "2.2.3.19"  # The IP must be outside the reserved range, but still within the subnet
+              subnet: "subnet1"
+              gateway: "2.2.3.2" # Overrides the default gateway from the subnet
+            nic02:
+              type: eth # not implemented
+              mac: "11:22:33:44:55:78"
+              subnet: "subnet2"
+              mode: "dhcp"
           power_parameters:
             power_type: ipmi
             power_address: '192.168.10.10'
@@ -119,6 +119,23 @@
             power_password: bmc_password
             #Optional (for legacy HW)
             power_driver: LAN
+          distro_series: xenial
+          hwe_kernel: hwe-16.04
+        machine1_old_schema:
+          interface:
+              mac: "11:22:33:44:55:88"  # Node will be identified by those mac
+              mode: "static"
+              ip: "2.2.3.15"
+              subnet: "subnet1"
+              gateway: "2.2.3.2"
+          power_parameters:
+            power_type: ipmi
+            power_address: '192.168.10.10'
+            power_user: bmc_user
+            power_password: bmc_password
+            #Optional (for legacy HW)
+            power_driver: LAN
+            # FIXME: this should be moved into a separate libvirt example.
             # Used in case of power_type: virsh
             power_id: my_libvirt_vm_name
           distro_series: xenial
@@ -150,9 +167,142 @@
         enable_http_proxy: true
         default_min_hwe_kernel: ''
        sshprefs:
-        - 'ssh-rsa ASDFOSADFISdfasdfasjdklfjasdJFASDJfASdf923@AAAAB3NzaC1yc2EAAAADAQABAAACAQCv8ISOESGgYUOycYw1SAs/SfHTqtSCTephD/7o2+mEZO53xN98sChiFscFaPA2ZSMoZbJ6MQLKcWKMK2OaTdNSAvn4UE4T6VP0ccdumHDNRwO3f6LptvXr9NR5Wocz2KAgptk+uaA8ytM0Aj9NT0UlfjAXkKnoKyNq6yG+lx4HpwolVaFSlqRXf/iuHpCrspv/u1NW7ReMElJoXv+0zZ7Ow0ZylISdYkaqbV8QatCb17v1+xX03xLsZigfugce/8CDsibSYvJv+Hli5CCBsKgfFqLy4R5vGxiLSVzG/asdjalskjdlkasjdasd/asdajsdkjalaksdjfasd/fa/sdf/asd/fas/dfsadf blah@blah'
+        - 'ssh-rsa ASD.........dfsadf blah@blah'
 
Update VLAN
 
NOTE: VID 0 has the default name "untagged" in the MaaS UI
+
+.. code-block:: yaml
+
+  maas:
+    region:
+      fabrics:
+        test-fabric:
+          description: "Test fabric"
+          vlan:
+            0:
+              description: "Your VLAN 0"
+              dhcp: True
+            13:
+              description: "Your VLAN 13"
+              dhcp: False
+
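+The new `maasng.update_vlan` execution module function added in this change
+can also be called directly; an illustrative invocation (values taken from
+the example above):
+
+.. code-block:: bash
+
+  salt-call maasng.update_vlan name=untagged fabric=test-fabric vid=0 description="Your VLAN 0" dhcp_on=True
+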
+Create a disk schema per machine via maas/client.sls with the default lvm schema and default values
+
+NOTE: This should be used mostly for custom root partitioning and RAID configuration. For non-root partitions, please use salt-formulas/salt-formula-linux.
+
+.. code-block:: yaml
+
+  maas:
+    region:
+      machines:
+        server1:
+          disk_layout:
+            type: lvm
+            root_size: 20G
+            root_device: vda
+            volume_group: vg1
+            volume_name: root
+            volume_size: 8
+            bootable_device: vda
+
+FLAT layout with custom root size
+
+.. code-block:: yaml
+
+  maas:
+    region:
+      machines:
+        server2:
+          disk_layout:
+            type: flat
+            root_size: 20
+            physical_device: vda
+            bootable_device: vda
+
+Define a more complex layout
+
+.. code-block:: yaml
+
+  maas:
+    region:
+      machines:
+        server3:
+          disk_layout:
+            type: flat # This is the simplest setup
+            bootable_device: vda
+            disk:
+              vda:
+                type: physical
+                partition_schema:
+                  part1:
+                    size: 10G
+                    type: ext4
+                    mount: '/'
+                  part2:
+                    size: 2G
+                  part3:
+                    size: 3G
+              vdc:
+                type: physical
+                partition_schema:
+                  part1:
+                    size: 100%
+              vdd:
+                type: physical
+                partition_schema:
+                  part1:
+                    size: 100%
+              raid0:
+                type: raid
+                level: 10
+                devices:
+                  - vde
+                  - vdf
+                partition_schema:
+                  part1:
+                    size: 10G
+                  part2:
+                    size: 2G
+                  part3:
+                    size: 3G
+              raid1:
+                type: raid
+                level: 1
+                partitions:
+                  - vdc-part1
+                  - vdd-part1
+              volume_group2:
+                type: lvm
+                devices:
+                  - raid1
+                volume:
+                  tmp:
+                    size: 5G
+                    type: ext4
+                    mount: '/tmp'
+                  log:
+                    size: 7G
+                    type: ext4
+                    mount: '/var/log'
+
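+The building blocks used by the disk schema above (partitions, RAID devices,
+volume groups) are also exposed as `maasng` execution module functions and can
+be driven by hand; an illustrative call, mirroring the raid0 definition above:
+
+.. code-block:: bash
+
+  salt-call maasng.create_raid hostname=server3 name=raid0 level=10 disks=[vde,vdf]
+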
+Setup image mirror
+
+.. code-block:: yaml
+
+  maas:
+    mirror:
+      enabled: true
+      image:
+        release:
+          xenial:
+            keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
+            upstream: http://images.maas.io/ephemeral-v3/daily/
+            local_dir: /var/www/html/maas/images/ephemeral-v3/daily
+            arch: amd64
+            subarch: 'generic|hwe-t'
+            count: 1
 
 Usage of local repos
 
@@ -169,29 +319,7 @@
         Version: GnuPG v2
 
         mQENBFOpvpgBCADkP656H41i8fpplEEB8IeLhugyC2rTEwwSclb8tQNYtUiGdna9
-        m38kb0OS2DDrEdtdQb2hWCnswxaAkUunb2qq18vd3dBvlnI+C4/xu5ksZZkRj+fW
-        tArNR18V+2jkwcG26m8AxIrT+m4M6/bgnSfHTBtT5adNfVcTHqiT1JtCbQcXmwVw
-        WbqS6v/LhcsBE//SHne4uBCK/GHxZHhQ5jz5h+3vWeV4gvxS3Xu6v1IlIpLDwUts
-        kT1DumfynYnnZmWTGc6SYyIFXTPJLtnoWDb9OBdWgZxXfHEcBsKGha+bXO+m2tHA
-        gNneN9i5f8oNxo5njrL8jkCckOpNpng18BKXABEBAAG0MlNhbHRTdGFjayBQYWNr
-        YWdpbmcgVGVhbSA8cGFja2FnaW5nQHNhbHRzdGFjay5jb20+iQE4BBMBAgAiBQJT
-        qb6YAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAOCKFJ3le/vhkqB/0Q
-        WzELZf4d87WApzolLG+zpsJKtt/ueXL1W1KA7JILhXB1uyvVORt8uA9FjmE083o1
-        yE66wCya7V8hjNn2lkLXboOUd1UTErlRg1GYbIt++VPscTxHxwpjDGxDB1/fiX2o
-        nK5SEpuj4IeIPJVE/uLNAwZyfX8DArLVJ5h8lknwiHlQLGlnOu9ulEAejwAKt9CU
-        4oYTszYM4xrbtjB/fR+mPnYh2fBoQO4d/NQiejIEyd9IEEMd/03AJQBuMux62tjA
-        /NwvQ9eqNgLw9NisFNHRWtP4jhAOsshv1WW+zPzu3ozoO+lLHixUIz7fqRk38q8Q
-        9oNR31KvrkSNrFbA3D89uQENBFOpvpgBCADJ79iH10AfAfpTBEQwa6vzUI3Eltqb
-        9aZ0xbZV8V/8pnuU7rqM7Z+nJgldibFk4gFG2bHCG1C5aEH/FmcOMvTKDhJSFQUx
-        uhgxttMArXm2c22OSy1hpsnVG68G32Nag/QFEJ++3hNnbyGZpHnPiYgej3FrerQJ
-        zv456wIsxRDMvJ1NZQB3twoCqwapC6FJE2hukSdWB5yCYpWlZJXBKzlYz/gwD/Fr
-        GL578WrLhKw3UvnJmlpqQaDKwmV2s7MsoZogC6wkHE92kGPG2GmoRD3ALjmCvN1E
-        PsIsQGnwpcXsRpYVCoW7e2nW4wUf7IkFZ94yOCmUq6WreWI4NggRcFC5ABEBAAGJ
-        AR8EGAECAAkFAlOpvpgCGwwACgkQDgihSd5Xv74/NggA08kEdBkiWWwJZUZEy7cK
-        WWcgjnRuOHd4rPeT+vQbOWGu6x4bxuVf9aTiYkf7ZjVF2lPn97EXOEGFWPZeZbH4
-        vdRFH9jMtP+rrLt6+3c9j0M8SIJYwBL1+CNpEC/BuHj/Ra/cmnG5ZNhYebm76h5f
-        T9iPW9fFww36FzFka4VPlvA4oB7ebBtquFg3sdQNU/MmTVV4jPFWXxh4oRDDR+8N
-        1bcPnbB11b5ary99F/mqr7RgQ+YFF0uKRE3SKa7a+6cIuHEZ7Za+zhPaQlzAOZlx
+        .....
         fuBmScum8uQTrEF5+Um5zkwC7EXTdH1co/+/V/fpOtxIg4XO4kcugZefVm5ERfVS
         MA==
         =dtMN
@@ -215,6 +343,16 @@
         enabled: true
         role: master/slave
 
+MAAS region service with backup data
+
+.. code-block:: yaml
+
+    maas:
+      region:
+        database:
+          initial_data:
+            source: cfg01.local
+            host: 192.168.0.11
+
 Module function's example:
 ==========================
 
@@ -259,7 +397,17 @@
         - cmd: maas_login_admin
       ...
 
-List of avaibled `req_status` defined in global variable:
+List of available `req_status` defined in global variable:
+
+.. code-block:: python
+
+    STATUS_NAME_DICT = dict([
+        (0, 'New'), (1, 'Commissioning'), (2, 'Failed commissioning'),
+        (3, 'Missing'), (4, 'Ready'), (5, 'Reserved'), (10, 'Allocated'),
+        (9, 'Deploying'), (6, 'Deployed'), (7, 'Retired'), (8, 'Broken'),
+        (11, 'Failed deployment'), (12, 'Releasing'),
+        (13, 'Releasing failed'), (14, 'Disk erasing'),
+        (15, 'Failed disk erasing')])
 
 
 Read more
diff --git a/_modules/maas.py b/_modules/maas.py
index 2a615b7..426aff5 100644
--- a/_modules/maas.py
+++ b/_modules/maas.py
@@ -52,7 +52,10 @@
     (9, 'Deploying'), (6, 'Deployed'), (7, 'Retired'), (8, 'Broken'),
     (11, 'Failed deployment'), (12, 'Releasing'),
     (13, 'Releasing failed'), (14, 'Disk erasing'),
-    (15, 'Failed disk erasing')])
+    (15, 'Failed disk erasing'), (16, 'Rescue mode'),
+    (17, 'Entering rescue mode'), (18, 'Failed to enter rescue mode'),
+    (19, 'Exiting rescue mode'), (20, 'Failed to exit rescue mode'),
+    (21, 'Testing'), (22, 'Failed testing')])
 
 
 def _format_data(data):
@@ -97,6 +100,9 @@
                                 None, **data).read()
 
     def process(self, objects_name=None):
+        # FIXME: this should probably be extended with a "skipped" return.
+        # For example, "DEPLOYED" nodes are currently skipped and no changes
+        # are applied, yet they still end up in the 'updated' list.
         ret = {
             'success': [],
             'errors': {},
@@ -148,6 +154,8 @@
                         self.send(data)
                         ret['success'].append(name)
                 except urllib2.HTTPError as e:
+                    # FIXME: add exception handling for responses like:
+                    # '{"mode": ["Interface is already set to DHCP."]}'
                     etxt = e.read()
                     LOG.error('Failed for object %s reason %s', name, etxt)
                     ret['errors'][name] = str(etxt)
@@ -168,7 +176,11 @@
             LOG.exception('Error Global')
             raise
         if ret['errors']:
-            raise Exception(ret)
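+            # Treat "already exists" errors as success to keep the run idempotent.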
+            if 'already exists' in str(ret['errors']):
+                ret['success'] = ret['errors']
+                ret['errors'] = {}
+            else:
+                raise Exception(ret)
         return ret
 
 
@@ -281,6 +293,27 @@
         return new
 
 
+class Boot_source(MaasObject):
+    def __init__(self):
+        super(Boot_source, self).__init__()
+        self._all_elements_url = u'api/2.0/boot-sources/'
+        self._create_url = u'api/2.0/boot-sources/'
+        self._update_url = u'api/2.0/boot-sources/{0}/'
+        self._config_path = 'region.boot_sources'
+        self._element_key = 'id'
+
+    def fill_data(self, name, boot_source):
+        data = {
+            'name': name,
+            'url': boot_source.get('url', ''),
+            'keyring_filename': boot_source.get('keyring_file', ''),
+        }
+        return data
+
+    def update(self, new, old):
+        new['id'] = str(old['id'])
+        return new
+
 class PacketRepository(MaasObject):
     def __init__(self):
         super(PacketRepository, self).__init__()
@@ -373,13 +406,23 @@
 
     def fill_data(self, name, machine_data):
         power_data = machine_data['power_parameters']
+        machine_pxe_mac = machine_data.get('pxe_interface_mac', None)
+        if machine_data.get("interface", None):
+            LOG.warning(
+                "Old machine-describe detected! "
+                "Please read documentation for "
+                "'salt-formulas/maas' for migration!")
+            machine_pxe_mac = machine_data['interface'].get('mac', None)
+        if not machine_pxe_mac:
+            raise Exception("PXE MAC for machine:{} not defined".format(name))
         data = {
             'hostname': name,
             'architecture': machine_data.get('architecture', 'amd64/generic'),
-            'mac_addresses': machine_data['interface']['mac'],
-            'power_type': machine_data.get('power_type', 'ipmi'),
-            'power_parameters_power_address': power_data['power_address'],
+            'mac_addresses': machine_pxe_mac,
+            'power_type': power_data.get('power_type', 'manual'),
         }
+        if 'power_address' in power_data:
+            data['power_parameters_power_address'] = power_data['power_address']
         if 'power_driver' in power_data:
             data['power_parameters_power_driver'] = power_data['power_driver']
         if 'power_user' in power_data:
@@ -389,6 +432,16 @@
                 power_data['power_password']
         if 'power_id' in power_data:
             data['power_parameters_power_id'] = power_data['power_id']
+        if 'power_nova_id' in power_data:
+            data['power_parameters_nova_id'] = power_data['power_nova_id']
+        if 'power_os_tenantname' in power_data:
+            data['power_parameters_os_tenantname'] = power_data['power_os_tenantname']
+        if 'power_os_username' in power_data:
+            data['power_parameters_os_username'] = power_data['power_os_username']
+        if 'power_os_password' in power_data:
+            data['power_parameters_os_password'] = power_data['power_os_password']
+        if 'power_os_authurl' in power_data:
+            data['power_parameters_os_authurl'] = power_data['power_os_authurl']
         return data
 
     def update(self, new, old):
@@ -404,6 +457,7 @@
 
 
 class AssignMachinesIP(MaasObject):
+    # FIXME
     READY = 4
     DEPLOYED = 6
 
@@ -419,27 +473,153 @@
         self._extra_data_urls = {'machines': (u'api/2.0/machines/',
                                               None, 'hostname')}
 
-    def fill_data(self, name, data, machines):
-        interface = data['interface']
-        machine = machines[name]
-        if machine['status'] == self.DEPLOYED:
-            return
-        if machine['status'] != self.READY:
-            raise Exception('Machine:{} not in READY state'.format(name))
-        if 'ip' not in interface:
-            return
+    def _data_old(self, _interface, _machine):
+        """
+        _interface = {
+            "mac": "11:22:33:44:55:77",
+            "mode": "STATIC",
+            "ip": "2.2.3.15",
+            "subnet": "subnet1",
+            "gateway": "2.2.3.2",
+        }
+        :param data:
+        :return:
+        """
         data = {
             'mode': 'STATIC',
-            'subnet': str(interface.get('subnet')),
-            'ip_address': str(interface.get('ip')),
+            'subnet': str(_interface.get('subnet')),
+            'ip_address': str(_interface.get('ip')),
         }
-        if 'default_gateway' in interface:
-            data['default_gateway'] = interface.get('gateway')
+        if 'gateway' in _interface:
+            data['default_gateway'] = _interface.get('gateway')
         data['force'] = '1'
-        data['system_id'] = str(machine['system_id'])
-        data['interface_id'] = str(machine['interface_set'][0]['id'])
+        data['system_id'] = str(_machine['system_id'])
+        data['interface_id'] = str(_machine['interface_set'][0]['id'])
         return data
 
+    def _get_nic_id_by_mac(self, machine, req_mac=None):
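+        # Return the id of the NIC matching req_mac, or the full mac->id map when req_mac is None.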
+        data = {}
+        for nic in machine['interface_set']:
+            data[nic['mac_address']] = nic['id']
+        if req_mac:
+            if req_mac in data.keys():
+                return data[req_mac]
+            else:
+                raise Exception('NIC with mac:{} not found at '
+                                'node:{}'.format(req_mac, machine['fqdn']))
+        return data
+
+    def _disconnect_all_nic(self, machine):
+        """
+            MaaS will fail if the same configuration is applied to different
+            interfaces. At the same time it is not possible to push the whole
+            network schema at once, so everything needs to be cleaned up
+            before configuring.
+        :param machine:
+        :return:
+        """
+        for nic in machine['interface_set']:
+            LOG.debug("Disconnecting interface:{}".format(nic['mac_address']))
+            try:
+                self._maas.post(
+                    u'/api/2.0/nodes/{}/interfaces/{}/'.format(
+                        machine['system_id'], nic['id']), 'disconnect')
+            except Exception as e:
+                LOG.error("Failed to disconnect interface:{} on node:{}".format(
+                    nic['mac_address'], machine['fqdn']))
+                raise Exception(str(e))
+
+    def _process_interface(self, nic_data,  machine):
+        """
+            Process exactly one interface:
+                - update interface
+                - link to network
+            These functions are self-contained and do not require the
+            external "process" method. They break the old MaasObject logic,
+            but keep the code simpler for iterable tasks.
+        """
+        nic_id = self._get_nic_id_by_mac(machine, nic_data['mac'])
+
+        # Process op=link_subnet
+        link_data = {}
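+        # The interface mode defaults to AUTO when not specified in the pillar.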
+        _mode = nic_data.get('mode', 'AUTO').upper()
+        if _mode == 'STATIC':
+            link_data = {
+                'mode': 'STATIC',
+                'subnet': str(nic_data.get('subnet')),
+                'ip_address': str(nic_data.get('ip')),
+                'default_gateway': str(nic_data.get('gateway', "")),
+            }
+        elif _mode == 'DHCP':
+            link_data = {
+                'mode': 'DHCP',
+                'subnet': str(nic_data.get('subnet')),
+            }
+        elif _mode == 'AUTO':
+            link_data = {'mode': 'AUTO',
+                         'default_gateway': str(nic_data.get('gateway', "")), }
+        elif _mode in ('LINK_UP', 'UNCONFIGURED'):
+            link_data = {'mode': 'LINK_UP'}
+        else:
+            raise Exception('Wrong IP mode:{}'
+                            ' for node:{}'.format(_mode, machine['fqdn']))
+        link_data['force'] = str(1)
+
+        physical_data = {"name": nic_data.get("name", ""),
+                         "tags": nic_data.get('tags', ""),
+                         "vlan": nic_data.get('vlan', "")}
+
+        try:
+            # Clean up the old definition
+            LOG.debug("Processing {}:{},{}".format(nic_data['mac'], link_data,
+                                                   physical_data))
+            # "link_subnet" and "fill all other data" - its 2 different
+            # operations. So, first we update NIC:
+            self._maas.put(
+                u'/api/2.0/nodes/{}/interfaces/{}/'.format(machine['system_id'],
+                                                           nic_id),
+                **physical_data)
+            # And then link the subnet configuration:
+            self._maas.post(
+                u'/api/2.0/nodes/{}/interfaces/{}/'.format(machine['system_id'],
+                                                           nic_id),
+                'link_subnet', **link_data)
+        except Exception as e:
+            LOG.error("Failed to process interface:{} on node:{}".format(
+                nic_data['mac'], machine['fqdn']))
+            raise Exception(str(e))
+
+    def fill_data(self, name, data, machines):
+        machine = machines[name]
+        if machine['status'] == self.DEPLOYED:
+            LOG.debug("Skipping node:{} "
+                      "since it in status:DEPLOYED".format(name))
+            return
+        if machine['status'] != self.READY:
+            raise Exception('Machine:{} not in status:READY'.format(name))
+        # backward compatibility with the old schema
+        if data.get("interface", None):
+            if 'ip' not in data["interface"]:
+                LOG.info("No IP NIC definition for:{}".format(name))
+                return
+            LOG.warning(
+                "Old machine-describe detected! "
+                "Please read documentation "
+                "'salt-formulas/maas' for migration!")
+            return self._data_old(data['interface'], machines[name])
+        # New-schema processing:
+        # Warning: the old-style MaasObject.process is still called, but
+        # with empty data to process.
+        interfaces = data.get('interfaces', {})
+        if len(interfaces.keys()) == 0:
+            LOG.info("No IP NIC definition for:{}".format(name))
+            return
+        LOG.info('%s for %s', self.__class__.__name__.lower(),
+                 machine['fqdn'])
+        self._disconnect_all_nic(machine)
+        for key, value in sorted(interfaces.iteritems()):
+            self._process_interface(value, machine)
+
 
 class DeployMachines(MaasObject):
     # FIXME
@@ -756,6 +936,8 @@
 def process_fabrics():
     return Fabric().process()
 
+def process_boot_sources():
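+    """Process boot sources defined under the region.boot_sources pillar key."""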
+    return Boot_source().process()
 
 def process_subnets():
     return Subnet().process()
@@ -778,6 +960,10 @@
 
 
 def process_assign_machines_ip(*args):
+    """
+    Manage interface configurations.
+    See readme.rst for more info
+    """
     return AssignMachinesIP().process(*args)
 
 
diff --git a/_modules/maasng.py b/_modules/maasng.py
new file mode 100644
index 0000000..1939b80
--- /dev/null
+++ b/_modules/maasng.py
@@ -0,0 +1,898 @@
+# -*- coding: utf-8 -*-
+'''
+Module for handling maas calls.
+
+:optdepends:    pyapi-maas Python adapter
+:configuration: This module is not usable until the following are specified
+                either in a pillar or in the minion's config file::
+
+        maas.url: 'https://maas.domain.com/'
+        maas.token: fdsfdsdsdsfa:fsdfae3fassd:fdsfdsfsafasdfsa
+
+'''
+
+from __future__ import absolute_import
+
+import collections
+import copy
+import hashlib
+import io
+import json
+import logging
+import os.path
+import time
+import urllib2
+# Salt utils
+from salt.exceptions import CommandExecutionError, SaltInvocationError
+
+LOG = logging.getLogger(__name__)
+
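+# Decimal multipliers used to convert human-readable sizes such as "10G" to bytes.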
+SIZE = {
+    "M": 1000000,
+    "G": 1000000000,
+    "T": 1000000000000,
+}
+
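+# Mapping from numeric RAID levels in the pillar to MaaS RAID level names.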
+RAID = {
+    0: "raid-0",
+    1: "raid-1",
+    5: "raid-5",
+    10: "raid-10",
+}
+
+# Import third party libs
+HAS_MASS = False
+try:
+    from maas_client import MAASClient, MAASDispatcher, MAASOAuth
+    HAS_MASS = True
+except ImportError:
+    LOG.debug('MaaS client module is missing. Skipping.')
+
+
+def __virtual__():
+    '''
+    Only load this module if maas-client
+    is installed on this minion.
+    '''
+    if HAS_MASS:
+        return 'maasng'
+    return False
+
+
+APIKEY_FILE = '/var/lib/maas/.maas_credentials'
+
+
+def _format_data(data):
+    class Lazy:
+        def __str__(self):
+            return ' '.join(['{0}={1}'.format(k, v)
+                             for k, v in data.iteritems()])
+    return Lazy()
+
+
+def _create_maas_client():
+    global APIKEY_FILE
+    try:
+        api_token = file(APIKEY_FILE).read().splitlines()[-1].strip()\
+            .split(':')
+    except Exception:
+        LOG.exception('Failed to read MaaS API token from %s', APIKEY_FILE)
+    auth = MAASOAuth(*api_token)
+    api_url = 'http://localhost:5240/MAAS'
+    dispatcher = MAASDispatcher()
+    return MAASClient(auth, dispatcher, api_url)
+
+
+def _get_blockdevice_id_by_name(hostname, device):
+
+    # TODO validation
+    return list_blockdevices(hostname)[device]["id"]
+
+
+def _get_volume_group_id_by_name(hostname, device):
+
+    # TODO validation
+    return list_volume_groups(hostname)[device]["id"]
+
+
+def _get_partition_id_by_name(hostname, device, partition):
+
+    # TODO validation
+    return list_partitions(hostname, device)[partition]["id"]
+
+# MACHINE SECTION
+
+
+def get_machine(hostname):
+    '''
+    Get information about the specified machine
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-call maasng.get_machine server_hostname
+    '''
+    try:
+        return list_machines()[hostname]
+    except KeyError:
+        return {"error": "Machine not found on MaaS server"}
+
+
+def list_machines():
+    '''
+    Get list of all machines from maas server
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.list_machines
+    '''
+    machines = {}
+    maas = _create_maas_client()
+    json_res = json.loads(maas.get(u'api/2.0/machines/').read())
+    for item in json_res:
+        machines[item["hostname"]] = item
+    return machines
+
+
+def create_machine():
+    # TODO
+
+    return False
+
+
+def update_machine():
+    # TODO
+
+    return False
+
+# MACHINE OPERATIONS
+# TODO
+
+# RAID SECTION
+
+
+def create_raid(hostname, name, level, disks=[], partitions=[], **kwargs):
+    '''
+    Create new raid on machine.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-call maasng.create_raid hostname=kvm03 name=md0 level=1 disks=[vdb,vdc] partitions=[vdd-part1,vde-part1]
+    '''
+
+    result = {}
+
+    if len(disks) == 0 and len(partitions) == 0:
+        result["error"] = "Disks or partitions need to be provided"
+        return result
+
+    disk_ids = []
+    partition_ids = []
+
+    for disk in disks:
+        try:
+            disk_ids.append(str(_get_blockdevice_id_by_name(hostname, disk)))
+        except KeyError:
+            result["error"] = "Device {0} does not exists on machine {1}".format(
+                disk, hostname)
+            return result
+
+    for partition in partitions:
+        try:
+            device = partition.split("-")[0]
+            device_part = list_partitions(hostname, device)
+            partition_ids.append(str(device_part[partition]["id"]))
+        except KeyError:
+            result["error"] = "Partition {0} does not exists on machine {1}".format(
+                partition, hostname)
+            return result
+
+    data = {
+        "name": name,
+        "level": RAID[int(level)],
+        "block_devices": disk_ids,
+        "partitions": partition_ids,
+    }
+
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    LOG.info(system_id)
+
+    # TODO validation
+    LOG.info(data)
+    json_res = json.loads(
+        maas.post(u"api/2.0/nodes/{0}/raids/".format(system_id), None, **data).read())
+    LOG.info(json_res)
+    result["new"] = "Raid {0} created".format(name)
+
+    return result
+
+
+def list_raids(hostname):
+    '''
+    Get list of all RAIDs on the machine
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-call maasng.list_raids server_hostname
+    '''
+
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    LOG.info(system_id)
+    # TODO validation
+    json_res = json.loads(
+        maas.get(u"api/2.0/nodes/{0}/raids/".format(system_id)).read())
+    LOG.info(json_res)
+
+    # TODO return list of raid devices
+    return True
+
+
+def get_raid(hostname, name):
+    '''
+    Get information about specific raid on machine
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-call maasng.get_raid server_hostname md0
+    '''
+
+    return list_raids(hostname)[name]
+
+
+# BLOCKDEVICES SECTION
+
+def list_blockdevices(hostname):
+    '''
+    Get list of all blockdevices (disks) on machine
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.list_blockdevices server_hostname
+        salt-call maasng.list_blockdevices server_hostname
+    '''
+    ret = {}
+
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    LOG.info(system_id)
+
+    # TODO validation if exists
+
+    json_res = json.loads(
+        maas.get(u"api/2.0/nodes/{0}/blockdevices/".format(system_id)).read())
+    LOG.info(json_res)
+    for item in json_res:
+        ret[item["name"]] = item
+
+    return ret
+
+
+def get_blockdevice(hostname, name):
+    '''
+    Get information about blockdevice (disk) on machine
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.get_blockdevice server_hostname sda
+        salt-call maasng.get_blockdevice server_hostname sda
+    '''
+
+    return list_blockdevices(hostname)[name]
+
+
+# PARTITIONS
+
+def list_partitions(hostname, device):
+    '''
+    Get list of all partitions on specific device located on specific machine
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.list_partitions server_hostname sda
+        salt-call maasng.list_partitions server_hostname sda
+    '''
+    ret = {}
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    LOG.info(system_id)
+
+    partitions = get_blockdevice(hostname, device)["partitions"]
+    LOG.info(partitions)
+
+    #json_res = json.loads(maas.get(u"api/2.0/nodes/{0}/blockdevices/{1}/partitions/".format(system_id, device_id)).read())
+    # LOG.info(json_res)
+
+    if len(device) > 0:
+        for item in partitions:
+            name = item["path"].split('/')[-1]
+            ret[name] = item
+
+    return ret
+
+
+def get_partition(hostname, device, partition):
+    '''
+    Get information about a specific partition on a device located on the machine
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.get_partition server_hostname disk_name partition
+        salt-call maasng.get_partition server_hostname disk_name partition
+
+    '''
+
+    return list_partitions(hostname, device)[partition]
+
+
+def create_partition(hostname, disk, size, fs_type=None, mount=None):
+    '''
+    Create new partition on device.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.create_partition server_hostname disk_name 10 ext4 "/"
+        salt-call maasng.create_partition server_hostname disk_name 10 ext4 "/"
+    '''
+    # TODO validation
+    result = {}
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    LOG.info(system_id)
+
+    device_id = _get_blockdevice_id_by_name(hostname, disk)
+    LOG.info(device_id)
+
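+    # Split e.g. "10G" into value and unit, then convert to bytes using the decimal SIZE map.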
+    value, unit = size[:-1], size[-1]
+    calc_size = str(int(value) * SIZE[unit])
+    LOG.info(calc_size)
+
+    data = {
+        "size": calc_size
+    }
+
+    # TODO validation
+    partition = json.loads(maas.post(
+        u"api/2.0/nodes/{0}/blockdevices/{1}/partitions/".format(system_id, device_id), None, **data).read())
+    LOG.info(partition)
+    result["partition"] = "Partition created on {0}".format(disk)
+
+    if fs_type != None:
+        data_fs_type = {
+            "fstype": fs_type
+        }
+        partition_id = str(partition["id"])
+        LOG.info("Partition id: " + partition_id)
+        # TODO validation
+        json_res = json.loads(maas.post(u"api/2.0/nodes/{0}/blockdevices/{1}/partition/{2}".format(
+            system_id, device_id, partition_id), "format", **data_fs_type).read())
+        LOG.info(json_res)
+        result["filesystem"] = "Filesystem {0} created".format(fs_type)
+
+    if mount != None:
+        data = {
+            "mount_point": mount
+        }
+
+        # TODO validation
+        json_res = json.loads(maas.post(u"api/2.0/nodes/{0}/blockdevices/{1}/partition/{2}".format(
+            system_id, device_id, str(partition['id'])), "mount", **data).read())
+        LOG.info(json_res)
+        result["mount"] = "Mount point {0} created".format(mount)
+
+    return result
+
+
+def delete_partition(hostname, disk, partition_name):
+    '''
+    Delete partition on device.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.delete_partition server_hostname disk_name partition_name
+        salt-call maasng.delete_partition server_hostname disk_name partition_name
+
+    '''
+    result = {}
+    data = {}
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    LOG.info(system_id)
+
+    device_id = _get_blockdevice_id_by_name(hostname, disk)
+    LOG.info(device_id)
+
+    partition_id = _get_partition_id_by_name(hostname, disk, partition_name)
+
+    maas.delete(u"api/2.0/nodes/{0}/blockdevices/{1}/partition/{2}".format(
+        system_id, device_id, partition_id)).read()
+    result["new"] = "Partition {0} deleted".format(partition_name)
+    return result
+
+
+def delete_partition_by_id(hostname, disk, partition_id):
+    '''
+    Delete partition on device. The partition is specified by its id.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.delete_partition_by_id server_hostname disk_name partition_id
+        salt-call maasng.delete_partition_by_id server_hostname disk_name partition_id
+
+    '''
+    result = {}
+    data = {}
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    LOG.info(system_id)
+
+    device_id = _get_blockdevice_id_by_name(hostname, disk)
+    LOG.info(device_id)
+
+    maas.delete(u"api/2.0/nodes/{0}/blockdevices/{1}/partition/{2}".format(
+        system_id, device_id, partition_id)).read()
+    result["new"] = "Partition {0} deleted".format(partition_id)
+    return result
+
+# CREATE DISK LAYOUT
+# TODO
+
+
+def update_disk_layout(hostname, layout, root_size=None, root_device=None, volume_group=None, volume_name=None, volume_size=None):
+    '''
+    Update disk layout. Flat or LVM layout supported.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.update_disk_layout server_hostname lvm root_size=None, root_device=None, volume_group=None, volume_name=None, volume_size=None
+        salt-call maasng.update_disk_layout server_hostname lvm root_size=None, root_device=None, volume_group=None, volume_name=None, volume_size=None
+
+        root_size = size in GB
+    '''
+    result = {}
+    data = {
+        "storage_layout": layout,
+    }
+
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    LOG.info(system_id)
+
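+    # root_size and volume_size are given in GB and are converted to bytes below (1073741824 = 1024 ** 3).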
+    if root_size != None:
+        bit_size = str(root_size * 1073741824)
+        LOG.info(bit_size)
+        data["root_size"] = bit_size
+
+    if root_device != None:
+        LOG.info(root_device)
+        data["root_device"] = str(
+            _get_blockdevice_id_by_name(hostname, root_device))
+
+    if layout == 'lvm':
+        if volume_group != None:
+            LOG.info(volume_group)
+            data["vg_name"] = volume_group
+        if volume_name != None:
+            LOG.info(volume_name)
+            data["lv_name"] = volume_name
+        if volume_size != None:
+            vol_size = str(volume_size * 1073741824)
+            LOG.info(vol_size)
+            data["lv_size"] = vol_size
+
+    # TODO validation
+    json_res = json.loads(maas.post(
+        u"api/2.0/machines/{0}/".format(system_id), "set_storage_layout", **data).read())
+    LOG.info(json_res)
+    result["new"] = {
+        "storage_layout": layout,
+    }
+
+    return result
+
+
+# LVM
+# TODO
+
+def list_volume_groups(hostname):
+    '''
+    Get list of all volume groups on the machine.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.list_volume_groups server_hostname
+        salt-call maasng.list_volume_groups server_hostname
+    '''
+    volume_groups = {}
+
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    LOG.info(system_id)
+
+    # TODO validation if exists
+
+    json_res = json.loads(
+        maas.get(u"api/2.0/nodes/{0}/volume-groups/".format(system_id)).read())
+    LOG.info(json_res)
+    for item in json_res:
+        volume_groups[item["name"]] = item
+    # return
+    return volume_groups
+
+
+def get_volume_group(hostname, name):
+    '''
+    Get information about specific volume group on machine.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.get_volume_group server_hostname vg0
+        salt-call maasng.get_volume_group server_hostname vg0
+    '''
+    # TODO validation that exists
+    return list_volume_groups(hostname)[name]
+
+
+def create_volume_group(hostname, volume_group_name, disks=[], partitions=[]):
+    '''
+    Create a new volume group on the machine. Disks or partitions need to be provided.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.create_volume_group server_hostname volume_group_name disks=[sda,sdb] partitions=[]
+        salt-call maasng.create_volume_group server_hostname volume_group_name disks=[sda,sdb] partitions=[]
+    '''
+    result = {}
+
+    data = {
+        "name": volume_group_name,
+    }
+
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    LOG.info(system_id)
+
+    disk_ids = []
+    partition_ids = []
+
+    for disk in disks:
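+        # Only whole disks without an existing partition table can be added to the volume group.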
+        p_disk = get_blockdevice(hostname, disk)
+        if p_disk["partition_table_type"] == None:
+            disk_ids.append(str(p_disk["id"]))
+        else:
+            result["error"] = "Device {0} on machine {1} cointains partition table".format(
+                disk, hostname)
+            return result
+
+    for partition in partitions:
+        try:
+            device = partition.split("-")[0]
+            device_part = list_partitions(hostname, device)
+            partition_ids.append(str(device_part[partition]["id"]))
+        except KeyError:
+            result["error"] = "Partition {0} does not exists on machine {1}".format(
+                partition, hostname)
+            return result
+
+    data["block_devices"] = disk_ids
+    data["partitions"] = partition_ids
+    LOG.info(partition_ids)
+    LOG.info(partitions)
+
+    # TODO validation
+    json_res = json.loads(maas.post(
+        u"api/2.0/nodes/{0}/volume-groups/".format(system_id), None, **data).read())
+    LOG.info(json_res)
+    result["new"] = "Volume group {0} created".format(json_res["name"])
+
+    return result
+
+
+def delete_volume_group(hostname, name):
+    '''
+    Delete volume group on machine.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.delete_volume_group server_hostname vg0
+        salt-call maasng.delete_volume_group server_hostname vg0
+    '''
+
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    LOG.info(system_id)
+
+    # TODO partitions
+    # for partition in partitions:
+    #    temp = disk.split("-")
+    #    disk_ids.append(str(_get_blockdevice_id_by_name(hostname, temp[] partition)))
+
+    # TODO partitions
+    vg_id = name
+
+    # TODO validation
+    json_res = json.loads(maas.delete(
+        u"api/2.0/nodes/{0}/volume-group/{1}/".format(system_id, vg_id)).read())
+    LOG.info(json_res)
+
+    return True
+
+
+def create_volume(hostname, volume_name, volume_group, size, fs_type=None, mount=None):
+    '''
+    Create volume on volume group.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.create_volume server_hostname volume_name, volume_group, size, fs_type=None, mount=None
+        salt-call maasng.create_volume server_hostname volume_name, volume_group, size, fs_type=None, mount=None
+    '''
+
+    data = {
+        "name": volume_name,
+    }
+
+    value, unit = size[:-1], size[-1]
+    bit_size = str(int(value) * SIZE[unit])
+    LOG.info(bit_size)
+
+    data["size"] = bit_size
+
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    LOG.info(system_id)
+
+    volume_group_id = str(_get_volume_group_id_by_name(hostname, volume_group))
+
+    LOG.info(volume_group_id)
+
+    # TODO validation
+    json_res = json.loads(maas.post(u"api/2.0/nodes/{0}/volume-group/{1}/".format(
+        system_id, volume_group_id), "create_logical_volume", **data).read())
+    LOG.info(json_res)
+
+    if fs_type != None or mount != None:
+        ret = create_volume_filesystem(
+            hostname, volume_group + "-" + volume_name, fs_type, mount)
+
+    return True
+
+
+def create_volume_filesystem(hostname, device, fs_type=None, mount=None):
+
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+
+    blockdevices_id = _get_blockdevice_id_by_name(hostname, device)
+    data = {}
+    if fs_type != None:
+        data["fstype"] = fs_type
+        # TODO validation
+        json_res = json.loads(maas.post(u"/api/2.0/nodes/{0}/blockdevices/{1}/".format(
+            system_id, blockdevices_id), "format", **data).read())
+        LOG.info(json_res)
+
+    if mount != None:
+        data["mount_point"] = mount
+        # TODO validation
+        json_res = json.loads(maas.post(u"/api/2.0/nodes/{0}/blockdevices/{1}/".format(
+            system_id, blockdevices_id), "mount", **data).read())
+        LOG.info(json_res)
+
+    return True
+
+
+def create_partition_filesystem(hostname, partition, fs_type=None, mount=None):
+
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+
+    device = partition.split("-")[0]
+    blockdevices_id = _get_blockdevice_id_by_name(hostname, device)
+    data = {}
+    if fs_type != None:
+        data["fstype"] = fs_type
+        # TODO validation
+        json_res = json.loads(maas.post(u"/api/2.0/nodes/{0}/blockdevices/{1}/".format(
+            system_id, blockdevices_id), "format", **data).read())
+        LOG.info(json_res)
+
+    if mount != None:
+        data["mount_point"] = mount
+        # TODO validation
+        json_res = json.loads(maas.post(u"/api/2.0/nodes/{0}/blockdevices/{1}/".format(
+            system_id, blockdevices_id), "mount", **data).read())
+        LOG.info(json_res)
+
+    return True
+
+
+def set_boot_disk(hostname, name):
+    '''
+    Set the specified disk as the boot disk on the machine.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.set_boot_disk server_hostname disk_name
+        salt-call maasng.set_boot_disk server_hostname disk_name
+    '''
+    data = {}
+    result = {}
+    maas = _create_maas_client()
+    system_id = get_machine(hostname)["system_id"]
+    blockdevices_id = _get_blockdevice_id_by_name(hostname, name)
+
+    maas.post(u"/api/2.0/nodes/{0}/blockdevices/{1}/".format(
+        system_id, blockdevices_id), "set_boot_disk", **data).read()
+    # TODO validation for error response (disk does not exists and node does not exists)
+    result["new"] = "Disk {0} was set as bootable".format(name)
+
+    return result
+
+
+def list_fabric():
+    '''
+    Get list of all fabrics
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.list_fabric
+    '''
+    fabrics = {}
+    maas = _create_maas_client()
+    json_res = json.loads(maas.get(u'api/2.0/fabrics/').read())
+    LOG.info(json_res)
+    for item in json_res:
+        fabrics[item["name"]] = item
+    return fabrics
+
+
+def create_fabric(name):
+    '''
+    Create new fabric.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.create_fabric
+    '''
+    result = {}
+    data = {
+        "name": name,
+        "description": '',
+        "class_type": '',
+
+    }
+
+    maas = _create_maas_client()
+    json_res = json.loads(maas.post(u"api/2.0/fabrics/", None, **data).read())
+    LOG.info(json_res)
+    result["new"] = "Fabrics {0} created".format(json_res["name"])
+    return result
+
+
+def list_subnet():
+    '''
+    Get list of all subnets
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.list_subnet
+    '''
+    subnets = {}
+    maas = _create_maas_client()
+    json_res = json.loads(maas.get(u'api/2.0/subnets/').read())
+    LOG.info(json_res)
+    for item in json_res:
+        subnets[item["name"]] = item
+    return subnets
+
+
+def list_vlans(fabric):
+    '''
+    Get list of all vlans for specific fabric
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.list_vlans fabric_name
+    '''
+    vlans = {}
+    maas = _create_maas_client()
+    fabric_id = get_fabric(fabric)
+
+    json_res = json.loads(
+        maas.get(u'api/2.0/fabrics/{0}/vlans/'.format(fabric_id)).read())
+    LOG.info(json_res)
+    for item in json_res:
+        vlans[item["name"]] = item
+    return vlans
+
+
+def get_fabric(fabric):
+    '''
+    Get id for specific fabric
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-call maasng.get_fabric fabric_name
+    '''
+    try:
+        return list_fabric()[fabric]['id']
+    except KeyError:
+        return {"error": "Frabic not found on MaaS server"}
+
+
+def update_vlan(name, fabric, vid, description, dhcp_on=False):
+    '''
+    Update vlan
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'maas-node' maasng.update_vlan name, fabric, vid, description, dhcp_on
+    '''
+    result = {}
+
+    data = {
+        "name": name,
+        "dhcp_on": str(dhcp_on),
+        "description": description,
+    }
+    maas = _create_maas_client()
+    fabric_id = get_fabric(fabric)
+
+    json_res = json.loads(maas.put(
+        u'api/2.0/fabrics/{0}/vlans/{1}/'.format(fabric_id, vid), **data).read())
+    LOG.info(json_res)
+    result["new"] = "Vlan {0} was updated".format(json_res["name"])
+
+    return result
diff --git a/_states/maasng.py b/_states/maasng.py
new file mode 100644
index 0000000..ad30025
--- /dev/null
+++ b/_states/maasng.py
@@ -0,0 +1,358 @@
+
+import logging
+from salt.exceptions import CommandExecutionError, SaltInvocationError
+
+LOG = logging.getLogger(__name__)
+
+SIZE = {
+    "M": 1000000,
+    "G": 1000000000,
+    "T": 1000000000000,
+}
+
+RAID = {
+    0: "raid-0",
+    1: "raid-1",
+    5: "raid-5",
+    10: "raid-10",
+}
+
+
+def __virtual__():
+    '''
+    Load MaaSng module
+    '''
+    return 'maasng'
+
+
+def disk_layout_present(hostname, layout_type, root_size=None, root_device=None, volume_group=None, volume_name=None, volume_size=None, disk={}, **kwargs):
+    '''
+    Ensure that the disk layout exists on the machine
+
+    :param hostname: The hostname of the machine
+    :param layout_type: The layout type, either "flat" or "lvm"
+    '''
+    ret = {'name': hostname,
+           'changes': {},
+           'result': True,
+           'comment': 'Disk layout "{0}" updated'.format(hostname)}
+
+    machine = __salt__['maasng.get_machine'](hostname)
+    if "error" in machine:
+        ret['comment'] = "State execution failed for machine {0}".format(
+            hostname)
+        ret['result'] = False
+        ret['changes'] = machine
+        return ret
+
+    if machine["status_name"] != "Ready":
+        ret['comment'] = 'Machine {0} is not in Ready state.'.format(hostname)
+        return ret
+
+    if __opts__['test']:
+        ret['result'] = None
+        ret['comment'] = 'Disk layout will be updated on {0}, this action will delete current layout.'.format(
+            hostname)
+        return ret
+
+    if layout_type == "flat":
+
+        ret["changes"] = __salt__['maasng.update_disk_layout'](
+            hostname, layout_type, root_size, root_device)
+
+    elif layout_type == "lvm":
+
+        ret["changes"] = __salt__['maasng.update_disk_layout'](
+            hostname, layout_type, root_size, root_device, volume_group, volume_name, volume_size)
+
+    else:
+        ret["comment"] = "Not supported layout provided. Choose flat or lvm"
+        ret['result'] = False
+
+    return ret
+
+
+def raid_present(hostname, name, level, devices=[], partitions=[], partition_schema={}):
+    '''
+    Ensure that the RAID exists on the machine
+
+    :param hostname: The hostname of the machine
+    :param name: The name of the RAID device
+    '''
+
+    ret = {'name': name,
+           'changes': {},
+           'result': True,
+           'comment': 'Raid {0} presented on {1}'.format(name, hostname)}
+
+    machine = __salt__['maasng.get_machine'](hostname)
+    if "error" in machine:
+        ret['comment'] = "State execution failed for machine {0}".format(
+            hostname)
+        ret['result'] = False
+        ret['changes'] = machine
+        return ret
+
+    if machine["status_name"] != "Ready":
+        ret['comment'] = 'Machine {0} is not in Ready state.'.format(hostname)
+        return ret
+
+    if __opts__['test']:
+        ret['result'] = None
+        ret['comment'] = 'Raid {0} will be updated on {1}'.format(
+            name, hostname)
+        return ret
+
+    # Validate that raid exists
+    # With correct devices/partition
+    # OR
+    # Create raid
+
+    ret["changes"] = __salt__['maasng.create_raid'](
+        hostname=hostname, name=name, level=level, disks=devices, partitions=partitions)
+
+    # TODO partitions
+    ret["changes"].update(disk_partition_present(
+        hostname, name, partition_schema)["changes"])
+
+    if "error" in ret["changes"]:
+        ret["result"] = False
+
+    return ret
+
+
+def disk_partition_present(hostname, disk, partition_schema={}):
+    '''
+    Ensure that the disk has the correct partitioning schema
+
+    :param hostname: The hostname of the machine
+    :param disk: The name of the disk to partition
+    '''
+
+    # 1. Validate that disk has correct values for size and mount
+    # a. validate count of partitions
+    # b. validate size of partitions
+    # 2. If not delete all partitions on disk and recreate schema
+    # 3. Validate type exists
+    # if should not exits
+    # delete mount and unformat
+    # 4. Validate mount exists
+    # 5. if not enforce umount or mount
+
+    ret = {'name': hostname,
+           'changes': {},
+           'result': True,
+           'comment': 'Disk layout {0} presented'.format(disk)}
+
+    machine = __salt__['maasng.get_machine'](hostname)
+    if "error" in machine:
+        ret['comment'] = "State execution failed for machine {0}".format(
+            hostname)
+        ret['result'] = False
+        ret['changes'] = machine
+        return ret
+
+    if machine["status_name"] != "Ready":
+        ret['comment'] = 'Machine {0} is not in Ready state.'.format(hostname)
+        return ret
+
+    if __opts__['test']:
+        ret['result'] = None
+        ret['comment'] = 'Partition schema will be changed on {0}'.format(disk)
+        return ret
+
+    partitions = __salt__['maasng.list_partitions'](hostname, disk)
+
+    # Calculate actual size in bytes from provided data
+    for part_name, part in partition_schema.iteritems():
+        size, unit = part["size"][:-1], part["size"][-1]
+        part["calc_size"] = int(size) * SIZE[unit]
+
+    if len(partitions) == len(partition_schema):
+
+        for part_name, part in partition_schema.iteritems():
+            LOG.info('validated {0}'.format(part["calc_size"]))
+            LOG.info('validated {0}'.format(
+                int(partitions[disk+"-"+part_name.split("-")[-1]]["size"])))
+            if part["calc_size"] == int(partitions[disk+"-"+part_name.split("-")[-1]]["size"]):
+                LOG.info('validated')
+                # TODO validate size (size from maas is not same as calculate?)
+                # TODO validate mount
+                # TODO validate fs type
+            else:
+                LOG.info('breaking')
+                break
+            return ret
+
+    # Delete and recreate
+    LOG.info('delete')
+    for partition_name, partition in partitions.iteritems():
+        LOG.info(partition)
+        # TODO IF LVM create ERROR
+        ret["changes"] = __salt__['maasng.delete_partition_by_id'](
+            hostname, disk, partition["id"])
+
+    LOG.info('recreating')
+    for part_name, part in partition_schema.iteritems():
+        LOG.info("partitition for creation")
+        LOG.info(part)
+        if "mount" not in part:
+            part["mount"] = None
+        if "type" not in part:
+            part["type"] = None
+        ret["changes"] = __salt__['maasng.create_partition'](
+            hostname, disk, part["size"], part["type"], part["mount"])
+
+    if "error" in ret["changes"]:
+        ret["result"] = False
+
+    return ret
+
+
+def volume_group_present(hostname, name, devices=[], partitions=[]):
+    '''
+    Ensure that the LVM volume group exists on the machine
+
+    :param hostname: The hostname of the machine
+    :param name: The name of the volume group
+    :param devices: List of block devices to include in the group
+    :param partitions: List of partitions to include in the group
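+
+    A minimal usage sketch in SLS form (following the pattern used by
+    maas/machines/storage.sls; names are illustrative):
+
+    .. code-block:: yaml
+
+        maas_machine_vg_server3_volume_group2:
+          maasng.volume_group_present:
+            - hostname: server3
+            - name: volume_group2
+            - devices:
+              - raid1
+            - require:
+              - cmd: maas_login_admin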
+    '''
+    ret = {'name': hostname,
+           'changes': {},
+           'result': True,
+           'comment': 'LVM group {0} is present on {1}'.format(name, hostname)}
+
+    machine = __salt__['maasng.get_machine'](hostname)
+    if "error" in machine:
+        ret['comment'] = "State execution failed for machine {0}".format(
+            hostname)
+        ret['result'] = False
+        ret['changes'] = machine
+        return ret
+
+    if machine["status_name"] != "Ready":
+        ret['comment'] = 'Machine {0} is not in Ready state.'.format(hostname)
+        return ret
+
+    # TODO validation if exists
+    vgs = __salt__['maasng.list_volume_groups'](hostname)
+
+    if name in vgs:
+        # TODO validation for devices and partitions
+        return ret
+
+    if __opts__['test']:
+        ret['result'] = None
+        ret['comment'] = 'LVM group {0} will be updated on {1}'.format(
+            name, hostname)
+        return ret
+
+    ret["changes"] = __salt__['maasng.create_volume_group'](
+        hostname, name, devices, partitions)
+
+    if "error" in ret["changes"]:
+        ret["result"] = False
+
+    return ret
+
+
+def volume_present(hostname, name, volume_group_name, size, type=None, mount=None):
+    '''
+    Ensure that the LVM volume exists with the given size, type and mount
+
+    :param hostname: The hostname of the machine
+    :param name: The name of the logical volume
+    :param volume_group_name: The volume group the volume belongs to
+    :param size: Volume size, e.g. 5G
+    :param type: Optional filesystem type
+    :param mount: Optional mount point
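+
+    A minimal usage sketch in SLS form (matching the call emitted by
+    maas/machines/storage.sls; values are illustrative):
+
+    .. code-block:: yaml
+
+        maas_machine_volume_server3_volume_group2_tmp:
+          maasng.volume_present:
+            - hostname: server3
+            - name: tmp
+            - volume_group_name: volume_group2
+            - size: 5G
+            - type: ext4
+            - mount: '/tmp'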
+    '''
+
+    ret = {'name': hostname,
+           'changes': {},
+           'result': True,
+           'comment': 'LVM volume {0} is present on {1}'.format(name, hostname)}
+
+    machine = __salt__['maasng.get_machine'](hostname)
+    if "error" in machine:
+        ret['comment'] = "State execution failed for machine {0}".format(
+            hostname)
+        ret['result'] = False
+        ret['changes'] = machine
+        return ret
+
+    if machine["status_name"] != "Ready":
+        ret['comment'] = 'Machine {0} is not in Ready state.'.format(hostname)
+        return ret
+
+    if __opts__['test']:
+        ret['result'] = None
+        ret['comment'] = 'LVM volume {0} will be updated on {1}'.format(
+            name, hostname)
+        return ret
+
+    # TODO validation if exists
+
+    ret["changes"] = __salt__['maasng.create_volume'](
+        hostname, name, volume_group_name, size, type, mount)
+
+    return ret
+
+
+def select_boot_disk(hostname, name):
+    '''
+    Select the disk that will be used as the boot device
+
+    :param name: The name of disk on machine
+    :param hostname: The hostname of machine
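+
+    A minimal usage sketch in SLS form (as generated by
+    maas/machines/storage.sls for a bootable_device entry; values are
+    illustrative):
+
+    .. code-block:: yaml
+
+        maas_machines_storage_set_bootable_disk_server3_vda:
+          maasng.select_boot_disk:
+          - name: vda
+          - hostname: server3
+          - require:
+            - cmd: maas_login_admin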
+    '''
+
+    ret = {'name': hostname,
+           'changes': {},
+           'result': True,
+           'comment': 'Boot disk {0} is selected on {1}'.format(name, hostname)}
+
+    machine = __salt__['maasng.get_machine'](hostname)
+    if "error" in machine:
+        ret['comment'] = "State execution failed for machine {0}".format(
+            hostname)
+        ret['result'] = False
+        ret['changes'] = machine
+        return ret
+
+    if machine["status_name"] != "Ready":
+        ret['comment'] = 'Machine {0} is not in Ready state.'.format(hostname)
+        return ret
+
+    if __opts__['test']:
+        ret['result'] = None
+        ret['comment'] = 'Boot disk {0} will be selected on {1}'.format(
+            name, hostname)
+        return ret
+
+    # TODO disk validation if exists
+
+    ret["changes"] = __salt__['maasng.set_boot_disk'](hostname, name)
+
+    return ret
+
+
+def update_vlan(name, fabric, vid, description, dhcp_on=False):
+    '''
+    Ensure that the VLAN on the given fabric has the desired parameters
+
+    :param name: Name of vlan
+    :param fabric: Name of fabric
+    :param vid: Vlan id
+    :param description: Description of vlan
+    :param dhcp_on: State of dhcp
+
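+    A minimal usage sketch in SLS form (matching the call emitted by
+    maas/region.sls for each fabric VLAN; values are illustrative):
+
+    .. code-block:: yaml
+
+        maas_update_vlan_for_fabric1_100:
+          maasng.update_vlan:
+          - vid: 100
+          - fabric: fabric1
+          - name: vlan100
+          - description: Test vlan
+          - dhcp_on: True
+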
+    '''
+
+    ret = {'name': fabric,
+           'changes': {},
+           'result': True,
+           'comment': 'Module function maasng.update_vlan executed'}
+
+    if __opts__['test']:
+        ret['result'] = None
+        ret['comment'] = 'Vlan {0} will be updated for {1}'.format(vid, fabric)
+        return ret
+
+    ret["changes"] = __salt__['maasng.update_vlan'](
+        name=name, fabric=fabric, vid=vid, description=description,
+        dhcp_on=dhcp_on)
+
+    if "error" in ret["changes"]:
+        ret['comment'] = "State execution failed for fabric {0}".format(fabric)
+        ret['result'] = False
+
+    return ret
diff --git a/debian/control b/debian/control
index 9516048..9790e4c 100644
--- a/debian/control
+++ b/debian/control
@@ -10,6 +10,6 @@
 
 Package: salt-formula-maas
 Architecture: all
-Depends: ${misc:Depends}, salt-master, reclass
+Depends: ${misc:Depends}
 Description: maas salt formula
  Install and configure maas system.
diff --git a/maas/files/curtin_userdata_arm64_generic_xenial b/maas/files/curtin_userdata_arm64_generic_xenial
new file mode 100644
index 0000000..b070aaa
--- /dev/null
+++ b/maas/files/curtin_userdata_arm64_generic_xenial
@@ -0,0 +1,44 @@
+{%- from "maas/map.jinja" import cluster with context %}
+{% raw %}
+#cloud-config
+debconf_selections:
+ maas: |
+  {{for line in str(curtin_preseed).splitlines()}}
+  {{line}}
+  {{endfor}}
+{{if third_party_drivers and driver}}
+early_commands:
+  {{py: key_string = ''.join(['\\x%x' % x for x in map(ord, driver['key_binary'])])}}
+  driver_00_get_key: /bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg
+  driver_01_add_key: ["apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"]
+  driver_02_add: ["add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"]
+  driver_03_update_install: ["sh", "-c", "apt-get update --quiet && apt-get --assume-yes install {{driver['package']}}"]
+  driver_04_load: ["sh", "-c", "depmod && modprobe {{driver['module']}}"]
+{{endif}}
+late_commands:
+  maas: [wget, '--no-proxy', {{node_disable_pxe_url|escape.json}}, '--post-data', {{node_disable_pxe_data|escape.json}}, '-O', '/dev/null']
+{% endraw %}
+{%- if not cluster.saltstack_repo_key == 'none' %}
+{% set salt_repo_key = salt['hashutil.base64_b64encode'](cluster.saltstack_repo_key) %}
+  apt_00_set_gpg: ["curtin", "in-target", "--", "sh", "-c", "echo '{{salt_repo_key}}' | base64 -d | apt-key add -"]
+{%- endif %}
+{#- NOTE: Re-use amd64 repos on arm64 since most packages are arch independent -#}
+  apt_01_set_repo: ["curtin", "in-target", "--", "sh", "-c", "echo 'deb [arch=amd64] {{ cluster.saltstack_repo_xenial }}' >> /etc/apt/sources.list"]
+{% raw %}
+  apt_03_update: ["curtin", "in-target", "--", "apt-get", "update"]
+  salt_01_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "python-futures", "salt-minion"]
+{% endraw %}
+  salt_02_hostname_set: ["curtin", "in-target", "--", "echo", "{% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}"]
+  salt_03_hostname_get: ["curtin", "in-target", "--", "sh", "-c", "echo 'id: {% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}' >> /etc/salt/minion"]
+  salt_04_master: ["curtin", "in-target", "--", "sh", "-c", "echo 'master: {{ salt_master_ip }}' >> /etc/salt/minion"]
+{% raw %}
+{{if third_party_drivers and driver}}
+  driver_00_key_get: curtin in-target -- sh -c "/bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg"
+  driver_02_key_add: ["curtin", "in-target", "--", "apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"]
+  driver_03_add: ["curtin", "in-target", "--", "add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"]
+  driver_04_update_install: ["curtin", "in-target", "--", "apt-get", "update", "--quiet"]
+  driver_05_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "{{driver['package']}}"]
+  driver_06_depmod: ["curtin", "in-target", "--", "depmod"]
+  driver_07_update_initramfs: ["curtin", "in-target", "--", "update-initramfs", "-u"]
+{{endif}}
+{% endraw %}
diff --git a/maas/files/restore.sh b/maas/files/restore.sh
new file mode 100644
index 0000000..121b1b4
--- /dev/null
+++ b/maas/files/restore.sh
@@ -0,0 +1,38 @@
+{%- from "maas/map.jinja" import region with context %}
+
+{%- set database = region.get("database", {}) %}
+
+export PGHOST={{ database.get("host", "localhost") }}
+export PGUSER={{ database.get("username", "maas") }}
+export PGPASSFILE=/root/.pgpass
+
+{%- set db_name = database.get("name", "maasdb") %}
+{%- set backupninja_host = database.initial_data.get("host", grains.id ) %}
+{%- if database.initial_data.age is defined %}
+{%- set age = database.initial_data.get("age", "0") %}
+{%- else %}{%- set age = "0" %}{%- endif %}
+{%- set backupninja_source = database.initial_data.get("source", "cfg01.local")%}
+{%- set source_name = db_name + ".pg_dump.gz" %}
+{%- set dest_name = db_name + ".pg_dump.gz" %}
+{%- set target = "/root/postgresql/restore_data/" %}
+
+mkdir -p {{ target }}
+
+scp backupninja@{{ backupninja_host }}:/srv/backupninja/{{ backupninja_source }}/var/backups/postgresql/postgresql.{{ age }}/{{ source_name }} {{ target }}{{ dest_name }} 
+gunzip -d -1 -f {{ target }}{{ dest_name }}
+
+scp -r backupninja@{{ backupninja_host }}:/srv/backupninja/{{ backupninja_source }}/etc/maas/maas.{{ age }} /etc/maas
+scp -r backupninja@{{ backupninja_host }}:/srv/backupninja/{{ backupninja_source }}/var/lib/maas/maas.{{ age }} /var/lib/maas
+
+sudo systemctl stop maas-dhcpd.service
+sudo systemctl stop maas-rackd.service
+sudo systemctl stop maas-regiond.service
+
+pg_restore {{ target }}{{ db_name }}.pg_dump --dbname={{ db_name }} --no-password -c
+
+mkdir -p /root/maas/flags
+touch /root/maas/flags/{{ db_name }}-installed
+
+sudo systemctl start maas-dhcpd.service
+sudo systemctl start maas-rackd.service
+sudo systemctl start maas-regiond.service
\ No newline at end of file
diff --git a/maas/init.sls b/maas/init.sls
index 878705e..da8376b 100644
--- a/maas/init.sls
+++ b/maas/init.sls
@@ -3,6 +3,9 @@
 {%- if pillar.maas.cluster is defined %}
 - maas.cluster
 {%- endif %}
+{%- if pillar.maas.mirror is defined %}
+- maas.mirror
+{%- endif %}
 {%- if pillar.maas.region is defined %}
 - maas.region
 {%- endif %}
diff --git a/maas/machines/storage.sls b/maas/machines/storage.sls
new file mode 100644
index 0000000..6e138f9
--- /dev/null
+++ b/maas/machines/storage.sls
@@ -0,0 +1,151 @@
+{%- from "maas/map.jinja" import region with context %}
+
+
+maas_login_admin:
+  cmd.run:
+  - name: "maas-region apikey --username {{ region.admin.username }} > /var/lib/maas/.maas_credentials"
+
+
+{%- for machine_name, machine in region.machines.iteritems() %}
+
+{%- if machine.disk_layout is defined %}
+
+{%- if machine.disk_layout.type is defined %}
+
+maas_machines_storage_{{ machine_name }}_{{ machine.disk_layout.type }}:
+  maasng.disk_layout_present:
+    - hostname: {{ machine_name }}
+    - layout_type: {{ machine.disk_layout.type }}
+    {%- if machine.disk_layout.root_size is defined %}
+    - root_size: {{ machine.disk_layout.root_size }}
+    {%- endif %}
+    {%- if machine.disk_layout.root_device is defined %}
+    - root_device: {{ machine.disk_layout.root_device }}
+    {%- endif %}
+    {%- if machine.disk_layout.volume_group is defined %}
+    - volume_group: {{ machine.disk_layout.volume_group }}
+    {%- endif %}
+    {%- if machine.disk_layout.volume_name is defined %}
+    - volume_name: {{ machine.disk_layout.volume_name }}
+    {%- endif %}
+    {%- if machine.disk_layout.volume_size is defined %}
+    - volume_size: {{ machine.disk_layout.volume_size }}
+    {%- endif %}
+    - require:
+      - cmd: maas_login_admin
+
+{%- endif %}
+
+{%- if machine.disk_layout.bootable_device is defined %}
+
+maas_machines_storage_set_bootable_disk_{{ machine_name }}_{{ machine.disk_layout.bootable_device }}:
+  maasng.select_boot_disk:
+  - name: {{ machine.disk_layout.bootable_device }}
+  - hostname: {{ machine_name }}
+  - require:
+    - cmd: maas_login_admin
+
+{%- endif %}
+
+{%- if machine.disk_layout.disk is defined %}
+
+{%- for disk_name, disk in machine.disk_layout.disk.iteritems() %}
+
+{%- if disk.type == "physical" %}
+
+maas_machine_{{ machine_name }}_{{ disk_name }}:
+  maasng.disk_partition_present:
+    - hostname: {{ machine_name }}
+    - disk: {{ disk_name }}
+    - partition_schema: {{ disk.get("partition_schema", {}) }}
+
+{%- endif %}
+
+{%- if disk.type == "raid" %}
+
+maas_machine_{{ machine_name }}_{{ disk_name }}:
+  maasng.raid_present:
+    - hostname: {{ machine_name }}
+    - name: {{ disk_name }}
+    - level: {{ disk.level }}
+    - devices: {{ disk.get("devices", []) }}
+    - partitions: {{ disk.get("partitions", []) }}
+    - partition_schema: {{ disk.get("partition_schema", {}) }}
+    - require:
+      - cmd: maas_login_admin
+    {%- if disk.devices is defined %}
+    {%- for device_name in disk.devices %}
+      {%- if salt['pillar.get']('maas:region:machines:'+machine_name+':disk_layout:disk:'+device_name) is mapping %}
+      - maasng: maas_machine_{{ machine_name }}_{{ device_name }}
+      {%- endif %}
+    {%- endfor %}
+    {%- endif %}
+    {%- if disk.partitions is defined %}
+    {%- for partition in disk.partitions %}
+      {% set device_name = partition.split('-')[0] %}
+      {%- if salt['pillar.get']('maas:region:machines:'+machine_name+':disk_layout:disk:'+device_name) is mapping %}
+      - maasng: maas_machine_{{ machine_name }}_{{ device_name }}
+      {%- endif %}
+    {%- endfor %}
+    {%- endif %}
+{%- endif %}
+
+{%- if disk.type == "lvm" %}
+
+maas_machine_vg_{{ machine_name }}_{{ disk_name }}:
+  maasng.volume_group_present:
+    - hostname: {{ machine_name }}
+    - name: {{ disk_name }}
+    {%- if disk.devices is defined %}
+    - devices: {{ disk.devices }}
+    {%- endif %}
+    {%- if disk.partitions is defined %}
+    - partitions: {{ disk.partitions }}
+    {%- endif %}
+    - require:
+      - cmd: maas_login_admin
+    {%- if disk.partitions is defined %}
+    {%- for partition in disk.partitions %}
+      {% set device_name = partition.split('-')[0] %}
+      {%- if salt['pillar.get']('maas:region:machines:'+machine_name+':disk_layout:disk:'+device_name) is mapping %}
+      - maasng: maas_machine_{{ machine_name }}_{{ device_name }}
+      {%- endif %}
+    {%- endfor %}
+    {%- endif %}
+    {%- if disk.devices is defined %}
+    {%- for device_name in disk.devices %}
+      {%- if salt['pillar.get']('maas:region:machines:'+machine_name+':disk_layout:disk:'+device_name) is mapping %}
+      - maasng: maas_machine_{{ machine_name }}_{{ device_name }}
+      {%- endif %}
+    {%- endfor %}
+    {%- endif %}
+
+{%- for volume_name, volume in disk.volume.iteritems() %}
+
+maas_machine_volume_{{ machine_name }}_{{ disk_name }}_{{ volume_name }}:
+  maasng.volume_present:
+    - hostname: {{ machine_name }}
+    - name: {{ volume_name }}
+    - volume_group_name: {{ disk_name }}
+    - size: {{ volume.size }}
+    {%- if volume.type is defined %}
+    - type: {{ volume.type }}
+    {%- endif %}
+    {%- if volume.mount is defined %}
+    - mount: {{ volume.mount }}
+    {%- endif %}
+    - require:
+      - maasng: maas_machine_vg_{{ machine_name }}_{{ disk_name }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- endif %}
+
+{%- endfor %}
+
diff --git a/maas/map.jinja b/maas/map.jinja
index 8c0a7d9..bcf8a48 100644
--- a/maas/map.jinja
+++ b/maas/map.jinja
@@ -24,3 +24,11 @@
 {%- endload %}
 
 {%- set region = salt['grains.filter_by'](region_defaults, merge=salt['pillar.get']('maas:region', {})) %}
+
+{%- load_yaml as mirror_defaults %}
+Debian:
+  pkgs:
+  - simplestreams
+{%- endload %}
+
+{%- set mirror = salt['grains.filter_by'](mirror_defaults, merge=salt['pillar.get']('maas:mirror', {})) %}
diff --git a/maas/mirror.sls b/maas/mirror.sls
new file mode 100644
index 0000000..f7c586d
--- /dev/null
+++ b/maas/mirror.sls
@@ -0,0 +1,23 @@
+{%- from "maas/map.jinja" import mirror with context %}
+
+{%- if mirror.get('enabled') %}
+
+{%- if mirror.get('image') %}
+
+maas_mirror_packages:
+  pkg.installed:
+    - names: {{ mirror.pkgs }}
+
+{%- for release_name, release in mirror.image.release.iteritems() %}
+
+mirror_image_{{ release_name }}:
+  cmd.run:
+  - name: "sstream-mirror --keyring={{ release.keyring }} {{ release.upstream }} {{ release.local_dir }} {%- if release.get('arch') %} 'arch={{ release.arch }}'{%- endif %} {%- if release.get('subarch') %} 'subarch~({{ release.subarch }})'{%- endif %} 'release~({{ release_name }})' {%- if release.get('count') %} --max={{ release.count }}{%- endif %}"
+  - require:
+    - pkg: maas_mirror_packages
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- endif %}
diff --git a/maas/region.sls b/maas/region.sls
index 554f9ba..e129457 100644
--- a/maas/region.sls
+++ b/maas/region.sls
@@ -20,6 +20,25 @@
   - require:
     - pkg: maas_region_packages
 
+{%- if region.database.initial_data is defined %}
+
+/root/maas/scripts/restore_{{ region.database.name }}.sh:
+  file.managed:
+    - source: salt://maas/files/restore.sh
+    - mode: 770
+    - makedirs: true
+    - template: jinja
+
+restore_maas_database_{{ region.database.name }}:
+  cmd.run:
+  - name: /root/maas/scripts/restore_{{ region.database.name }}.sh
+  - unless: "[ -f /root/maas/flags/{{ region.database.name }}-installed ]"
+  - cwd: /root
+  - require:
+    - file: /root/maas/scripts/restore_{{ region.database.name }}.sh
+
+{%- endif %}
+
 {%- if region.get('enable_iframe', False)  %}
 
 /etc/apache2/conf-enabled/maas-http.conf:
@@ -78,6 +97,18 @@
   - require:
     - pkg: maas_region_packages
 
+/etc/maas/preseeds/curtin_userdata_arm64_generic_xenial:
+  file.managed:
+  - source: salt://maas/files/curtin_userdata_arm64_generic_xenial
+  - template: jinja
+  - user: root
+  - group: root
+  - mode: 644
+  - context:
+      salt_master_ip: {{ region.salt_master_ip }}
+  - require:
+    - pkg: maas_region_packages
+
 /root/.pgpass:
   file.managed:
   - source: salt://maas/files/pgpass
@@ -119,6 +150,14 @@
   - require:
     - cmd: maas_login_admin
 
+{%- if region.get('boot_sources', False)  %}
+maas_boot_sources:
+  module.run:
+  - name: maas.process_boot_sources
+  - require:
+    - cmd: maas_set_admin_password
+{%- endif %}
+
 {%- if region.get('commissioning_scripts', False)  %}
 /etc/maas/files/commisioning_scripts/:
   file.directory:
@@ -205,6 +244,21 @@
   - require:
     - module: maas_config
 
+{%- if region.fabrics is defined %}
+{%- for fabric_name, fabric in region.fabrics.iteritems() %}
+{%- for vid, vlan in fabric.get('vlan',{}).items() %}
+maas_update_vlan_for_{{ fabric_name }}_{{ vid }}:
+  maasng.update_vlan:
+  - vid: {{ vid }}
+  - fabric: {{ fabric_name }}
+  - name: {{ vlan.get('name','') }}
+  - description: {{ vlan.description }}
+  - dhcp_on: {{ vlan.get('dhcp','False') }}
+{%- endfor %}
+{%- endfor %}
+{%- endif %}
+
+
 {%- if region.get('sshprefs', False)  %}
 maas_sshprefs:
   module.run:
diff --git a/metadata.yml b/metadata.yml
index ba697dc..dacbd6f 100644
--- a/metadata.yml
+++ b/metadata.yml
@@ -1,3 +1,3 @@
 name: "maas"
 version: "0.1"
-source: "https://github.com/tcpcloud/salt-formula-maas"
+source: "https://github.com/salt-formulas/salt-formula-maas"
diff --git a/metadata/service/cluster/single.yml b/metadata/service/cluster/single.yml
index 5c91bd1..a307dcc 100644
--- a/metadata/service/cluster/single.yml
+++ b/metadata/service/cluster/single.yml
@@ -41,5 +41,5 @@
         MA==
         =dtMN
         -----END PGP PUBLIC KEY BLOCK-----
-      saltstack_repo_trusty: "http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/ trusty main"
-      saltstack_repo_xenial: "http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/ xenial main"
\ No newline at end of file
+      saltstack_repo_trusty: "http://repo.saltstack.com/apt/ubuntu/14.04/amd64/2016.3/ trusty main"
+      saltstack_repo_xenial: "http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/ xenial main"
diff --git a/tests/pillar/disk_layout.sls b/tests/pillar/disk_layout.sls
new file mode 100644
index 0000000..e9a7405
--- /dev/null
+++ b/tests/pillar/disk_layout.sls
@@ -0,0 +1,77 @@
+maas:
+  region:
+    theme: theme
+    bind:
+      host: localhost
+      port: 80
+    admin:
+      username: admin
+      password: password
+      email:  email@example.com
+    database:
+      engine: postgresql
+      host: localhost
+      name: maasdb
+      password: password
+      username: maas
+    enabled: true
+    salt_master_ip: 127.0.0.1
+    machines:
+      server3:
+        disk_layout:
+          type: flat
+          bootable_device: vda
+          disk:
+            vda:
+              type: physical
+              partition_schema:
+                part1:
+                  size: 10G
+                  type: ext4
+                  mount: '/'
+                part2:
+                  size: 2G
+                part3:
+                  size: 3G
+            vdc:
+              type: physical
+              partition_schema:
+                part1:
+                  size: 100%
+            vdd:
+              type: physical
+              partition_schema:
+                part1:
+                  size: 100%
+            raid0:
+              type: raid
+              level: 10
+              devices:
+                - vde
+                - vdf
+              partition_schema:
+                part1:
+                  size: 10G
+                part2:
+                  size: 2G
+                part3:
+                  size: 3G
+            raid1:
+              type: raid
+              level: 1
+              partitions:
+                - vdc-part1
+                - vdd-part1
+            volume_group2:
+              type: lvm
+              devices:
+                - raid1
+              volume:
+                tmp:
+                  size: 5G
+                  fs_type: ext4
+                  mount: '/tmp'
+                log:
+                  size: 7G
+                  fs_type: ext4
+                  mount: '/var/log'
diff --git a/tests/pillar/maas_mirror.sls b/tests/pillar/maas_mirror.sls
new file mode 100644
index 0000000..090c794
--- /dev/null
+++ b/tests/pillar/maas_mirror.sls
@@ -0,0 +1,12 @@
+maas:
+  mirror:
+    enabled: true
+    image:
+      release:
+        xenial:
+          keyring: '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg'
+          upstream: 'http://images.maas.io/ephemeral-v3/daily/'
+          local_dir: '/var/www/html/maas/images/ephemeral-v3/daily'
+          arch: amd64
+          subarch: 'generic|hwe-t'
+          count: '1'
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
index 29fb975..9761585 100755
--- a/tests/run_tests.sh
+++ b/tests/run_tests.sh
@@ -39,7 +39,7 @@
     log_info "Setting up Python virtualenv"
     virtualenv $VENV_DIR
     source ${VENV_DIR}/bin/activate
-    pip install salt${PIP_SALT_VERSION}
+    python -m pip install salt${PIP_SALT_VERSION}
 }
 
 setup_mock_bin() {
@@ -124,8 +124,8 @@
 }
 
 salt_run() {
-    [ -e ${VEN_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
-    salt-call ${SALT_OPTS} $*
+    [ -e ${VENV_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
+    python $(which salt-call) ${SALT_OPTS} $*
 }
 
 prepare() {