#!/usr/bin/env python3
# group: rw
#
# Test nbd reconnect
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import time
import os

import iotests
from iotests import qemu_img_create, file_path, qemu_nbd_popen

disk_a, disk_b, nbd_sock = file_path('disk_a', 'disk_b', 'nbd-sock')
nbd_uri = 'nbd+unix:///?socket=' + nbd_sock
wait_limit = 3.0
wait_step = 0.2


class TestNbdReconnect(iotests.QMPTestCase):
    def init_vm(self, disk_size):
        qemu_img_create('-f', iotests.imgfmt, disk_a, str(disk_size))
        qemu_img_create('-f', iotests.imgfmt, disk_b, str(disk_size))
        self.vm = iotests.VM().add_drive(disk_a)
        self.vm.launch()
        self.vm.hmp_qemu_io('drive0', 'write 0 {}'.format(disk_size))

    def tearDown(self):
        self.vm.shutdown()
        os.remove(disk_a)
        os.remove(disk_b)

    def start_job(self, job):
        """Start job with nbd target and kill the server"""
        assert job in ('blockdev-backup', 'blockdev-mirror')
        with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
            # reconnect-delay=10 keeps guest requests queued for up to 10
            # seconds after the connection is lost instead of failing them
            # immediately, so the job survives the server going away.
            result = self.vm.qmp('blockdev-add',
                                 **{'node_name': 'backup0',
                                    'driver': 'raw',
                                    'file': {'driver': 'nbd',
                                             'server': {'type': 'unix',
                                                        'path': nbd_sock},
                                             'reconnect-delay': 10}})
            self.assert_qmp(result, 'return', {})
            result = self.vm.qmp(job, device='drive0',
                                 sync='full', target='backup0',
                                 speed=(1 * 1024 * 1024))
            self.assert_qmp(result, 'return', {})

            # Wait for some progress
            t = 0.0
            while t < wait_limit:
                jobs = self.vm.qmp('query-block-jobs')['return']
                if jobs and jobs[0]['offset'] > 0:
                    break
                time.sleep(wait_step)
                t += wait_step

            self.assertTrue(jobs and jobs[0]['offset'] > 0)  # job started

            jobs = self.vm.qmp('query-block-jobs')['return']
            # Check that job is still in progress
            self.assertTrue(jobs)
            self.assertTrue(jobs[0]['offset'] < jobs[0]['len'])

            result = self.vm.qmp('block-job-set-speed', device='drive0',
                                 speed=0)
            self.assert_qmp(result, 'return', {})

        # Emulate server down time for 1 second
        time.sleep(1)

    def test_backup(self):
        size = 5 * 1024 * 1024
        self.init_vm(size)
        self.start_job('blockdev-backup')

        with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
            e = self.vm.event_wait('BLOCK_JOB_COMPLETED')
            self.assertEqual(e['data']['offset'], size)
            result = self.vm.qmp('blockdev-del', node_name='backup0')
            self.assert_qmp(result, 'return', {})

    def cancel_job(self):
        result = self.vm.qmp('block-job-cancel', device='drive0', force=True)
        self.assert_qmp(result, 'return', {})

        start_t = time.time()
        self.vm.event_wait('BLOCK_JOB_CANCELLED')
        delta_t = time.time() - start_t
        self.assertTrue(delta_t < 2.0)

    def test_mirror_cancel(self):
        # The mirror speed limit is not reliable enough: mirror issues many
        # parallel requests regardless.  MAX_IN_FLIGHT is 16 and MAX_IO_BYTES
        # is 1M in mirror.c, so use a 20M disk to make sure the job cannot
        # finish before we cancel it.
        self.init_vm(20 * 1024 * 1024)
        self.start_job('blockdev-mirror')
        self.cancel_job()

    def test_backup_cancel(self):
        self.init_vm(5 * 1024 * 1024)
        self.start_job('blockdev-backup')
        self.cancel_job()


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'])