IO: 





1. Virtual disk I/O management mechanism in Xen




2. Token-based QoS resource limiting for VBD I/O





3. dm-ioband
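
A minimal dm-ioband sketch, following the weight-policy example in the
dm-ioband documentation (the device name, group id and the ":40" weight here
are only illustrative):

# wrap the backend device in an ioband device; ":40" is the weight assigned
# to the default ioband group
echo "0 $(blockdev --getsize /dev/sda1) ioband /dev/sda1 1 0 0 none weight 0 :40" \
    | dmsetup create ioband1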




4. ionice


for blkpid in $( pgrep blkback ); do
    # 2nd lowest (6) prio in best-effort class (2)
    ionice -c 2 -n 6 -p "$blkpid"
done
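
Note: these ionice priorities only take effect if the backing device uses the
CFQ I/O scheduler. To check what the blkback threads currently run with:

for blkpid in $( pgrep blkback ); do ionice -p "$blkpid"; done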










CPU:





#!/bin/sh
# cpucap
# reschedule all running Xen domUs to a medium-low cpu allocation


 


 


# scheduler notes:
    # If you want to change this to support QoS classes (i.e. if the customer
    # can choose a certain performance level), look at the SEDF and credit2
    # schedulers, or just play around with the weight.
    # I recommend using the weight for now; a sketch of that follows below.
    # Also, a 2-core cap plus a very heavy weight for dom0 should be looked at
    # when you see any I/O scalability issues.
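
# weight-based QoS sketch (an assumption, not wired into this script yet):
# map a customer-facing class name to a credit-scheduler weight; the class
# names and weight values are placeholders.
set_class()
{
    case "$1" in
        gold)   class_weight=512 ;;
        silver) class_weight=256 ;;
        *)      class_weight=128 ;;
    esac
    xm sched-credit -d "$2" -w "$class_weight"
}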


 


# hyperthreading notes:
    # We might see hyperthreads, so we'll assume 2 cores have a performance
    # of 130 => 1 core has 65!
    # This means we'll have to tweak the Xen vcpu allocation to ensure no one
    # ever runs on an HT sibling only?
    # A potential solution for figuring this out: use an extended cpu mask
    # (url?) in the cpus= line of the Xen config. That, coupled with picking
    # specific core types, would work, but is extremely complex.
    #
    # I will focus on giving the users multiples of 1 core + 1 HT and stick
    # with the 130 sum then...
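    # example under this model: a domU with 4 vcpus (two 1core+1HT pairs at
    # 130 each) gets a cap of 4 * 65 = 260 in the loop at the bottom.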


 


# print name, domid and vcpu count for every running domU
get_vms()
{
    xm list | egrep -v 'Name|Domain-0' | awk '{print $1" "$2" "$4}'
}


 


 


# set weight and cap for a single domain
set_perf()
{
    # $1 = domain id, $2 = weight, $3 = cap
    xm sched-credit -d "$1" -w "$2" -c "$3"
}
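
# usage sketch for set_perf (the values are only examples):
#   set_perf <domid> <weight> <cap>, e.g. set_perf 3 256 130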


 


# xen4.1 / sched-credit2 testers needed:
# The commands below are disabled for safety reasons; they can crash dom0.
# I bet there's still no regression test for that.
# This is our first try; later we should give dom0 256 * cores * total_domUs,
# or just pin it if we have lots of fast cores.


 


default_weight=256
dom0_cores=$(grep -c ^processor /proc/cpuinfo)
dom0_weight=$(( $default_weight * $dom0_cores ))
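# e.g. with 8 physical cores visible in dom0: dom0_weight = 256 * 8 = 2048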


#xm sched-credit2 -d 0 -w $dom0_weight
# we should employ capping for dom0 too, feel free to move this to the main code
#xm sched-credit2 -d 0 -c 400
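
# sketch (an assumption, mirroring the disabled lines above but for the plain
# credit scheduler, which takes both a weight and a cap):
#   xm sched-credit -d 0 -w $dom0_weight -c 400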


 


echo "$(get_vms)" | while read name domid vcpus ; do


    xm sched-credit -d $domid -c $(( $vcpus * 65  ))


done
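
# to verify the new settings, `xm sched-credit` with no arguments should list
# the weight and cap of every domain (check against your xm version):
#   xm sched-credit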