diff --git a/circle/dashboard/views/storage.py b/circle/dashboard/views/storage.py
index 36fad98..69c0be7 100644
--- a/circle/dashboard/views/storage.py
+++ b/circle/dashboard/views/storage.py
@@ -41,7 +41,7 @@ from ..forms import (
 )
 from .util import FilterMixin
 import json
-
+from celery.exceptions import TimeoutError
 
 logger = logging.getLogger(__name__)
 
@@ -199,6 +199,13 @@ class StorageDetail(SuperuserRequiredMixin, UpdateView):
             context['orphan_disks'] = ds.get_orphan_disks()
         except WorkerNotFound:
             messages.error(self.request, _("The DataStore is offline."))
+        except TimeoutError:
+            messages.error(self.request, _("Operation timed out, "
+                                           "some data may be missing."))
+        except Exception as e:
+            messages.error(self.request,
+                           _("Error occurred: %s, some data may be "
+                             "missing.") % unicode(e))
 
         context['disk_table'] = DiskListTable(
             self.get_table_data(), request=self.request,
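
The generic handler above translates the constant message first and only then interpolates the exception text; interpolating first would build a message id gettext has never seen, so the string could never be looked up in a translation catalogue. A small stand-alone illustration, using the standard-library gettext as a stand-in for Django's ugettext (the exception value is made up):

    from gettext import gettext as _   # stand-in for Django's ugettext alias

    e = ValueError("disk not found")

    # interpolating before translating builds an ever-changing msgid,
    # so a real catalogue lookup would always miss and fall back to English
    wrong = _("Error occurred: %s, some data may be missing." % e)

    # translating the constant msgid first, then interpolating, keeps the
    # msgid stable and lets gettext find it in the catalogue
    right = _("Error occurred: %s, some data may be missing.") % e

    print(wrong == right)   # True with the null catalogue used here;
                            # only 'right' survives an actual translation
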
diff --git a/circle/storage/models.py b/circle/storage/models.py
index cc905f4..54466aa 100644
--- a/circle/storage/models.py
+++ b/circle/storage/models.py
@@ -106,9 +106,11 @@ class DataStore(Model):
             raise WorkerNotFound()
 
     def get_deletable_disks(self):
-        return [disk.filename for disk in
-                self.disk_set.filter(
-                    destroyed__isnull=False) if disk.is_deletable]
+        deletables = [disk for disk in self.disk_set.filter(
+                      destroyed__isnull=False) if disk.is_deletable]
+        deletables = sorted(deletables, key=lambda disk: disk.destroyed)
+
+        return [disk.filename for disk in deletables]
 
     def get_hosts(self):
 
@@ -127,7 +129,7 @@ class DataStore(Model):
         try:
             return storage_tasks.get_storage_stat.apply_async(
                 args=[self.type, self.path], queue=q).get(timeout=timeout)
-        except TimeoutError:
+        except Exception:
             return {'free_space': -1,
                     'free_percent': -1}
 
@@ -593,11 +595,17 @@ class Disk(TimeStampedModel):
         """
         queue_name = self.datastore.get_remote_queue_name(
             'storage', priority='slow')
-        logger.info("Image: %s at Datastore: %s recovered from trash." %
-                    (self.filename, self.datastore.path))
-        storage_tasks.recover_from_trash.apply_async(
-            args=[self.datastore.path, self.filename],
+        res = storage_tasks.is_exists.apply_async(
+            args=[self.datastore.type,
+                  self.datastore.path,
+                  self.filename],
             queue=queue_name).get(timeout=timeout)
+        if res:
+            logger.info("Image: %s at Datastore: %s recovered." %
+                        (self.filename, self.datastore.path))
+        else:
+            logger.info("Image: %s at Datastore: %s not recovered." %
+                        (self.filename, self.datastore.path))
 
     def save_as(self, task=None, user=None, task_uuid=None, timeout=300):
         """Save VM as template.
diff --git a/circle/storage/tasks/periodic_tasks.py b/circle/storage/tasks/periodic_tasks.py
index e0f6dce..1348120 100644
--- a/circle/storage/tasks/periodic_tasks.py
+++ b/circle/storage/tasks/periodic_tasks.py
@@ -36,17 +36,18 @@ def garbage_collector(timeout=15):
     for ds in DataStore.objects.all():
         queue_name = ds.get_remote_queue_name('storage', priority='fast')
         files = set(storage_tasks.list_files.apply_async(
-            args=[ds.path], queue=queue_name).get(timeout=timeout))
-        disks = set(ds.get_deletable_disks())
+            args=[ds.type, ds.path], queue=queue_name).get(timeout=timeout))
+        disks = ds.get_deletable_disks()
         queue_name = ds.get_remote_queue_name('storage', priority='slow')
-        for i in disks & files:
-            logger.info("Image: %s at Datastore: %s moved to trash folder." %
+
+        deletable_disks = [disk for disk in disks if disk in files]
+        for i in deletable_disks:
+            logger.info("Image: %s at Datastore: %s fetch for destroy." %
                         (i, ds.path))
-            storage_tasks.move_to_trash.apply_async(
-                args=[ds.path, i], queue=queue_name).get(timeout=timeout)
         try:
             storage_tasks.make_free_space.apply_async(
-                args=[ds.path], queue=queue_name).get(timeout=timeout)
+                args=[ds.type, ds.path, deletable_disks],
+                queue=queue_name).get(timeout=timeout)
         except Exception as e:
             logger.warning(str(e))
 
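
Filtering the deletable filenames against the reported files with a list comprehension, instead of the old set intersection, keeps the oldest-first ordering coming out of get_deletable_disks() all the way to make_free_space. A minimal stand-alone illustration (the filenames are made up):

    files = {'a.img', 'b.img', 'c.img'}        # what list_files reported
    deletable = ['c.img', 'a.img', 'x.img']    # oldest destroyed first

    print(set(deletable) & files)              # a set: the ordering is lost
    print([f for f in deletable if f in files])   # ['c.img', 'a.img'], order kept
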
diff --git a/circle/storage/tasks/storage_tasks.py b/circle/storage/tasks/storage_tasks.py
index d15b4b3..8739ad9 100644
--- a/circle/storage/tasks/storage_tasks.py
+++ b/circle/storage/tasks/storage_tasks.py
@@ -39,12 +39,12 @@ def download(disk_desc, url):
 
 
 @celery.task(name='storagedriver.delete')
-def delete(path):
+def delete(disk_desc):
     pass
 
 
 @celery.task(name='storagedriver.delete_dump')
-def delete_dump(path):
+def delete_dump(data_store_type, path):
     pass
 
 
@@ -54,7 +54,7 @@ def snapshot(disk_desc):
 
 
 @celery.task(name='storagedriver.get')
-def get(json_data):
+def get(disk_desc):
     pass
 
 
@@ -63,18 +63,13 @@ def merge(src_disk_desc, dst_disk_desc):
     pass
 
 
-@celery.task(name='storagedriver.make_free_space')
-def make_free_space(datastore, percent):
+@celery.task(name='storagedriver.is_exists')
+def is_exists(data_store_type, path, disk_name):
     pass
 
 
-@celery.task(name='storagedriver.move_to_trash')
-def move_to_trash(datastore, disk_path):
-    pass
-
-
-@celery.task(name='storagedriver.recover_from_trash')
-def recover_from_trash(datastore, disk_path):
+@celery.task(name='storagedriver.make_free_space')
+def make_free_space(data_store_type, path, deletable_disks, percent):
     pass
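
These definitions are only local signature stubs: Celery dispatches the call by task name to the storagedriver worker consuming the datastore's queue, so the bodies never run on the Django side. A minimal dispatch sketch, assuming a DataStore instance named ds and the slow-priority queue used by the callers above; the filenames, the timeout and the import path are illustrative only:

    from storage.tasks import storage_tasks

    queue_name = ds.get_remote_queue_name('storage', priority='slow')

    # ask the driver whether an image file is still present on the datastore
    exists = storage_tasks.is_exists.apply_async(
        args=[ds.type, ds.path, 'example.img'],
        queue=queue_name).get(timeout=15)

    # hand over the already-collected deletable images; percent is presumably
    # given a default by the driver, since the callers here pass only three
    # positional arguments
    storage_tasks.make_free_space.apply_async(
        args=[ds.type, ds.path, ['old-1.img', 'old-2.img']],
        queue=queue_name).get(timeout=15)
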