hash: stringlengths (40 to 40)
repo: stringlengths (9 to 36)
date: stringlengths (19 to 19)
license: stringclasses (3 values)
message: stringlengths (86 to 367)
mods: listlengths (1 to 15)
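Each row that follows pairs a commit (hash, repo, date, license, message) with a JSON-encoded list of file modifications in mods. Below is a minimal sketch of reading one such row with the standard library; the record dict is illustrative, and whether mods arrives as a parsed list or a JSON string depends on the loader used.

```python
import json

# Illustrative row following the schema above; values are truncated/made up.
record = {
    "hash": "c27d31c06520c3df4c820ea10d5d16316f4d88cb",
    "repo": "cupy/cupy",
    "date": "19.07.2017 16:24:41",
    "license": "MIT License",
    "message": "Support CUDA stream on memory pool",
    "mods": '[{"change_type": "MODIFY", "old_path": "cupy/cuda/memory.pyx",'
            ' "new_path": "cupy/cuda/memory.pyx", "diff": "@@ ... @@"}]',
}

mods = record["mods"]
if isinstance(mods, str):          # some loaders keep mods as a JSON string
    mods = json.loads(mods)

for mod in mods:                   # summarize which files the commit touches
    print(record["hash"][:8], mod["change_type"], mod["new_path"])
```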
c27d31c06520c3df4c820ea10d5d16316f4d88cb
cupy/cupy
19.07.2017 16:24:41
MIT License
Support CUDA stream on memory pool. Now the memory pool has an arena (bins) for each stream, so that concurrent streams do not touch the same memory block.
[ { "change_type": "MODIFY", "old_path": "cupy/cuda/memory.pxd", "new_path": "cupy/cuda/memory.pxd", "diff": "@@ -1,4 +1,5 @@\n from libcpp cimport vector\n+from libcpp cimport unordered_map\n \n from cupy.cuda cimport device\n \n@@ -11,6 +12,7 @@ cdef class Chunk:\n readonly size_t ptr\n readonly Py_ssize_t offset\n readonly Py_ssize_t size\n+ public object stream_ptr\n public Chunk prev\n public Chunk next\n \n@@ -22,15 +24,16 @@ cdef class MemoryPointer:\n readonly size_t ptr\n \n cpdef copy_from_device(self, MemoryPointer src, Py_ssize_t size)\n- cpdef copy_from_device_async(self, MemoryPointer src, size_t size, stream)\n+ cpdef copy_from_device_async(self, MemoryPointer src, size_t size,\n+ stream=?)\n cpdef copy_from_host(self, mem, size_t size)\n- cpdef copy_from_host_async(self, mem, size_t size, stream)\n+ cpdef copy_from_host_async(self, mem, size_t size, stream=?)\n cpdef copy_from(self, mem, size_t size)\n- cpdef copy_from_async(self, mem, size_t size, stream)\n+ cpdef copy_from_async(self, mem, size_t size, stream=?)\n cpdef copy_to_host(self, mem, size_t size)\n- cpdef copy_to_host_async(self, mem, size_t size, stream)\n+ cpdef copy_to_host_async(self, mem, size_t size, stream=?)\n cpdef memset(self, int value, size_t size)\n- cpdef memset_async(self, int value, size_t size, stream)\n+ cpdef memset_async(self, int value, size_t size, stream=?)\n \n \n cpdef MemoryPointer alloc(Py_ssize_t size)\n@@ -44,14 +47,14 @@ cdef class SingleDeviceMemoryPool:\n cdef:\n object _allocator\n dict _in_use\n- list _free\n+ dict _free\n object __weakref__\n object _weakref\n object _free_lock\n object _in_use_lock\n readonly Py_ssize_t _allocation_unit_size\n readonly int _device_id\n- vector.vector[int] _index\n+ unordered_map.unordered_map[size_t, vector.vector[int]] _index\n \n cpdef MemoryPointer _alloc(self, Py_ssize_t size)\n cpdef MemoryPointer malloc(self, Py_ssize_t size)\n@@ -65,8 +68,11 @@ cdef class SingleDeviceMemoryPool:\n cpdef total_bytes(self)\n cpdef Py_ssize_t _round_size(self, Py_ssize_t size)\n cpdef int _bin_index_from_size(self, Py_ssize_t size)\n- cpdef _append_to_free_list(self, Py_ssize_t size, chunk)\n- cpdef bint _remove_from_free_list(self, Py_ssize_t size, chunk) except *\n+ cpdef list _arena(self, size_t stream_ptr)\n+ cdef vector.vector[int]* _arena_index(self, size_t stream_ptr)\n+ cpdef _append_to_free_list(self, Py_ssize_t size, chunk, size_t stream_ptr)\n+ cpdef bint _remove_from_free_list(self, Py_ssize_t size,\n+ chunk, size_t stream_ptr) except *\n cpdef tuple _split(self, Chunk chunk, Py_ssize_t size)\n cpdef Chunk _merge(self, Chunk head, Chunk remaining)\n \n" }, { "change_type": "MODIFY", "old_path": "cupy/cuda/memory.pyx", "new_path": "cupy/cuda/memory.pyx", "diff": "@@ -10,6 +10,7 @@ from libcpp cimport algorithm\n \n from cupy.cuda import memory_hook\n from cupy.cuda import runtime\n+from cupy.cuda import stream as stream_module\n \n from cupy.cuda cimport device\n from cupy.cuda cimport runtime\n@@ -126,24 +127,27 @@ cdef class Chunk:\n mem (Memory): The device memory buffer.\n offset (int): An offset bytes from the head of the buffer.\n size (int): Chunk size in bytes.\n+ stream_ptr (size_t): Raw stream handle of cupy.cuda.Stream\n \n Attributes:\n device (cupy.cuda.Device): Device whose memory the pointer refers to.\n mem (Memory): The device memory buffer.\n- ptr (int): Memory address.\n+ ptr (size_t): Memory address.\n offset (int): An offset bytes from the head of the buffer.\n size (int): Chunk size in bytes.\n prev (Chunk): prev memory 
pointer if split from a larger allocation\n next (Chunk): next memory pointer if split from a larger allocation\n+ stream_ptr (size_t): Raw stream handle of cupy.cuda.Stream\n \"\"\"\n \n- def __init__(self, mem, Py_ssize_t offset, Py_ssize_t size):\n+ def __init__(self, mem, Py_ssize_t offset, Py_ssize_t size, stream_ptr):\n assert mem.ptr > 0 or offset == 0\n self.mem = mem\n self.device = mem.device\n self.ptr = mem.ptr + offset\n self.offset = offset\n self.size = size\n+ self.stream_ptr = stream_ptr\n self.prev = None\n self.next = None\n \n@@ -163,7 +167,7 @@ cdef class MemoryPointer:\n ~MemoryPointer.device (cupy.cuda.Device): Device whose memory the\n pointer refers to.\n mem (Memory): The device memory buffer.\n- ptr (int): Pointer to the place within the buffer.\n+ ptr (size_t): Pointer to the place within the buffer.\n \"\"\"\n \n def __init__(self, mem, Py_ssize_t offset):\n@@ -217,15 +221,19 @@ cdef class MemoryPointer:\n runtime.memcpy(self.ptr, src.ptr, size,\n runtime.memcpyDefault)\n \n- cpdef copy_from_device_async(self, MemoryPointer src, size_t size, stream):\n+ cpdef copy_from_device_async(self, MemoryPointer src, size_t size,\n+ stream=None):\n \"\"\"Copies a memory from a (possibly different) device asynchronously.\n \n Args:\n src (cupy.cuda.MemoryPointer): Source memory pointer.\n size (int): Size of the sequence in bytes.\n stream (cupy.cuda.Stream): CUDA stream.\n+ The default uses CUDA stream of the current context.\n \n \"\"\"\n+ if stream is None:\n+ stream = stream_module.get_current_stream()\n if size > 0:\n _set_peer_access(src.device.id, self.device.id)\n runtime.memcpyAsync(self.ptr, src.ptr, size,\n@@ -243,7 +251,7 @@ cdef class MemoryPointer:\n runtime.memcpy(self.ptr, mem.value, size,\n runtime.memcpyHostToDevice)\n \n- cpdef copy_from_host_async(self, mem, size_t size, stream):\n+ cpdef copy_from_host_async(self, mem, size_t size, stream=None):\n \"\"\"Copies a memory sequence from the host memory asynchronously.\n \n Args:\n@@ -251,8 +259,11 @@ cdef class MemoryPointer:\n memory.\n size (int): Size of the sequence in bytes.\n stream (cupy.cuda.Stream): CUDA stream.\n+ The default uses CUDA stream of the current context.\n \n \"\"\"\n+ if stream is None:\n+ stream = stream_module.get_current_stream()\n if size > 0:\n runtime.memcpyAsync(self.ptr, mem.value, size,\n runtime.memcpyHostToDevice, stream.ptr)\n@@ -275,7 +286,7 @@ cdef class MemoryPointer:\n else:\n self.copy_from_host(mem, size)\n \n- cpdef copy_from_async(self, mem, size_t size, stream):\n+ cpdef copy_from_async(self, mem, size_t size, stream=None):\n \"\"\"Copies a memory sequence from an arbitrary place asynchronously.\n \n This function is a useful interface that selects appropriate one from\n@@ -287,8 +298,11 @@ cdef class MemoryPointer:\n pointer.\n size (int): Size of the sequence in bytes.\n stream (cupy.cuda.Stream): CUDA stream.\n+ The default uses CUDA stream of the current context.\n \n \"\"\"\n+ if stream is None:\n+ stream = stream_module.get_current_stream()\n if isinstance(mem, MemoryPointer):\n self.copy_from_device_async(mem, size, stream)\n else:\n@@ -306,7 +320,7 @@ cdef class MemoryPointer:\n runtime.memcpy(mem.value, self.ptr, size,\n runtime.memcpyDeviceToHost)\n \n- cpdef copy_to_host_async(self, mem, size_t size, stream):\n+ cpdef copy_to_host_async(self, mem, size_t size, stream=None):\n \"\"\"Copies a memory sequence to the host memory asynchronously.\n \n Args:\n@@ -314,8 +328,11 @@ cdef class MemoryPointer:\n memory.\n size (int): Size of the sequence in 
bytes.\n stream (cupy.cuda.Stream): CUDA stream.\n+ The default uses CUDA stream of the current context.\n \n \"\"\"\n+ if stream is None:\n+ stream = stream_module.get_current_stream()\n if size > 0:\n runtime.memcpyAsync(mem.value, self.ptr, size,\n runtime.memcpyDeviceToHost, stream.ptr)\n@@ -331,15 +348,18 @@ cdef class MemoryPointer:\n if size > 0:\n runtime.memset(self.ptr, value, size)\n \n- cpdef memset_async(self, int value, size_t size, stream):\n+ cpdef memset_async(self, int value, size_t size, stream=None):\n \"\"\"Fills a memory sequence by constant byte value asynchronously.\n \n Args:\n value (int): Value to fill.\n size (int): Size of the sequence in bytes.\n stream (cupy.cuda.Stream): CUDA stream.\n+ The default uses CUDA stream of the current context.\n \n \"\"\"\n+ if stream is None:\n+ stream = stream_module.get_current_stream()\n if size > 0:\n runtime.memsetAsync(self.ptr, value, size, stream.ptr)\n \n@@ -482,7 +502,7 @@ cdef class SingleDeviceMemoryPool:\n # cf. https://gist.github.com/sonots/41daaa6432b1c8b27ef782cd14064269\n self._allocation_unit_size = 512\n self._in_use = {}\n- self._free = []\n+ self._free = {}\n self._allocator = allocator\n self._weakref = weakref.ref(self)\n self._device_id = device.get_device_id()\n@@ -499,38 +519,62 @@ cdef class SingleDeviceMemoryPool:\n unit = self._allocation_unit_size\n return (size - 1) // unit\n \n- cpdef _append_to_free_list(self, Py_ssize_t size, chunk):\n+ cpdef list _arena(self, size_t stream_ptr):\n+ \"\"\"Get appropriate arena (list of bins) of a given stream\"\"\"\n+ if stream_ptr not in self._free:\n+ self._free[stream_ptr] = []\n+ return self._free[stream_ptr]\n+\n+ cdef vector.vector[int]* _arena_index(self, size_t stream_ptr):\n+ \"\"\"Get appropriate arena sparse index of a given stream\"\"\"\n+ if self._index.count(stream_ptr) == 0:\n+ self._index[stream_ptr] = vector.vector[int]()\n+ return &self._index[stream_ptr]\n+\n+ cpdef _append_to_free_list(self, Py_ssize_t size, chunk,\n+ size_t stream_ptr):\n cdef int index, bin_index\n+ cdef list arena\n cdef set free_list\n+ cdef vector.vector[int]* arena_index\n+\n bin_index = self._bin_index_from_size(size)\n rlock.lock_fastrlock(self._free_lock, -1, True)\n try:\n+ arena = self._arena(stream_ptr)\n+ arena_index = self._arena_index(stream_ptr)\n index = algorithm.lower_bound(\n- self._index.begin(), self._index.end(),\n- bin_index) - self._index.begin()\n- if index < self._index.size() and self._index[index] == bin_index:\n- free_list = self._free[index]\n+ arena_index.begin(), arena_index.end(),\n+ bin_index) - arena_index.begin()\n+ size = <int>arena_index.size()\n+ if index < size and arena_index.at(index) == bin_index:\n+ free_list = arena[index]\n else:\n free_list = set()\n- self._index.insert(\n- self._index.begin() + index, bin_index)\n- self._free.insert(index, free_list)\n+ arena_index.insert(arena_index.begin() + index, bin_index)\n+ arena.insert(index, free_list)\n free_list.add(chunk)\n finally:\n rlock.unlock_fastrlock(self._free_lock)\n \n- cpdef bint _remove_from_free_list(self, Py_ssize_t size, chunk) except *:\n+ cpdef bint _remove_from_free_list(self, Py_ssize_t size, chunk,\n+ size_t stream_ptr) except *:\n cdef int index, bin_index\n+ cdef list arena\n cdef set free_list\n+ cdef vector.vector[int]* arena_index\n+\n bin_index = self._bin_index_from_size(size)\n rlock.lock_fastrlock(self._free_lock, -1, True)\n try:\n+ arena = self._arena(stream_ptr)\n+ arena_index = self._arena_index(stream_ptr)\n index = algorithm.lower_bound(\n- 
self._index.begin(), self._index.end(),\n- bin_index) - self._index.begin()\n- if self._index[index] != bin_index:\n+ arena_index.begin(), arena_index.end(),\n+ bin_index) - arena_index.begin()\n+ if arena_index.at(index) != bin_index:\n return False\n- free_list = self._free[index]\n+ free_list = arena[index]\n if chunk in free_list:\n free_list.remove(chunk)\n return True\n@@ -545,8 +589,9 @@ cdef class SingleDeviceMemoryPool:\n assert chunk.size >= size\n if chunk.size == size:\n return chunk, None\n- head = Chunk(chunk.mem, chunk.offset, size)\n- remaining = Chunk(chunk.mem, chunk.offset + size, chunk.size - size)\n+ head = Chunk(chunk.mem, chunk.offset, size, chunk.stream_ptr)\n+ remaining = Chunk(chunk.mem, chunk.offset + size, chunk.size - size,\n+ chunk.stream_ptr)\n if chunk.prev is not None:\n head.prev = chunk.prev\n chunk.prev.next = head\n@@ -559,9 +604,10 @@ cdef class SingleDeviceMemoryPool:\n \n cpdef Chunk _merge(self, Chunk head, Chunk remaining):\n \"\"\"Merge previously splitted block (chunk)\"\"\"\n+ assert head.stream_ptr == remaining.stream_ptr\n cdef Chunk merged\n size = head.size + remaining.size\n- merged = Chunk(head.mem, head.offset, size)\n+ merged = Chunk(head.mem, head.offset, size, head.stream_ptr)\n if head.prev is not None:\n merged.prev = head.prev\n merged.prev.next = merged\n@@ -630,16 +676,20 @@ cdef class SingleDeviceMemoryPool:\n if size == 0:\n return MemoryPointer(Memory(0), 0)\n \n+ stream_ptr = stream_module.get_current_stream().ptr\n+\n+ bin_index = self._bin_index_from_size(size)\n # find best-fit, or a smallest larger allocation\n rlock.lock_fastrlock(self._free_lock, -1, True)\n- bin_index = self._bin_index_from_size(size)\n try:\n+ arena = self._arena(stream_ptr)\n+ arena_index = self._arena_index(stream_ptr)\n index = algorithm.lower_bound(\n- self._index.begin(), self._index.end(),\n- bin_index) - self._index.begin()\n- length = self._index.size()\n+ arena_index.begin(), arena_index.end(),\n+ bin_index) - arena_index.begin()\n+ length = arena_index.size()\n for i in range(index, length):\n- free_list = self._free[i]\n+ free_list = arena[i]\n if free_list:\n chunk = free_list.pop()\n break\n@@ -670,15 +720,16 @@ cdef class SingleDeviceMemoryPool:\n else:\n total = size + self.total_bytes()\n raise OutOfMemoryError(size, total)\n- chunk = Chunk(mem, 0, size)\n+ chunk = Chunk(mem, 0, size, stream_ptr)\n \n+ assert chunk.stream_ptr == stream_ptr\n rlock.lock_fastrlock(self._in_use_lock, -1, True)\n try:\n self._in_use[chunk.ptr] = chunk\n finally:\n rlock.unlock_fastrlock(self._in_use_lock)\n if remaining is not None:\n- self._append_to_free_list(remaining.size, remaining)\n+ self._append_to_free_list(remaining.size, remaining, stream_ptr)\n pmem = PooledMemory(chunk, self._weakref)\n return MemoryPointer(pmem, 0)\n \n@@ -693,16 +744,19 @@ cdef class SingleDeviceMemoryPool:\n rlock.unlock_fastrlock(self._in_use_lock)\n if chunk is None:\n raise RuntimeError('Cannot free out-of-pool memory')\n+ stream_ptr = chunk.stream_ptr\n \n if chunk.next is not None:\n- if self._remove_from_free_list(chunk.next.size, chunk.next):\n+ if self._remove_from_free_list(chunk.next.size, chunk.next,\n+ stream_ptr):\n chunk = self._merge(chunk, chunk.next)\n \n if chunk.prev is not None:\n- if self._remove_from_free_list(chunk.prev.size, chunk.prev):\n+ if self._remove_from_free_list(chunk.prev.size, chunk.prev,\n+ stream_ptr):\n chunk = self._merge(chunk.prev, chunk)\n \n- self._append_to_free_list(chunk.size, chunk)\n+ self._append_to_free_list(chunk.size, 
chunk, stream_ptr)\n \n cpdef free_all_blocks(self):\n cdef set free_list, keep_list\n@@ -710,13 +764,14 @@ cdef class SingleDeviceMemoryPool:\n # Free all **non-split** chunks\n rlock.lock_fastrlock(self._free_lock, -1, True)\n try:\n- for i in range(len(self._free)):\n- free_list = self._free[i]\n- keep_list = set()\n- for chunk in free_list:\n- if chunk.prev is not None or chunk.next is not None:\n- keep_list.add(chunk)\n- self._free[i] = keep_list\n+ for arena in self._free.itervalues():\n+ for i in range(len(arena)):\n+ free_list = arena[i]\n+ keep_list = set()\n+ for chunk in free_list:\n+ if chunk.prev is not None or chunk.next is not None:\n+ keep_list.add(chunk)\n+ arena[i] = keep_list\n finally:\n rlock.unlock_fastrlock(self._free_lock)\n \n@@ -731,8 +786,9 @@ cdef class SingleDeviceMemoryPool:\n cdef set free_list\n rlock.lock_fastrlock(self._free_lock, -1, True)\n try:\n- for free_list in self._free:\n- n += len(free_list)\n+ for arena in self._free.itervalues():\n+ for v in arena:\n+ n += len(v)\n finally:\n rlock.unlock_fastrlock(self._free_lock)\n return n\n@@ -754,9 +810,10 @@ cdef class SingleDeviceMemoryPool:\n cdef Chunk chunk\n rlock.lock_fastrlock(self._free_lock, -1, True)\n try:\n- for free_list in self._free:\n- for chunk in free_list:\n- size += chunk.size\n+ for arena in self._free.itervalues():\n+ for free_list in arena:\n+ for chunk in free_list:\n+ size += chunk.size\n finally:\n rlock.unlock_fastrlock(self._free_lock)\n return size\n" }, { "change_type": "MODIFY", "old_path": "tests/cupy_tests/cuda_tests/test_memory.py", "new_path": "tests/cupy_tests/cuda_tests/test_memory.py", "diff": "@@ -3,6 +3,7 @@ import unittest\n \n import cupy.cuda\n from cupy.cuda import memory\n+from cupy.cuda import stream as stream_module\n from cupy import testing\n \n \n@@ -105,6 +106,8 @@ class TestSingleDeviceMemoryPool(unittest.TestCase):\n def setUp(self):\n self.pool = memory.SingleDeviceMemoryPool(allocator=mock_alloc)\n self.unit = self.pool._allocation_unit_size\n+ self.stream = stream_module.Stream()\n+ self.stream_ptr = self.stream.ptr\n \n def test_round_size(self):\n self.assertEqual(self.pool._round_size(self.unit - 1), self.unit)\n@@ -118,46 +121,52 @@ class TestSingleDeviceMemoryPool(unittest.TestCase):\n \n def test_split(self):\n mem = MockMemory(self.unit * 4)\n- chunk = memory.Chunk(mem, 0, mem.size)\n+ chunk = memory.Chunk(mem, 0, mem.size, self.stream_ptr)\n head, tail = self.pool._split(chunk, self.unit * 2)\n- self.assertEqual(head.ptr, chunk.ptr)\n- self.assertEqual(head.offset, 0)\n- self.assertEqual(head.size, self.unit * 2)\n- self.assertEqual(head.prev, None)\n+ self.assertEqual(head.ptr, chunk.ptr)\n+ self.assertEqual(head.offset, 0)\n+ self.assertEqual(head.size, self.unit * 2)\n+ self.assertEqual(head.prev, None)\n self.assertEqual(head.next.ptr, tail.ptr)\n- self.assertEqual(tail.ptr, chunk.ptr + self.unit * 2)\n- self.assertEqual(tail.offset, self.unit * 2)\n- self.assertEqual(tail.size, self.unit * 2)\n+ self.assertEqual(head.stream_ptr, self.stream_ptr)\n+ self.assertEqual(tail.ptr, chunk.ptr + self.unit * 2)\n+ self.assertEqual(tail.offset, self.unit * 2)\n+ self.assertEqual(tail.size, self.unit * 2)\n self.assertEqual(tail.prev.ptr, head.ptr)\n- self.assertEqual(tail.next, None)\n+ self.assertEqual(tail.next, None)\n+ self.assertEqual(tail.stream_ptr, self.stream_ptr)\n \n head_of_head, tail_of_head = self.pool._split(head, self.unit)\n- self.assertEqual(head_of_head.ptr, chunk.ptr)\n- self.assertEqual(head_of_head.offset, 0)\n- 
self.assertEqual(head_of_head.size, self.unit)\n- self.assertEqual(head_of_head.prev, None)\n+ self.assertEqual(head_of_head.ptr, chunk.ptr)\n+ self.assertEqual(head_of_head.offset, 0)\n+ self.assertEqual(head_of_head.size, self.unit)\n+ self.assertEqual(head_of_head.prev, None)\n self.assertEqual(head_of_head.next.ptr, tail_of_head.ptr)\n- self.assertEqual(tail_of_head.ptr, chunk.ptr + self.unit)\n- self.assertEqual(tail_of_head.offset, self.unit)\n- self.assertEqual(tail_of_head.size, self.unit)\n+ self.assertEqual(head_of_head.stream_ptr, self.stream_ptr)\n+ self.assertEqual(tail_of_head.ptr, chunk.ptr + self.unit)\n+ self.assertEqual(tail_of_head.offset, self.unit)\n+ self.assertEqual(tail_of_head.size, self.unit)\n self.assertEqual(tail_of_head.prev.ptr, head_of_head.ptr)\n self.assertEqual(tail_of_head.next.ptr, tail.ptr)\n+ self.assertEqual(tail_of_head.stream_ptr, self.stream_ptr)\n \n head_of_tail, tail_of_tail = self.pool._split(tail, self.unit)\n- self.assertEqual(head_of_tail.ptr, chunk.ptr + self.unit * 2)\n- self.assertEqual(head_of_tail.offset, self.unit * 2)\n- self.assertEqual(head_of_tail.size, self.unit)\n+ self.assertEqual(head_of_tail.ptr, chunk.ptr + self.unit * 2)\n+ self.assertEqual(head_of_tail.offset, self.unit * 2)\n+ self.assertEqual(head_of_tail.size, self.unit)\n self.assertEqual(head_of_tail.prev.ptr, tail_of_head.ptr)\n self.assertEqual(head_of_tail.next.ptr, tail_of_tail.ptr)\n- self.assertEqual(tail_of_tail.ptr, chunk.ptr + self.unit * 3)\n- self.assertEqual(tail_of_tail.offset, self.unit * 3)\n- self.assertEqual(tail_of_tail.size, self.unit)\n+ self.assertEqual(head_of_tail.stream_ptr, self.stream_ptr)\n+ self.assertEqual(tail_of_tail.ptr, chunk.ptr + self.unit * 3)\n+ self.assertEqual(tail_of_tail.offset, self.unit * 3)\n+ self.assertEqual(tail_of_tail.size, self.unit)\n self.assertEqual(tail_of_tail.prev.ptr, head_of_tail.ptr)\n- self.assertEqual(tail_of_tail.next, None)\n+ self.assertEqual(tail_of_tail.next, None)\n+ self.assertEqual(tail_of_tail.stream_ptr, self.stream_ptr)\n \n def test_merge(self):\n mem = MockMemory(self.unit * 4)\n- chunk = memory.Chunk(mem, 0, mem.size)\n+ chunk = memory.Chunk(mem, 0, mem.size, self.stream_ptr)\n \n head, tail = self.pool._split(chunk, self.unit * 2)\n head_ptr, tail_ptr = head.ptr, tail.ptr\n@@ -165,25 +174,28 @@ class TestSingleDeviceMemoryPool(unittest.TestCase):\n head_of_tail, tail_of_tail = self.pool._split(tail, self.unit)\n \n merged_head = self.pool._merge(head_of_head, tail_of_head)\n- self.assertEqual(merged_head.ptr, head.ptr)\n- self.assertEqual(merged_head.offset, head.offset)\n- self.assertEqual(merged_head.size, head.size)\n- self.assertEqual(merged_head.prev, None)\n+ self.assertEqual(merged_head.ptr, head.ptr)\n+ self.assertEqual(merged_head.offset, head.offset)\n+ self.assertEqual(merged_head.size, head.size)\n+ self.assertEqual(merged_head.prev, None)\n self.assertEqual(merged_head.next.ptr, tail_ptr)\n+ self.assertEqual(merged_head.stream_ptr, self.stream_ptr)\n \n merged_tail = self.pool._merge(head_of_tail, tail_of_tail)\n- self.assertEqual(merged_tail.ptr, tail.ptr)\n- self.assertEqual(merged_tail.offset, tail.offset)\n- self.assertEqual(merged_tail.size, tail.size)\n+ self.assertEqual(merged_tail.ptr, tail.ptr)\n+ self.assertEqual(merged_tail.offset, tail.offset)\n+ self.assertEqual(merged_tail.size, tail.size)\n self.assertEqual(merged_tail.prev.ptr, head_ptr)\n- self.assertEqual(merged_tail.next, None)\n+ self.assertEqual(merged_tail.next, None)\n+ 
self.assertEqual(merged_tail.stream_ptr, self.stream_ptr)\n \n merged = self.pool._merge(merged_head, merged_tail)\n- self.assertEqual(merged.ptr, chunk.ptr)\n+ self.assertEqual(merged.ptr, chunk.ptr)\n self.assertEqual(merged.offset, chunk.offset)\n- self.assertEqual(merged.size, chunk.size)\n- self.assertEqual(merged.prev, None)\n- self.assertEqual(merged.next, None)\n+ self.assertEqual(merged.size, chunk.size)\n+ self.assertEqual(merged.prev, None)\n+ self.assertEqual(merged.next, None)\n+ self.assertEqual(merged.stream_ptr, self.stream_ptr)\n \n def test_alloc(self):\n p1 = self.pool.malloc(self.unit * 4)\n@@ -209,6 +221,14 @@ class TestSingleDeviceMemoryPool(unittest.TestCase):\n p2 = self.pool.malloc(self.unit * 4)\n self.assertEqual(ptr1, p2.ptr)\n \n+ def test_free_stream(self):\n+ p1 = self.pool.malloc(self.unit * 4)\n+ ptr1 = p1.ptr\n+ del p1\n+ with self.stream:\n+ p2 = self.pool.malloc(self.unit * 4)\n+ self.assertNotEqual(ptr1, p2.ptr)\n+\n def test_free_merge(self):\n p = self.pool.malloc(self.unit * 4)\n ptr = p.ptr\n@@ -250,7 +270,10 @@ class TestSingleDeviceMemoryPool(unittest.TestCase):\n self.assertNotEqual(ptr1, p2.ptr)\n del p2\n \n+ def test_free_all_blocks_split(self):\n # do not free splitted blocks\n+ p = self.pool.malloc(self.unit * 4)\n+ del p\n head = self.pool.malloc(self.unit * 2)\n tail = self.pool.malloc(self.unit * 2)\n tailptr = tail.ptr\n@@ -260,6 +283,23 @@ class TestSingleDeviceMemoryPool(unittest.TestCase):\n self.assertEqual(tailptr, p.ptr)\n del head\n \n+ def test_free_all_blocks_stream(self):\n+ p1 = self.pool.malloc(self.unit * 4)\n+ ptr1 = p1.ptr\n+ del p1\n+ with self.stream:\n+ p2 = self.pool.malloc(self.unit * 4)\n+ ptr2 = p2.ptr\n+ del p2\n+ self.pool.free_all_blocks()\n+ p3 = self.pool.malloc(self.unit * 4)\n+ self.assertNotEqual(ptr1, p3.ptr)\n+ self.assertNotEqual(ptr2, p3.ptr)\n+ with self.stream:\n+ p4 = self.pool.malloc(self.unit * 4)\n+ self.assertNotEqual(ptr1, p4.ptr)\n+ self.assertNotEqual(ptr2, p4.ptr)\n+\n def test_free_all_free(self):\n p1 = self.pool.malloc(self.unit * 4)\n ptr1 = p1.ptr\n@@ -282,6 +322,14 @@ class TestSingleDeviceMemoryPool(unittest.TestCase):\n self.assertEqual(self.unit * 1, self.pool.used_bytes())\n del p3\n \n+ def test_used_bytes_stream(self):\n+ p1 = self.pool.malloc(self.unit * 4)\n+ del p1\n+ with self.stream:\n+ p2 = self.pool.malloc(self.unit * 2)\n+ self.assertEqual(self.unit * 2, self.pool.used_bytes())\n+ del p2\n+\n def test_free_bytes(self):\n p1 = self.pool.malloc(self.unit * 2)\n self.assertEqual(self.unit * 0, self.pool.free_bytes())\n@@ -295,6 +343,14 @@ class TestSingleDeviceMemoryPool(unittest.TestCase):\n self.assertEqual(self.unit * 5, self.pool.free_bytes())\n del p3\n \n+ def test_free_bytes_stream(self):\n+ p1 = self.pool.malloc(self.unit * 4)\n+ del p1\n+ with self.stream:\n+ p2 = self.pool.malloc(self.unit * 2)\n+ self.assertEqual(self.unit * 4, self.pool.free_bytes())\n+ del p2\n+\n def test_total_bytes(self):\n p1 = self.pool.malloc(self.unit * 2)\n self.assertEqual(self.unit * 2, self.pool.total_bytes())\n@@ -308,6 +364,14 @@ class TestSingleDeviceMemoryPool(unittest.TestCase):\n self.assertEqual(self.unit * 6, self.pool.total_bytes())\n del p3\n \n+ def test_total_bytes_stream(self):\n+ p1 = self.pool.malloc(self.unit * 4)\n+ del p1\n+ with self.stream:\n+ p2 = self.pool.malloc(self.unit * 2)\n+ self.assertEqual(self.unit * 6, self.pool.total_bytes())\n+ del p2\n+\n \n @testing.parameterize(*testing.product({\n 'allocator': [memory._malloc, memory.malloc_managed],\n" } ]
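The diff above keys the pool's free lists and sparse index by stream_ptr, so a chunk freed on one stream is only reused by allocations on that same stream. A minimal pure-Python sketch of that bookkeeping follows; the class and its methods are illustrative, not CuPy's actual API.

```python
from collections import defaultdict

class StreamAwarePool:
    """Toy free-list pool keeping a separate arena of bins per stream."""

    def __init__(self):
        # stream_ptr -> block size -> list of free block ids
        self._free = defaultdict(lambda: defaultdict(list))
        self._next_id = 0

    def malloc(self, size, stream_ptr):
        bins = self._free[stream_ptr][size]
        if bins:                       # reuse a block freed on the same stream
            return bins.pop()
        self._next_id += 1             # otherwise hand out a fresh block id
        return self._next_id

    def free(self, block, size, stream_ptr):
        # return the block to the arena of the stream it belongs to
        self._free[stream_ptr][size].append(block)


pool = StreamAwarePool()
b1 = pool.malloc(512, stream_ptr=0)
pool.free(b1, 512, stream_ptr=0)
assert pool.malloc(512, stream_ptr=0) == b1   # same stream: block reused
assert pool.malloc(512, stream_ptr=7) != b1   # other stream: fresh block
```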
6683a9aa7bae67e855cd9d1f17fdc49eb3f6dea0
cupy/cupy
17.06.2020 22:41:09
MIT License
Complete overhaul of filter testing. These tests are now much more flexible, which will make it easier to extend them when additional filters are added.
[ { "change_type": "MODIFY", "old_path": "tests/cupyx_tests/scipy_tests/ndimage_tests/test_filters.py", "new_path": "tests/cupyx_tests/scipy_tests/ndimage_tests/test_filters.py", "diff": "@@ -11,359 +11,349 @@ try:\n except ImportError:\n pass\n \n-# ######### Testing convolve and correlate ##########\n \n+class FilterTestCaseBase(unittest.TestCase):\n+ \"\"\"\n+ Add some utility methods for the parameterized tests for filters. these\n+ assume there are the \"parameters\" self.filter, self.wdtype or self.dtype,\n+ and self.ndim, self.kshape, or self.shape. Other optional \"parameters\" are\n+ also used if available like self.footprint when the filter is a filter\n+ that uses the footprint. These methods allow testing across multiple\n+ filter types much more easily.\n+ \"\"\"\n+\n+ # default param values if not provided\n+ filter = 'convolve'\n+ shape = (4, 5)\n+ ksize = 3\n+ dtype = numpy.float64\n+ footprint = True\n \n-@testing.parameterize(*(\n- testing.product({\n- 'shape': [(3, 4), (2, 3, 4), (1, 2, 3, 4)],\n- 'ksize': [3, 4],\n- 'mode': ['reflect'],\n- 'cval': [0.0],\n- 'origin': [0, 1, None],\n- 'adtype': [numpy.int8, numpy.int16, numpy.int32,\n- numpy.float32, numpy.float64],\n- 'wdtype': [None, numpy.int32, numpy.float64],\n- 'output': [None, numpy.int32, numpy.float64],\n- 'filter': ['convolve', 'correlate']\n- }) + testing.product({\n- 'shape': [(3, 4), (2, 3, 4), (1, 2, 3, 4)],\n- 'ksize': [3, 4],\n- 'mode': ['constant'],\n- 'cval': [-1.0, 0.0, 1.0],\n- 'origin': [0],\n- 'adtype': [numpy.int32, numpy.float64],\n- 'wdtype': [None],\n- 'output': [None],\n- 'filter': ['convolve', 'correlate']\n- }) + testing.product({\n- 'shape': [(3, 4), (2, 3, 4), (1, 2, 3, 4)],\n- 'ksize': [3, 4],\n- 'mode': ['nearest', 'mirror', 'wrap'],\n- 'cval': [0.0],\n- 'origin': [0],\n- 'adtype': [numpy.int32, numpy.float64],\n- 'wdtype': [None],\n- 'output': [None],\n- 'filter': ['convolve', 'correlate']\n- })\n-))\n-@testing.gpu\n-@testing.with_requires('scipy')\n-class TestConvolveAndCorrelate(unittest.TestCase):\n \n- def _filter(self, xp, scp, a, w):\n+ # Params that need no processing and just go into kwargs\n+ KWARGS_PARAMS = ('output', 'axis', 'mode', 'cval')\n+\n+\n+ def _filter(self, xp, scp):\n+ \"\"\"\n+ The function that all tests end up calling, possibly after a few\n+ adjustments to the class \"parameters\".\n+ \"\"\"\n+ # The filter function\n filter = getattr(scp.ndimage, self.filter)\n- if self.origin is None:\n- origin = (-1, 1, -1, 1)[:a.ndim]\n- else:\n- origin = self.origin\n- return filter(a, w, output=self.output, mode=self.mode,\n- cval=self.cval, origin=origin)\n \n- @testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-5, scipy_name='scp')\n- def test_convolve_and_correlate(self, xp, scp):\n- if 1 in self.shape and self.mode == 'mirror':\n- raise unittest.SkipTest(\"requires scipy>1.5.0, tested later\")\n- if self.adtype == self.wdtype or self.adtype == self.output:\n- raise unittest.SkipTest(\"redundant\")\n- a = testing.shaped_random(self.shape, xp, self.adtype)\n- if self.wdtype is None:\n- wdtype = self.adtype\n- else:\n- wdtype = self.wdtype\n- w = testing.shaped_random((self.ksize,) * a.ndim, xp, wdtype)\n- return self._filter(xp, scp, a, w)\n+ # The kwargs to pass to the filter function\n+ kwargs = {param:getattr(self, param)\n+ for param in FilterTestCaseBase.KWARGS_PARAMS\n+ if hasattr(self, param)}\n+ if hasattr(self, 'origin'):\n+ kwargs['origin'] = self._origin\n+\n+ # The array we are filtering\n+ arr = testing.shaped_random(self.shape, xp, self.dtype)\n+\n+ # The 
weights we are using to filter\n+ wghts = self._get_weights(xp)\n+ if isinstance(wghts, tuple) and len(wghts) == 2 and wghts[0] is None:\n+ # w is actually a tuple of (None, footprint)\n+ wghts, kwargs['footprint'] = wghts\n+\n+ # Actually perform filtering\n+ return filter(arr, wghts, **kwargs)\n+\n+\n+ def _get_weights(self, xp):\n+ # Gets the second argument to the filter functions.\n+ # For convolve/correlate/convolve1d/correlate1d this is the weights.\n+ # For minimum_filter1d/maximum_filter1d this is the kernel size.\n+ #\n+ # For minimum_filter/maximum_filter this is a bit more complicated and\n+ # is either the kernel size or a tuple of None and the footprint. The\n+ # _filter() method knows about this and handles it automatically.\n+\n+ if self.filter in ('convolve', 'correlate'):\n+ return testing.shaped_random(self._kshape, xp, self._dtype)\n+\n+ if self.filter in ('convolve1d', 'correlate1d'):\n+ return testing.shaped_random((self.ksize,), xp, self._dtype)\n+\n+ if self.filter in ('minimum_filter', 'maximum_filter'):\n+ if not self.footprint:\n+ return self.ksize\n+ kshape = self._kshape\n+ footprint = testing.shaped_random(kshape, xp, scale=1) > 0.5\n+ if not footprint.any():\n+ footprint = xp.ones(kshape)\n+ return None, footprint\n \n+ if self.filter in ('minimum_filter1d', 'maximum_filter1d'):\n+ return self.ksize\n \n-@testing.parameterize(*testing.product({\n- 'shape': [(1, 2, 3, 4)],\n+ raise RuntimeError('unsupported filter name')\n+\n+\n+ @property\n+ def _dtype(self):\n+ return getattr(self, 'wdtype', None) or self.dtype\n+\n+\n+ @property\n+ def _ndim(self):\n+ return getattr(self, 'ndim', len(getattr(self, 'shape', [])))\n+\n+\n+ @property\n+ def _kshape(self):\n+ return getattr(self, 'kshape', (self.ksize,) * self._ndim)\n+\n+\n+ @property\n+ def _origin(self):\n+ origin = getattr(self, 'origin', 0)\n+ if origin is not None:\n+ return origin\n+ is_1d = self.filter.endswith('1d')\n+ return -1 if is_1d else (-1, 1, -1, 1)[:self._ndim]\n+\n+\n+# Parameters common across all modes (with some overrides)\n+COMMON_PARAMS = {\n+ 'shape': [(4, 5), (3, 4, 5), (1, 3, 4, 5)],\n 'ksize': [3, 4],\n 'dtype': [numpy.int32, numpy.float64],\n- 'filter': ['convolve', 'correlate']\n-}))\n-@testing.gpu\n-# SciPy behavior fixed in 1.5.0: https://github.com/scipy/scipy/issues/11661\n-@testing.with_requires('scipy>=1.5.0')\n-class TestConvolveAndCorrelateMirrorDim1(unittest.TestCase):\n- @testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-5, scipy_name='scp')\n- def test_convolve_and_correlate(self, xp, scp):\n- a = testing.shaped_random(self.shape, xp, self.dtype)\n- w = testing.shaped_random((self.ksize,) * a.ndim, xp, self.dtype)\n- filter = getattr(scp.ndimage, self.filter)\n- return filter(a, w, output=None, mode='mirror', cval=0.0, origin=0)\n+}\n \n \n-@testing.parameterize(*testing.product({\n- 'ndim': [2, 3],\n- 'dtype': [numpy.int32, numpy.float64],\n- 'filter': ['convolve', 'correlate']\n-}))\n+# The bulk of the tests are done with this class\n+@testing.parameterize(*(\n+ testing.product([\n+ # Filter-function specific params\n+ testing.product({\n+ 'filter': ['convolve', 'correlate'],\n+ }) + testing.product({\n+ 'filter': ['convolve1d', 'correlate1d',\n+ 'minimum_filter1d', 'maximum_filter1d'],\n+ 'axis': [0, 1, -1],\n+ }) + testing.product({\n+ 'filter': ['minimum_filter', 'maximum_filter'],\n+ 'footprint': [False, True],\n+ }),\n+\n+ # Mode-specific params\n+ testing.product({\n+ **COMMON_PARAMS,\n+ 'mode': ['reflect'],\n+ # With reflect test some of the other parameters as 
well\n+ 'origin': [0, 1, None],\n+ 'output': [None, numpy.int32, numpy.float64],\n+ 'dtype': [numpy.uint8, numpy.int16, numpy.int32,\n+ numpy.float32, numpy.float64],\n+ }) + testing.product({\n+ **COMMON_PARAMS,\n+ 'mode': ['constant'], 'cval': [-1.0, 0.0, 1.0],\n+ }) + testing.product({\n+ **COMMON_PARAMS,\n+ 'mode': ['nearest', 'wrap'],\n+ }) + testing.product({\n+ **COMMON_PARAMS,\n+ 'shape': [(4, 5), (3, 4, 5)], # no (1,3,4,5) here due to scipy bug\n+ 'mode': ['mirror'],\n+ })\n+ ])\n+))\n @testing.gpu\n @testing.with_requires('scipy')\n-class TestConvolveAndCorrelateSpecialCases(unittest.TestCase):\n+class TestFilter(FilterTestCaseBase):\n+ @testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-5, scipy_name='scp')\n+ def test_filter(self, xp, scp):\n+ if self.dtype == getattr(self, 'output', None):\n+ raise unittest.SkipTest(\"redundant\")\n+ return self._filter(xp, scp)\n \n- def _filter(self, scp, a, w, mode='reflect', origin=0):\n- filter = getattr(scp.ndimage, self.filter)\n- return filter(a, w, mode=mode, origin=origin)\n \n+# Tests things requiring scipy >= 1.5.0\n+@testing.parameterize(*(\n+ testing.product([\n+ # Filter-function specific params\n+ testing.product({\n+ 'filter': ['convolve', 'correlate'],\n+ }) + testing.product({\n+ 'filter': ['convolve1d', 'correlate1d',\n+ 'minimum_filter1d', 'maximum_filter1d'],\n+ 'axis': [0, 1, -1],\n+ }) + testing.product({\n+ 'filter': ['minimum_filter', 'maximum_filter'],\n+ 'footprint': [False, True],\n+ }),\n+\n+ # Mode-specific params\n+ testing.product({\n+ **COMMON_PARAMS,\n+ 'shape': [(1, 3, 4, 5)],\n+ 'mode': ['mirror'],\n+ })\n+ ])\n+))\n+@testing.gpu\n+# SciPy behavior fixed in 1.5.0: https://github.com/scipy/scipy/issues/11661\n+@testing.with_requires('scipy>=1.5.0')\n+class TestMirrorWithDim1(FilterTestCaseBase):\n @testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-5, scipy_name='scp')\n- def test_weights_with_size_zero_dim(self, xp, scp):\n- a = testing.shaped_random((3, ) * self.ndim, xp, self.dtype)\n- w = testing.shaped_random((0, ) + (3, ) * self.ndim, xp, self.dtype)\n- return self._filter(scp, a, w)\n-\n- def test_invalid_shape_weights(self):\n- a = testing.shaped_random((3, ) * self.ndim, cupy, self.dtype)\n- w = testing.shaped_random((3, ) * (self.ndim - 1), cupy, self.dtype)\n- with self.assertRaises(RuntimeError):\n- self._filter(cupyx.scipy, a, w)\n- w = testing.shaped_random((0, ) + (3, ) * (self.ndim - 1), cupy,\n- self.dtype)\n- with self.assertRaises(RuntimeError):\n- self._filter(cupyx.scipy, a, w)\n-\n- def test_invalid_mode(self):\n- a = testing.shaped_random((3, ) * self.ndim, cupy, self.dtype)\n- w = testing.shaped_random((3, ) * self.ndim, cupy, self.dtype)\n- with self.assertRaises(RuntimeError):\n- self._filter(cupyx.scipy, a, w, mode='unknown')\n-\n- # SciPy behavior fixed in 1.2.0: https://github.com/scipy/scipy/issues/822\n- @testing.with_requires('scipy>=1.2.0')\n- def test_invalid_origin(self):\n- a = testing.shaped_random((3, ) * self.ndim, cupy, self.dtype)\n- for lenw in [3, 4]:\n- w = testing.shaped_random((lenw, ) * self.ndim, cupy, self.dtype)\n- for origin in range(-3, 4):\n- if (lenw // 2 + origin < 0) or (lenw // 2 + origin >= lenw):\n- with self.assertRaises(ValueError):\n- self._filter(cupyx.scipy, a, w, origin=origin)\n- else:\n- self._filter(cupyx.scipy, a, w, origin=origin)\n-\n-\n-# ######### Testing convolve1d and correlate1d ##########\n+ def test_filter(self, xp, scp):\n+ return self._filter(xp, scp)\n \n \n+# Tests with weight dtypes that are distinct from the input and output dtypes\n 
@testing.parameterize(*(\n- testing.product({\n- 'shape': [(3, 4), (2, 3, 4), (1, 2, 3, 4)],\n- 'ksize': [3, 4],\n- 'axis': [0, 1, -1],\n- 'mode': ['reflect'],\n- 'cval': [0.0],\n- 'origin': [0, 1, -1],\n- 'adtype': [numpy.int8, numpy.int16, numpy.int32,\n- numpy.float32, numpy.float64],\n- 'wdtype': [None, numpy.int32, numpy.float64],\n- 'output': [None, numpy.int32, numpy.float64],\n- 'filter': ['convolve1d', 'correlate1d']\n- }) + testing.product({\n- 'shape': [(3, 4), (2, 3, 4), (1, 2, 3, 4)],\n- 'ksize': [3, 4],\n- 'axis': [0, 1, -1],\n- 'mode': ['constant'],\n- 'cval': [-1.0, 0.0, 1.0],\n- 'origin': [0],\n- 'adtype': [numpy.int32, numpy.float64],\n- 'wdtype': [None],\n- 'output': [None],\n- 'filter': ['convolve1d', 'correlate1d']\n- }) + testing.product({\n- 'shape': [(3, 4), (2, 3, 4), (1, 2, 3, 4)],\n- 'ksize': [3, 4],\n- 'axis': [0, 1, -1],\n- 'mode': ['nearest', 'mirror', 'wrap'],\n- 'cval': [0.0],\n- 'origin': [0],\n- 'adtype': [numpy.int32, numpy.float64],\n- 'wdtype': [None],\n- 'output': [None],\n- 'filter': ['convolve1d', 'correlate1d']\n- })\n+ testing.product([\n+ testing.product({\n+ 'filter': ['convolve', 'correlate'],\n+ }) + testing.product({\n+ 'filter': ['convolve1d', 'correlate1d'],\n+ 'axis': [0, 1, -1],\n+ }),\n+ testing.product({\n+ **COMMON_PARAMS,\n+ 'mode': ['reflect'],\n+ 'output': [None, numpy.int32, numpy.float64],\n+ 'dtype': [numpy.uint8, numpy.int16, numpy.int32,\n+ numpy.float32, numpy.float64],\n+ 'wdtype': [numpy.int32, numpy.float64],\n+ })\n+ ])\n ))\n @testing.gpu\n @testing.with_requires('scipy')\n-class TestConvolve1DAndCorrelate1D(unittest.TestCase):\n-\n- def _filter(self, xp, scp, a, w):\n- filter = getattr(scp.ndimage, self.filter)\n- return filter(a, w, axis=self.axis, output=self.output, mode=self.mode,\n- cval=self.cval, origin=self.origin)\n-\n+class TestWeightDtype(FilterTestCaseBase):\n @testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-5, scipy_name='scp')\n- def test_convolve1d_and_correlate1d(self, xp, scp):\n- if 1 in self.shape and self.mode == 'mirror':\n- raise unittest.SkipTest(\"requires scipy>1.5.0, tested later\")\n- if self.adtype == self.wdtype or self.adtype == self.output:\n+ def test_filter(self, xp, scp):\n+ if self.dtype == self.wdtype:\n raise unittest.SkipTest(\"redundant\")\n- a = testing.shaped_random(self.shape, xp, self.adtype)\n- if self.wdtype is None:\n- wdtype = self.adtype\n- else:\n- wdtype = self.wdtype\n- w = testing.shaped_random((self.ksize,), xp, wdtype)\n- return self._filter(xp, scp, a, w)\n+ return self._filter(xp, scp)\n \n \n+# Tests special weights (ND)\n @testing.parameterize(*testing.product({\n- 'shape': [(1, 2, 3, 4)],\n- 'ksize': [3, 4],\n- 'axis': [0, 1, -1],\n+ 'filter': ['convolve', 'correlate', 'minimum_filter', 'maximum_filter'],\n+ 'shape': [(3, 3), (3, 3, 3)],\n 'dtype': [numpy.int32, numpy.float64],\n- 'filter': ['convolve1d', 'correlate1d']\n }))\n @testing.gpu\n-# SciPy behavior fixed in 1.5.0: https://github.com/scipy/scipy/issues/11661\n-@testing.with_requires('scipy>=1.5.0')\n-class TestConvolveAndCorrelateMirrorDim1(unittest.TestCase):\n+@testing.with_requires('scipy')\n+class TestSpecialWeightCases(FilterTestCaseBase):\n @testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-5, scipy_name='scp')\n- def test_convolve_and_correlate(self, xp, scp):\n- a = testing.shaped_random(self.shape, xp, self.dtype)\n- w = testing.shaped_random((self.ksize,) * a.ndim, xp, self.dtype)\n- filter = getattr(scp.ndimage, self.filter)\n- return filter(a, w, axis=self.axis, output=None, mode='mirror',\n- 
cval=0.0, origin=0)\n+ #@testing.numpy_cupy_raises(scipy_name='scp', accept_error=ValueError)\n+ def test_extra_0_dim(self, xp, scp):\n+ # NOTE: minimum/maximum_filter raise ValueError but convolve/correlate\n+ # return an array of zeroes the same shape as the input. This will\n+ # handle both and only pass is both numpy and cupy do the same thing.\n+ self.kshape = (0,) + self.shape\n+ try:\n+ return self._filter(xp, scp)\n+ except ValueError:\n+ return xp.zeros((0,)) #xp.zeros(self.shape)\n+\n \n+ @testing.numpy_cupy_raises(scipy_name='scp', accept_error=RuntimeError)\n+ def test_missing_dim(self, xp, scp):\n+ self.kshape = self.shape[1:]\n+ return self._filter(xp, scp)\n \n+\n+ @testing.numpy_cupy_raises(scipy_name='scp', accept_error=RuntimeError)\n+ def test_extra_dim(self, xp, scp):\n+ self.kshape = self.shape[:1] + self.shape\n+ return self._filter(xp, scp)\n+\n+\n+ @testing.numpy_cupy_raises(scipy_name='scp', accept_error=(RuntimeError,\n+ ValueError))\n+ def test_replace_dim_with_0(self, xp, scp):\n+ self.kshape = (0,) + self.shape[1:]\n+ return self._filter(xp, scp)\n+\n+\n+# Tests special weights (1D)\n @testing.parameterize(*testing.product({\n- 'ndim': [2, 3],\n+ 'filter': ['convolve1d', 'correlate1d',\n+ 'minimum_filter1d', 'maximum_filter1d'],\n+ 'shape': [(3, 3), (3, 3, 3)],\n 'dtype': [numpy.int32, numpy.float64],\n- 'filter': ['convolve1d', 'correlate1d']\n }))\n @testing.gpu\n @testing.with_requires('scipy')\n-class TestConvolve1DAndCorrelate1DSpecialCases(unittest.TestCase):\n+class TestSpecialCases1D(FilterTestCaseBase):\n+ @testing.numpy_cupy_raises(scipy_name='scp', accept_error=RuntimeError)\n+ def test_0_dim(self, xp, scp):\n+ self.ksize = 0\n+ return self._filter(xp, scp)\n \n- def _filter(self, scp, a, w, mode='reflect', origin=0):\n- filter = getattr(scp.ndimage, self.filter)\n- return filter(a, w, mode=mode, origin=origin)\n-\n- def test_weights_with_size_zero_dim(self):\n- a = testing.shaped_random((3, ) * self.ndim, cupy, self.dtype)\n- w = testing.shaped_random((0, 3), cupy, self.dtype)\n- with self.assertRaises(RuntimeError):\n- self._filter(cupyx.scipy, a, w)\n-\n- def test_invalid_shape_weights(self):\n- a = testing.shaped_random((3, ) * self.ndim, cupy, self.dtype)\n- w = testing.shaped_random((3, 3), cupy, self.dtype)\n- with self.assertRaises(RuntimeError):\n- self._filter(cupyx.scipy, a, w)\n- w = testing.shaped_random((0, ), cupy,\n- self.dtype)\n- with self.assertRaises(RuntimeError):\n- self._filter(cupyx.scipy, a, w)\n-\n- def test_invalid_mode(self):\n- a = testing.shaped_random((3, ) * self.ndim, cupy, self.dtype)\n- w = testing.shaped_random((3,), cupy, self.dtype)\n- with self.assertRaises(RuntimeError):\n- self._filter(cupyx.scipy, a, w, mode='unknown')\n-\n- # SciPy behavior fixed in 1.2.0: https://github.com/scipy/scipy/issues/822\n- @testing.with_requires('scipy>=1.2.0')\n- def test_invalid_origin(self):\n- a = testing.shaped_random((3, ) * self.ndim, cupy, self.dtype)\n- for lenw in [3, 4]:\n- w = testing.shaped_random((lenw, ), cupy, self.dtype)\n- for origin in range(-3, 4):\n- if (lenw // 2 + origin < 0) or (lenw // 2 + origin >= lenw):\n- with self.assertRaises(ValueError):\n- self._filter(cupyx.scipy, a, w, origin=origin)\n- else:\n- self._filter(cupyx.scipy, a, w, origin=origin)\n-\n-\n-# ######### Testing minimum_filter and maximum_filter ##########\n \n+# Tests invalid axis value\n @testing.parameterize(*testing.product({\n- 'size': [3, 4],\n- 'footprint': [None, 'random'],\n- 'mode': ['reflect', 'constant', 'nearest', 'mirror', 
'wrap'],\n- 'origin': [0, None],\n- 'x_dtype': [numpy.int32, numpy.float32],\n- 'output': [None, numpy.float64],\n- 'filter': ['minimum_filter', 'maximum_filter']\n+ 'filter': ['convolve1d', 'correlate1d',\n+ 'minimum_filter1d', 'maximum_filter1d'],\n+ 'shape': [(4, 5), (3, 4, 5), (1, 3, 4, 5)],\n }))\n @testing.gpu\n @testing.with_requires('scipy')\n-class TestMinimumMaximumFilter(unittest.TestCase):\n-\n- shape = (4, 5)\n- cval = 0.0\n-\n- def _filter(self, xp, scp, x):\n- filter = getattr(scp.ndimage, self.filter)\n- if self.origin is None:\n- origin = (-1, 1, -1, 1)[:x.ndim]\n- else:\n- origin = self.origin\n- if self.footprint is None:\n- size, footprint = self.size, None\n- else:\n- size = None\n- shape = (self.size, ) * x.ndim\n- footprint = testing.shaped_random(shape, xp, scale=1) > .5\n- if not footprint.any():\n- footprint = xp.ones(shape)\n- return filter(x, size=size, footprint=footprint,\n- output=self.output, mode=self.mode, cval=self.cval,\n- origin=origin)\n-\n- @testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-5, scipy_name='scp')\n- def test_minimum_and_maximum_filter(self, xp, scp):\n- x = testing.shaped_random(self.shape, xp, self.x_dtype)\n- return self._filter(xp, scp, x)\n-\n-\n-# ######### Testing minimum_filter1d and maximum_filter1d ##########\n-\n-\n-@testing.parameterize(*(\n- testing.product({\n- 'shape': [(3, 4), (2, 3, 4), (1, 2, 3, 4)],\n- 'ksize': [3, 4],\n- 'axis': [0, 1, -1],\n- 'mode': ['reflect'],\n- 'cval': [0.0],\n- 'origin': [0, 1, -1],\n- 'wdtype': [numpy.int32, numpy.float64],\n- 'output': [None, numpy.int32, numpy.float64],\n- 'filter': ['minimum_filter1d', 'maximum_filter1d']\n- }) + testing.product({\n- 'shape': [(3, 4), (2, 3, 4), (1, 2, 3, 4)],\n- 'ksize': [3, 4],\n- 'axis': [0, 1, -1],\n- 'mode': ['constant'],\n- 'cval': [-1.0, 0.0, 1.0],\n- 'origin': [0],\n- 'wdtype': [numpy.int32, numpy.float64],\n- 'output': [None],\n- 'filter': ['minimum_filter1d', 'maximum_filter1d']\n- }) + testing.product({\n- 'shape': [(3, 4), (2, 3, 4), (1, 2, 3, 4)],\n- 'ksize': [3, 4],\n- 'axis': [0, 1, -1],\n- 'mode': ['nearest', 'mirror', 'wrap'],\n- 'cval': [0.0],\n- 'origin': [0],\n- 'wdtype': [numpy.int32, numpy.float64],\n- 'output': [None],\n- 'filter': ['minimum_filter1d', 'maximum_filter1d']\n- })\n-))\n+class TestInvalidAxis(FilterTestCaseBase):\n+ @testing.numpy_cupy_raises(scipy_name='scp', accept_error=ValueError)\n+ def test_invalid_axis_pos(self, xp, scp):\n+ self.axis = len(self.shape)\n+ try:\n+ return self._filter(xp, scp)\n+ except numpy.AxisError:\n+ # numpy.AxisError is a subclass of ValueError\n+ # currently cupyx is raising numpy.AxisError but scipy is still\n+ # raising ValueError\n+ raise ValueError('invalid axis')\n+\n+ @testing.numpy_cupy_raises(scipy_name='scp', accept_error=ValueError)\n+ def test_invalid_axis_neg(self, xp, scp):\n+ self.axis = -len(self.shape) - 1\n+ try:\n+ return self._filter(xp, scp)\n+ except numpy.AxisError:\n+ raise ValueError('invalid axis')\n+\n+\n+# Tests invalid mode value\n+@testing.parameterize(*testing.product({\n+ 'filter': ['convolve', 'correlate',\n+ 'convolve1d', 'correlate1d',\n+ 'minimum_filter', 'maximum_filter',\n+ 'minimum_filter1d', 'maximum_filter1d'],\n+ 'mode': ['unknown'],\n+ 'shape': [(4, 5)],\n+}))\n @testing.gpu\n @testing.with_requires('scipy')\n-class TestMinimumMaximum1DFilter(unittest.TestCase):\n- def _filter(self, xp, scp, a, w):\n- filter = getattr(scp.ndimage, self.filter)\n- return filter(a, w, axis=self.axis, output=self.output, mode=self.mode,\n- cval=self.cval, 
origin=self.origin)\n+class TestInvalidMode(FilterTestCaseBase):\n+ @testing.numpy_cupy_raises(scipy_name='scp', accept_error=RuntimeError)\n+ def test_invalid_mode(self, xp, scp):\n+ return self._filter(xp, scp)\n \n- @testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-5, scipy_name='scp')\n- def test_convolve1d_and_correlate1d(self, xp, scp):\n- a = testing.shaped_random(self.shape, xp, self.x_dtype)\n- w = testing.shaped_random((self.ksize,), xp, self.x_dtype)\n- return self._filter(xp, scp, a, w)\n+\n+# Tests invalid origin values\n+@testing.parameterize(*testing.product({\n+ 'filter': ['convolve', 'correlate',\n+ 'convolve1d', 'correlate1d',\n+ 'minimum_filter', 'maximum_filter',\n+ 'minimum_filter1d', 'maximum_filter1d'],\n+ 'ksize': [3, 4],\n+ 'shape': [(4, 5)], 'dtype': [numpy.float64],\n+}))\n+@testing.gpu\n+# SciPy behavior fixed in 1.2.0: https://github.com/scipy/scipy/issues/822\n+@testing.with_requires('scipy>=1.2.0')\n+class TestInvalidOrigin(FilterTestCaseBase):\n+ @testing.numpy_cupy_raises(scipy_name='scp', accept_error=ValueError)\n+ def test_invalid_origin_neg(self, xp, scp):\n+ self.origin = -self.ksize // 2 - 1\n+ return self._filter(xp, scp)\n+\n+ @testing.numpy_cupy_raises(scipy_name='scp', accept_error=ValueError)\n+ def test_invalid_origin_pos(self, xp, scp):\n+ self.origin = self.ksize - self.ksize // 2\n+ return self._filter(xp, scp)\n" } ]
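The rewritten tests above hang optional "parameters" (mode, cval, origin, axis, and so on) off the test class, and a shared base class builds the filter call from whichever attributes are present. A stripped-down illustration of that pattern with plain unittest is shown below; the names are illustrative, not those of the CuPy test suite.

```python
import unittest


class FilterCaseBase(unittest.TestCase):
    """Defaults live on the base class; parameterized cases override only what they need."""

    # default "parameters"
    filter = 'convolve'
    shape = (4, 5)
    ksize = 3

    # optional parameters forwarded only when a subclass (or instance) sets them
    KWARGS_PARAMS = ('mode', 'cval', 'origin')

    def _build_call(self):
        kwargs = {p: getattr(self, p)
                  for p in self.KWARGS_PARAMS if hasattr(self, p)}
        return self.filter, self.shape, (self.ksize,) * len(self.shape), kwargs


class TestReflectMode(FilterCaseBase):
    mode = 'reflect'   # the only parameter that differs from the defaults

    def test_only_set_params_are_forwarded(self):
        name, shape, kshape, kwargs = self._build_call()
        self.assertEqual(name, 'convolve')
        self.assertEqual(kshape, (3, 3))
        self.assertEqual(kwargs, {'mode': 'reflect'})  # cval/origin omitted


if __name__ == '__main__':
    unittest.main()
```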
dad51485282b6e05c4993b0733bd54aa3c0bacef
cupy/cupy
12.01.2021 16:21:46
MIT License
Use "import numpy as np" in the array_api submodule. This avoids importing everything inside the individual functions, but is still preferred over importing the used functions explicitly, as most of them clash with the wrapper function names.
[ { "change_type": "MODIFY", "old_path": "numpy/_array_api/_creation_functions.py", "new_path": "numpy/_array_api/_creation_functions.py", "diff": "@@ -1,76 +1,67 @@\n+import numpy as np\n+\n def arange(start, /, *, stop=None, step=1, dtype=None, device=None):\n- from .. import arange\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return arange(start, stop=stop, step=step, dtype=dtype)\n+ return np.arange(start, stop=stop, step=step, dtype=dtype)\n \n def empty(shape, /, *, dtype=None, device=None):\n- from .. import empty\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return empty(shape, dtype=dtype)\n+ return np.empty(shape, dtype=dtype)\n \n def empty_like(x, /, *, dtype=None, device=None):\n- from .. import empty_like\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return empty_like(x, dtype=dtype)\n+ return np.empty_like(x, dtype=dtype)\n \n def eye(N, /, *, M=None, k=0, dtype=None, device=None):\n- from .. import eye\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return eye(N, M=M, k=k, dtype=dtype)\n+ return np.eye(N, M=M, k=k, dtype=dtype)\n \n def full(shape, fill_value, /, *, dtype=None, device=None):\n- from .. import full\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return full(shape, fill_value, dtype=dtype)\n+ return np.full(shape, fill_value, dtype=dtype)\n \n def full_like(x, fill_value, /, *, dtype=None, device=None):\n- from .. import full_like\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return full_like(x, fill_value, dtype=dtype)\n+ return np.full_like(x, fill_value, dtype=dtype)\n \n def linspace(start, stop, num, /, *, dtype=None, device=None, endpoint=True):\n- from .. import linspace\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return linspace(start, stop, num, dtype=dtype, endpoint=endpoint)\n+ return np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint)\n \n def ones(shape, /, *, dtype=None, device=None):\n- from .. import ones\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return ones(shape, dtype=dtype)\n+ return np.ones(shape, dtype=dtype)\n \n def ones_like(x, /, *, dtype=None, device=None):\n- from .. import ones_like\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return ones_like(x, dtype=dtype)\n+ return np.ones_like(x, dtype=dtype)\n \n def zeros(shape, /, *, dtype=None, device=None):\n- from .. 
import zeros\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return zeros(shape, dtype=dtype)\n+ return np.zeros(shape, dtype=dtype)\n \n def zeros_like(x, /, *, dtype=None, device=None):\n- from .. import zeros_like\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return zeros_like(x, dtype=dtype)\n+ return np.zeros_like(x, dtype=dtype)\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_elementwise_functions.py", "new_path": "numpy/_array_api/_elementwise_functions.py", "diff": "@@ -1,230 +1,177 @@\n+import numpy as np\n+\n def abs(x, /):\n- from .. import abs\n- return abs(x)\n+ return np.abs(x)\n \n def acos(x, /):\n # Note: the function name is different here\n- from .. import arccos\n- return arccos(x)\n+ return np.arccos(x)\n \n def acosh(x, /):\n # Note: the function name is different here\n- from .. import arccosh\n- return arccosh(x)\n+ return np.arccosh(x)\n \n def add(x1, x2, /):\n- from .. import add\n- return add(x1, x2)\n+ return np.add(x1, x2)\n \n def asin(x, /):\n # Note: the function name is different here\n- from .. import arcsin\n- return arcsin(x)\n+ return np.arcsin(x)\n \n def asinh(x, /):\n # Note: the function name is different here\n- from .. import arcsinh\n- return arcsinh(x)\n+ return np.arcsinh(x)\n \n def atan(x, /):\n # Note: the function name is different here\n- from .. import arctan\n- return arctan(x)\n+ return np.arctan(x)\n \n def atan2(x1, x2, /):\n # Note: the function name is different here\n- from .. import arctan2\n- return arctan2(x1, x2)\n+ return np.arctan2(x1, x2)\n \n def atanh(x, /):\n # Note: the function name is different here\n- from .. import arctanh\n- return arctanh(x)\n+ return np.arctanh(x)\n \n def bitwise_and(x1, x2, /):\n- from .. import bitwise_and\n- return bitwise_and(x1, x2)\n+ return np.bitwise_and(x1, x2)\n \n def bitwise_left_shift(x1, x2, /):\n # Note: the function name is different here\n- from .. import left_shift\n- return left_shift(x1, x2)\n+ return np.left_shift(x1, x2)\n \n def bitwise_invert(x, /):\n # Note: the function name is different here\n- from .. import invert\n- return invert(x)\n+ return np.invert(x)\n \n def bitwise_or(x1, x2, /):\n- from .. import bitwise_or\n- return bitwise_or(x1, x2)\n+ return np.bitwise_or(x1, x2)\n \n def bitwise_right_shift(x1, x2, /):\n # Note: the function name is different here\n- from .. import right_shift\n- return right_shift(x1, x2)\n+ return np.right_shift(x1, x2)\n \n def bitwise_xor(x1, x2, /):\n- from .. import bitwise_xor\n- return bitwise_xor(x1, x2)\n+ return np.bitwise_xor(x1, x2)\n \n def ceil(x, /):\n- from .. import ceil\n- return ceil(x)\n+ return np.ceil(x)\n \n def cos(x, /):\n- from .. import cos\n- return cos(x)\n+ return np.cos(x)\n \n def cosh(x, /):\n- from .. import cosh\n- return cosh(x)\n+ return np.cosh(x)\n \n def divide(x1, x2, /):\n- from .. import divide\n- return divide(x1, x2)\n+ return np.divide(x1, x2)\n \n def equal(x1, x2, /):\n- from .. import equal\n- return equal(x1, x2)\n+ return np.equal(x1, x2)\n \n def exp(x, /):\n- from .. import exp\n- return exp(x)\n+ return np.exp(x)\n \n def expm1(x, /):\n- from .. import expm1\n- return expm1(x)\n+ return np.expm1(x)\n \n def floor(x, /):\n- from .. import floor\n- return floor(x)\n+ return np.floor(x)\n \n def floor_divide(x1, x2, /):\n- from .. 
import floor_divide\n- return floor_divide(x1, x2)\n+ return np.floor_divide(x1, x2)\n \n def greater(x1, x2, /):\n- from .. import greater\n- return greater(x1, x2)\n+ return np.greater(x1, x2)\n \n def greater_equal(x1, x2, /):\n- from .. import greater_equal\n- return greater_equal(x1, x2)\n+ return np.greater_equal(x1, x2)\n \n def isfinite(x, /):\n- from .. import isfinite\n- return isfinite(x)\n+ return np.isfinite(x)\n \n def isinf(x, /):\n- from .. import isinf\n- return isinf(x)\n+ return np.isinf(x)\n \n def isnan(x, /):\n- from .. import isnan\n- return isnan(x)\n+ return np.isnan(x)\n \n def less(x1, x2, /):\n- from .. import less\n- return less(x1, x2)\n+ return np.less(x1, x2)\n \n def less_equal(x1, x2, /):\n- from .. import less_equal\n- return less_equal(x1, x2)\n+ return np.less_equal(x1, x2)\n \n def log(x, /):\n- from .. import log\n- return log(x)\n+ return np.log(x)\n \n def log1p(x, /):\n- from .. import log1p\n- return log1p(x)\n+ return np.log1p(x)\n \n def log2(x, /):\n- from .. import log2\n- return log2(x)\n+ return np.log2(x)\n \n def log10(x, /):\n- from .. import log10\n- return log10(x)\n+ return np.log10(x)\n \n def logical_and(x1, x2, /):\n- from .. import logical_and\n- return logical_and(x1, x2)\n+ return np.logical_and(x1, x2)\n \n def logical_not(x, /):\n- from .. import logical_not\n- return logical_not(x)\n+ return np.logical_not(x)\n \n def logical_or(x1, x2, /):\n- from .. import logical_or\n- return logical_or(x1, x2)\n+ return np.logical_or(x1, x2)\n \n def logical_xor(x1, x2, /):\n- from .. import logical_xor\n- return logical_xor(x1, x2)\n+ return np.logical_xor(x1, x2)\n \n def multiply(x1, x2, /):\n- from .. import multiply\n- return multiply(x1, x2)\n+ return np.multiply(x1, x2)\n \n def negative(x, /):\n- from .. import negative\n- return negative(x)\n+ return np.negative(x)\n \n def not_equal(x1, x2, /):\n- from .. import not_equal\n- return not_equal(x1, x2)\n+ return np.not_equal(x1, x2)\n \n def positive(x, /):\n- from .. import positive\n- return positive(x)\n+ return np.positive(x)\n \n def pow(x1, x2, /):\n # Note: the function name is different here\n- from .. import power\n- return power(x1, x2)\n+ return np.power(x1, x2)\n \n def remainder(x1, x2, /):\n- from .. import remainder\n- return remainder(x1, x2)\n+ return np.remainder(x1, x2)\n \n def round(x, /):\n- from .. import round\n- return round(x)\n+ return np.round(x)\n \n def sign(x, /):\n- from .. import sign\n- return sign(x)\n+ return np.sign(x)\n \n def sin(x, /):\n- from .. import sin\n- return sin(x)\n+ return np.sin(x)\n \n def sinh(x, /):\n- from .. import sinh\n- return sinh(x)\n+ return np.sinh(x)\n \n def square(x, /):\n- from .. import square\n- return square(x)\n+ return np.square(x)\n \n def sqrt(x, /):\n- from .. import sqrt\n- return sqrt(x)\n+ return np.sqrt(x)\n \n def subtract(x1, x2, /):\n- from .. import subtract\n- return subtract(x1, x2)\n+ return np.subtract(x1, x2)\n \n def tan(x, /):\n- from .. import tan\n- return tan(x)\n+ return np.tan(x)\n \n def tanh(x, /):\n- from .. import tanh\n- return tanh(x)\n+ return np.tanh(x)\n \n def trunc(x, /):\n- from .. import trunc\n- return trunc(x)\n+ return np.trunc(x)\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_linear_algebra_functions.py", "new_path": "numpy/_array_api/_linear_algebra_functions.py", "diff": "@@ -1,93 +1,73 @@\n+import numpy as np\n+\n # def cholesky():\n-# from .. 
import cholesky\n-# return cholesky()\n+# return np.cholesky()\n \n def cross(x1, x2, /, *, axis=-1):\n- from .. import cross\n- return cross(x1, x2, axis=axis)\n+ return np.cross(x1, x2, axis=axis)\n \n def det(x, /):\n # Note: this function is being imported from a nondefault namespace\n- from ..linalg import det\n- return det(x)\n+ return np.det(x)\n \n def diagonal(x, /, *, axis1=0, axis2=1, offset=0):\n- from .. import diagonal\n- return diagonal(x, axis1=axis1, axis2=axis2, offset=offset)\n+ return np.diagonal(x, axis1=axis1, axis2=axis2, offset=offset)\n \n # def dot():\n-# from .. import dot\n-# return dot()\n+# return np.dot()\n #\n # def eig():\n-# from .. import eig\n-# return eig()\n+# return np.eig()\n #\n # def eigvalsh():\n-# from .. import eigvalsh\n-# return eigvalsh()\n+# return np.eigvalsh()\n #\n # def einsum():\n-# from .. import einsum\n-# return einsum()\n+# return np.einsum()\n \n def inv(x):\n # Note: this function is being imported from a nondefault namespace\n- from ..linalg import inv\n- return inv(x)\n+ return np.inv(x)\n \n # def lstsq():\n-# from .. import lstsq\n-# return lstsq()\n+# return np.lstsq()\n #\n # def matmul():\n-# from .. import matmul\n-# return matmul()\n+# return np.matmul()\n #\n # def matrix_power():\n-# from .. import matrix_power\n-# return matrix_power()\n+# return np.matrix_power()\n #\n # def matrix_rank():\n-# from .. import matrix_rank\n-# return matrix_rank()\n+# return np.matrix_rank()\n \n def norm(x, /, *, axis=None, keepdims=False, ord=None):\n # Note: this function is being imported from a nondefault namespace\n- from ..linalg import norm\n # Note: this is different from the default behavior\n if axis == None and x.ndim > 2:\n x = x.flatten()\n- return norm(x, axis=axis, keepdims=keepdims, ord=ord)\n+ return np.norm(x, axis=axis, keepdims=keepdims, ord=ord)\n \n def outer(x1, x2, /):\n- from .. import outer\n- return outer(x1, x2)\n+ return np.outer(x1, x2)\n \n # def pinv():\n-# from .. import pinv\n-# return pinv()\n+# return np.pinv()\n #\n # def qr():\n-# from .. import qr\n-# return qr()\n+# return np.qr()\n #\n # def slogdet():\n-# from .. import slogdet\n-# return slogdet()\n+# return np.slogdet()\n #\n # def solve():\n-# from .. import solve\n-# return solve()\n+# return np.solve()\n #\n # def svd():\n-# from .. import svd\n-# return svd()\n+# return np.svd()\n \n def trace(x, /, *, axis1=0, axis2=1, offset=0):\n- from .. import trace\n- return trace(x, axis1=axis1, axis2=axis2, offset=offset)\n+ return np.trace(x, axis1=axis1, axis2=axis2, offset=offset)\n \n def transpose(x, /, *, axes=None):\n- from .. import transpose\n- return transpose(x, axes=axes)\n+ return np.transpose(x, axes=axes)\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_manipulation_functions.py", "new_path": "numpy/_array_api/_manipulation_functions.py", "diff": "@@ -1,28 +1,23 @@\n+import numpy as np\n+\n def concat(arrays, /, *, axis=0):\n # Note: the function name is different here\n- from .. import concatenate\n- return concatenate(arrays, axis=axis)\n+ return np.concatenate(arrays, axis=axis)\n \n def expand_dims(x, axis, /):\n- from .. import expand_dims\n- return expand_dims(x, axis)\n+ return np.expand_dims(x, axis)\n \n def flip(x, /, *, axis=None):\n- from .. import flip\n- return flip(x, axis=axis)\n+ return np.flip(x, axis=axis)\n \n def reshape(x, shape, /):\n- from .. import reshape\n- return reshape(x, shape)\n+ return np.reshape(x, shape)\n \n def roll(x, shift, /, *, axis=None):\n- from .. 
import roll\n- return roll(x, shift, axis=axis)\n+ return np.roll(x, shift, axis=axis)\n \n def squeeze(x, /, *, axis=None):\n- from .. import squeeze\n- return squeeze(x, axis=axis)\n+ return np.squeeze(x, axis=axis)\n \n def stack(arrays, /, *, axis=0):\n- from .. import stack\n- return stack(arrays, axis=axis)\n+ return np.stack(arrays, axis=axis)\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_searching_functions.py", "new_path": "numpy/_array_api/_searching_functions.py", "diff": "@@ -1,15 +1,13 @@\n+import numpy as np\n+\n def argmax(x, /, *, axis=None, keepdims=False):\n- from .. import argmax\n- return argmax(x, axis=axis, keepdims=keepdims)\n+ return np.argmax(x, axis=axis, keepdims=keepdims)\n \n def argmin(x, /, *, axis=None, keepdims=False):\n- from .. import argmin\n- return argmin(x, axis=axis, keepdims=keepdims)\n+ return np.argmin(x, axis=axis, keepdims=keepdims)\n \n def nonzero(x, /):\n- from .. import nonzero\n- return nonzero(x)\n+ return np.nonzero(x)\n \n def where(condition, x1, x2, /):\n- from .. import where\n- return where(condition, x1, x2)\n+ return np.where(condition, x1, x2)\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_set_functions.py", "new_path": "numpy/_array_api/_set_functions.py", "diff": "@@ -1,3 +1,4 @@\n+import numpy as np\n+\n def unique(x, /, *, return_counts=False, return_index=False, return_inverse=False, sorted=True):\n- from .. import unique\n- return unique(x, return_counts=return_counts, return_index=return_index, return_inverse=return_inverse, sorted=sorted)\n+ return np.unique(x, return_counts=return_counts, return_index=return_index, return_inverse=return_inverse, sorted=sorted)\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_sorting_functions.py", "new_path": "numpy/_array_api/_sorting_functions.py", "diff": "@@ -1,19 +1,17 @@\n+import numpy as np\n+\n def argsort(x, /, *, axis=-1, descending=False, stable=True):\n- from .. import argsort\n- from .. import flip\n # Note: this keyword argument is different, and the default is different.\n kind = 'stable' if stable else 'quicksort'\n- res = argsort(x, axis=axis, kind=kind)\n+ res = np.argsort(x, axis=axis, kind=kind)\n if descending:\n- res = flip(res, axis=axis)\n+ res = np.flip(res, axis=axis)\n return res\n \n def sort(x, /, *, axis=-1, descending=False, stable=True):\n- from .. import sort\n- from .. import flip\n # Note: this keyword argument is different, and the default is different.\n kind = 'stable' if stable else 'quicksort'\n- res = sort(x, axis=axis, kind=kind)\n+ res = np.sort(x, axis=axis, kind=kind)\n if descending:\n- res = flip(res, axis=axis)\n+ res = np.flip(res, axis=axis)\n return res\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_statistical_functions.py", "new_path": "numpy/_array_api/_statistical_functions.py", "diff": "@@ -1,29 +1,24 @@\n+import numpy as np\n+\n def max(x, /, *, axis=None, keepdims=False):\n- from .. import max\n- return max(x, axis=axis, keepdims=keepdims)\n+ return np.max(x, axis=axis, keepdims=keepdims)\n \n def mean(x, /, *, axis=None, keepdims=False):\n- from .. import mean\n- return mean(x, axis=axis, keepdims=keepdims)\n+ return np.mean(x, axis=axis, keepdims=keepdims)\n \n def min(x, /, *, axis=None, keepdims=False):\n- from .. import min\n- return min(x, axis=axis, keepdims=keepdims)\n+ return np.min(x, axis=axis, keepdims=keepdims)\n \n def prod(x, /, *, axis=None, keepdims=False):\n- from .. 
import prod\n- return prod(x, axis=axis, keepdims=keepdims)\n+ return np.prod(x, axis=axis, keepdims=keepdims)\n \n def std(x, /, *, axis=None, correction=0.0, keepdims=False):\n- from .. import std\n # Note: the keyword argument correction is different here\n- return std(x, axis=axis, ddof=correction, keepdims=keepdims)\n+ return np.std(x, axis=axis, ddof=correction, keepdims=keepdims)\n \n def sum(x, /, *, axis=None, keepdims=False):\n- from .. import sum\n- return sum(x, axis=axis, keepdims=keepdims)\n+ return np.sum(x, axis=axis, keepdims=keepdims)\n \n def var(x, /, *, axis=None, correction=0.0, keepdims=False):\n- from .. import var\n # Note: the keyword argument correction is different here\n- return var(x, axis=axis, ddof=correction, keepdims=keepdims)\n+ return np.var(x, axis=axis, ddof=correction, keepdims=keepdims)\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_utility_functions.py", "new_path": "numpy/_array_api/_utility_functions.py", "diff": "@@ -1,7 +1,7 @@\n+import numpy as np\n+\n def all(x, /, *, axis=None, keepdims=False):\n- from .. import all\n- return all(x, axis=axis, keepdims=keepdims)\n+ return np.all(x, axis=axis, keepdims=keepdims)\n \n def any(x, /, *, axis=None, keepdims=False):\n- from .. import any\n- return any(x, axis=axis, keepdims=keepdims)\n+ return np.any(x, axis=axis, keepdims=keepdims)\n" } ]
76eb888612183768d9e1b0c818fcf5416c5f28c7
cupy/cupy
20.01.2021 18:25:20
MIT License
Use _implementation on all functions that have it in the array API submodule That way they only work on actual ndarray inputs, not array-like, which is more in line with the spec.
[ { "change_type": "MODIFY", "old_path": "numpy/_array_api/_creation_functions.py", "new_path": "numpy/_array_api/_creation_functions.py", "diff": "@@ -35,7 +35,7 @@ def empty_like(x: array, /, *, dtype: Optional[dtype] = None, device: Optional[d\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return np.empty_like(x, dtype=dtype)\n+ return np.empty_like._implementation(x, dtype=dtype)\n \n def eye(N: int, /, *, M: Optional[int] = None, k: Optional[int] = 0, dtype: Optional[dtype] = None, device: Optional[device] = None) -> array:\n \"\"\"\n@@ -68,7 +68,7 @@ def full_like(x: array, fill_value: Union[int, float], /, *, dtype: Optional[dty\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return np.full_like(x, fill_value, dtype=dtype)\n+ return np.full_like._implementation(x, fill_value, dtype=dtype)\n \n def linspace(start: Union[int, float], stop: Union[int, float], num: int, /, *, dtype: Optional[dtype] = None, device: Optional[device] = None, endpoint: bool = True) -> array:\n \"\"\"\n@@ -101,7 +101,7 @@ def ones_like(x: array, /, *, dtype: Optional[dtype] = None, device: Optional[de\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return np.ones_like(x, dtype=dtype)\n+ return np.ones_like._implementation(x, dtype=dtype)\n \n def zeros(shape: Union[int, Tuple[int, ...]], /, *, dtype: Optional[dtype] = None, device: Optional[device] = None) -> array:\n \"\"\"\n@@ -123,4 +123,4 @@ def zeros_like(x: array, /, *, dtype: Optional[dtype] = None, device: Optional[d\n if device is not None:\n # Note: Device support is not yet implemented on ndarray\n raise NotImplementedError(\"Device support is not yet implemented\")\n- return np.zeros_like(x, dtype=dtype)\n+ return np.zeros_like._implementation(x, dtype=dtype)\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_elementwise_functions.py", "new_path": "numpy/_array_api/_elementwise_functions.py", "diff": "@@ -381,7 +381,7 @@ def round(x: array, /) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.round(x)\n+ return np.round._implementation(x)\n \n def sign(x: array, /) -> array:\n \"\"\"\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_linear_algebra_functions.py", "new_path": "numpy/_array_api/_linear_algebra_functions.py", "diff": "@@ -18,7 +18,7 @@ def cross(x1: array, x2: array, /, *, axis: int = -1) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.cross(x1, x2, axis=axis)\n+ return np.cross._implementation(x1, x2, axis=axis)\n \n def det(x: array, /) -> array:\n \"\"\"\n@@ -35,7 +35,7 @@ def diagonal(x: array, /, *, axis1: int = 0, axis2: int = 1, offset: int = 0) ->\n \n See its docstring for more information.\n \"\"\"\n- return np.diagonal(x, axis1=axis1, axis2=axis2, offset=offset)\n+ return np.diagonal._implementation(x, axis1=axis1, axis2=axis2, offset=offset)\n \n # def dot():\n # \"\"\"\n@@ -128,7 +128,7 @@ def outer(x1: array, x2: array, /) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.outer(x1, x2)\n+ return np.outer._implementation(x1, x2)\n \n # def pinv():\n # \"\"\"\n@@ -176,7 +176,7 @@ def trace(x: array, /, *, axis1: int = 0, axis2: int = 1, offset: int = 0) -> ar\n \n See its docstring for 
more information.\n \"\"\"\n- return np.asarray(np.trace(x, axis1=axis1, axis2=axis2, offset=offset))\n+ return np.asarray(np.trace._implementation(x, axis1=axis1, axis2=axis2, offset=offset))\n \n def transpose(x: array, /, *, axes: Optional[Tuple[int, ...]] = None) -> array:\n \"\"\"\n@@ -184,4 +184,4 @@ def transpose(x: array, /, *, axes: Optional[Tuple[int, ...]] = None) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.transpose(x, axes=axes)\n+ return np.transpose._implementation(x, axes=axes)\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_manipulation_functions.py", "new_path": "numpy/_array_api/_manipulation_functions.py", "diff": "@@ -19,7 +19,7 @@ def expand_dims(x: array, axis: int, /) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.expand_dims(x, axis)\n+ return np.expand_dims._implementation(x, axis)\n \n def flip(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> array:\n \"\"\"\n@@ -27,7 +27,7 @@ def flip(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) ->\n \n See its docstring for more information.\n \"\"\"\n- return np.flip(x, axis=axis)\n+ return np.flip._implementation(x, axis=axis)\n \n def reshape(x: array, shape: Tuple[int, ...], /) -> array:\n \"\"\"\n@@ -35,7 +35,7 @@ def reshape(x: array, shape: Tuple[int, ...], /) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.reshape(x, shape)\n+ return np.reshape._implementation(x, shape)\n \n def roll(x: array, shift: Union[int, Tuple[int, ...]], /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> array:\n \"\"\"\n@@ -43,7 +43,7 @@ def roll(x: array, shift: Union[int, Tuple[int, ...]], /, *, axis: Optional[Unio\n \n See its docstring for more information.\n \"\"\"\n- return np.roll(x, shift, axis=axis)\n+ return np.roll._implementation(x, shift, axis=axis)\n \n def squeeze(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> array:\n \"\"\"\n@@ -51,7 +51,7 @@ def squeeze(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None)\n \n See its docstring for more information.\n \"\"\"\n- return np.squeeze(x, axis=axis)\n+ return np.squeeze._implementation(x, axis=axis)\n \n def stack(arrays: Tuple[array], /, *, axis: int = 0) -> array:\n \"\"\"\n@@ -59,4 +59,4 @@ def stack(arrays: Tuple[array], /, *, axis: int = 0) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.stack(arrays, axis=axis)\n+ return np.stack._implementation(arrays, axis=axis)\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_searching_functions.py", "new_path": "numpy/_array_api/_searching_functions.py", "diff": "@@ -11,7 +11,7 @@ def argmax(x: array, /, *, axis: int = None, keepdims: bool = False) -> array:\n See its docstring for more information.\n \"\"\"\n # Note: this currently fails as np.argmax does not implement keepdims\n- return np.asarray(np.argmax(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.argmax._implementation(x, axis=axis, keepdims=keepdims))\n \n def argmin(x: array, /, *, axis: int = None, keepdims: bool = False) -> array:\n \"\"\"\n@@ -20,7 +20,7 @@ def argmin(x: array, /, *, axis: int = None, keepdims: bool = False) -> array:\n See its docstring for more information.\n \"\"\"\n # Note: this currently fails as np.argmin does not implement keepdims\n- return np.asarray(np.argmin(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.argmin._implementation(x, axis=axis, keepdims=keepdims))\n \n def nonzero(x: array, 
/) -> Tuple[array, ...]:\n \"\"\"\n@@ -28,7 +28,7 @@ def nonzero(x: array, /) -> Tuple[array, ...]:\n \n See its docstring for more information.\n \"\"\"\n- return np.nonzero(x)\n+ return np.nonzero._implementation(x)\n \n def where(condition: array, x1: array, x2: array, /) -> array:\n \"\"\"\n@@ -36,4 +36,4 @@ def where(condition: array, x1: array, x2: array, /) -> array:\n \n See its docstring for more information.\n \"\"\"\n- return np.where(condition, x1, x2)\n+ return np.where._implementation(condition, x1, x2)\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_set_functions.py", "new_path": "numpy/_array_api/_set_functions.py", "diff": "@@ -10,4 +10,4 @@ def unique(x: array, /, *, return_counts: bool = False, return_index: bool = Fal\n \n See its docstring for more information.\n \"\"\"\n- return np.unique(x, return_counts=return_counts, return_index=return_index, return_inverse=return_inverse, sorted=sorted)\n+ return np.unique._implementation(x, return_counts=return_counts, return_index=return_index, return_inverse=return_inverse, sorted=sorted)\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_sorting_functions.py", "new_path": "numpy/_array_api/_sorting_functions.py", "diff": "@@ -12,7 +12,7 @@ def argsort(x: array, /, *, axis: int = -1, descending: bool = False, stable: bo\n \"\"\"\n # Note: this keyword argument is different, and the default is different.\n kind = 'stable' if stable else 'quicksort'\n- res = np.argsort(x, axis=axis, kind=kind)\n+ res = np.argsort._implementation(x, axis=axis, kind=kind)\n if descending:\n res = np.flip(res, axis=axis)\n return res\n@@ -25,7 +25,7 @@ def sort(x: array, /, *, axis: int = -1, descending: bool = False, stable: bool\n \"\"\"\n # Note: this keyword argument is different, and the default is different.\n kind = 'stable' if stable else 'quicksort'\n- res = np.sort(x, axis=axis, kind=kind)\n+ res = np.sort._implementation(x, axis=axis, kind=kind)\n if descending:\n res = np.flip(res, axis=axis)\n return res\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_statistical_functions.py", "new_path": "numpy/_array_api/_statistical_functions.py", "diff": "@@ -5,24 +5,24 @@ from ._types import Optional, Tuple, Union, array\n import numpy as np\n \n def max(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:\n- return np.max(x, axis=axis, keepdims=keepdims)\n+ return np.max._implementation(x, axis=axis, keepdims=keepdims)\n \n def mean(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:\n- return np.asarray(np.mean(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.mean._implementation(x, axis=axis, keepdims=keepdims))\n \n def min(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:\n- return np.min(x, axis=axis, keepdims=keepdims)\n+ return np.min._implementation(x, axis=axis, keepdims=keepdims)\n \n def prod(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:\n- return np.asarray(np.prod(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.prod._implementation(x, axis=axis, keepdims=keepdims))\n \n def std(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, correction: Union[int, float] = 0.0, keepdims: bool = False) -> array:\n # Note: the keyword argument correction is different here\n- return np.asarray(np.std(x, axis=axis, ddof=correction, keepdims=keepdims))\n+ return 
np.asarray(np.std._implementation(x, axis=axis, ddof=correction, keepdims=keepdims))\n \n def sum(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:\n- return np.asarray(np.sum(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.sum._implementation(x, axis=axis, keepdims=keepdims))\n \n def var(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, correction: Union[int, float] = 0.0, keepdims: bool = False) -> array:\n # Note: the keyword argument correction is different here\n- return np.asarray(np.var(x, axis=axis, ddof=correction, keepdims=keepdims))\n+ return np.asarray(np.var._implementation(x, axis=axis, ddof=correction, keepdims=keepdims))\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_utility_functions.py", "new_path": "numpy/_array_api/_utility_functions.py", "diff": "@@ -10,7 +10,7 @@ def all(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep\n \n See its docstring for more information.\n \"\"\"\n- return np.asarray(np.all(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.all._implementation(x, axis=axis, keepdims=keepdims))\n \n def any(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:\n \"\"\"\n@@ -18,4 +18,4 @@ def any(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep\n \n See its docstring for more information.\n \"\"\"\n- return np.asarray(np.any(x, axis=axis, keepdims=keepdims))\n+ return np.asarray(np.any._implementation(x, axis=axis, keepdims=keepdims))\n" } ]
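For context on the _implementation attribute used throughout the record above: NumPy's public functions are wrapped by the __array_function__ override machinery, and the wrapper keeps the undecorated function reachable as ._implementation. The sketch below is only an illustrative stand-in for that wrapping pattern; the decorator name and the expand_dims body here are placeholders, not NumPy's actual overrides code.

    import functools

    def array_function_dispatch(func):
        # Stand-in for NumPy's overrides machinery: the public wrapper would
        # run __array_function__ dispatch, while the raw function stays
        # reachable as `_implementation` for callers that want to skip it.
        @functools.wraps(func)
        def public(*args, **kwargs):
            # ... dispatch to __array_function__ overrides would go here ...
            return func(*args, **kwargs)
        public._implementation = func
        return public

    @array_function_dispatch
    def expand_dims(x, axis):
        return x  # placeholder body

    expand_dims._implementation([1, 2, 3], 0)  # calls the raw function, no dispatch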
994ce07595026d5de54f52ef5748b578f9fae1bc
cupy/cupy
09.07.2021 13:57:44
MIT License
Use better type signatures in the array API module This includes returning custom dataclasses for finfo and iinfo that only contain the properties required by the array API specification.
[ { "change_type": "MODIFY", "old_path": "numpy/_array_api/_array_object.py", "new_path": "numpy/_array_api/_array_object.py", "diff": "@@ -396,7 +396,8 @@ class Array:\n res = self._array.__le__(other._array)\n return self.__class__._new(res)\n \n- def __len__(self, /):\n+ # Note: __len__ may end up being removed from the array API spec.\n+ def __len__(self, /) -> int:\n \"\"\"\n Performs the operation __len__.\n \"\"\"\n@@ -843,7 +844,7 @@ class Array:\n return self.__class__._new(res)\n \n @property\n- def dtype(self):\n+ def dtype(self) -> Dtype:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndaray.dtype <numpy.ndarray.dtype>`.\n \n@@ -852,7 +853,7 @@ class Array:\n return self._array.dtype\n \n @property\n- def device(self):\n+ def device(self) -> Device:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndaray.device <numpy.ndarray.device>`.\n \n@@ -862,7 +863,7 @@ class Array:\n raise NotImplementedError(\"The device attribute is not yet implemented\")\n \n @property\n- def ndim(self):\n+ def ndim(self) -> int:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndaray.ndim <numpy.ndarray.ndim>`.\n \n@@ -871,7 +872,7 @@ class Array:\n return self._array.ndim\n \n @property\n- def shape(self):\n+ def shape(self) -> Tuple[int, ...]:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndaray.shape <numpy.ndarray.shape>`.\n \n@@ -880,7 +881,7 @@ class Array:\n return self._array.shape\n \n @property\n- def size(self):\n+ def size(self) -> int:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndaray.size <numpy.ndarray.size>`.\n \n@@ -889,7 +890,7 @@ class Array:\n return self._array.size\n \n @property\n- def T(self):\n+ def T(self) -> Array:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndaray.T <numpy.ndarray.T>`.\n \n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_creation_functions.py", "new_path": "numpy/_array_api/_creation_functions.py", "diff": "@@ -10,7 +10,7 @@ from ._dtypes import _all_dtypes\n \n import numpy as np\n \n-def asarray(obj: Union[float, NestedSequence[bool|int|float], SupportsDLPack, SupportsBufferProtocol], /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, copy: Optional[bool] = None) -> Array:\n+def asarray(obj: Union[Array, float, NestedSequence[bool|int|float], SupportsDLPack, SupportsBufferProtocol], /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, copy: Optional[bool] = None) -> Array:\n \"\"\"\n Array API compatible wrapper for :py:func:`np.asarray <numpy.asarray>`.\n \n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_data_type_functions.py", "new_path": "numpy/_array_api/_data_type_functions.py", "diff": "@@ -2,6 +2,7 @@ from __future__ import annotations\n \n from ._array_object import Array\n \n+from dataclasses import dataclass\n from typing import TYPE_CHECKING\n if TYPE_CHECKING:\n from ._types import List, Tuple, Union, Dtype\n@@ -38,13 +39,44 @@ def can_cast(from_: Union[Dtype, Array], to: Dtype, /) -> bool:\n from_ = from_._array\n return np.can_cast(from_, to)\n \n+# These are internal objects for the return types of finfo and iinfo, since\n+# the NumPy versions contain extra data that isn't part of the spec.\n+@dataclass\n+class finfo_object:\n+ bits: int\n+ # Note: The types of the float data here are float, whereas in NumPy they\n+ # are scalars of the corresponding float dtype.\n+ eps: float\n+ max: float\n+ min: float\n+ # Note: smallest_normal is part of the array API spec, but cannot be used\n+ # until 
https://github.com/numpy/numpy/pull/18536 is merged.\n+\n+ # smallest_normal: float\n+\n+@dataclass\n+class iinfo_object:\n+ bits: int\n+ max: int\n+ min: int\n+\n def finfo(type: Union[Dtype, Array], /) -> finfo_object:\n \"\"\"\n Array API compatible wrapper for :py:func:`np.finfo <numpy.finfo>`.\n \n See its docstring for more information.\n \"\"\"\n- return np.finfo(type)\n+ fi = np.finfo(type)\n+ # Note: The types of the float data here are float, whereas in NumPy they\n+ # are scalars of the corresponding float dtype.\n+ return finfo_object(\n+ fi.bits,\n+ float(fi.eps),\n+ float(fi.max),\n+ float(fi.min),\n+ # TODO: Uncomment this when #18536 is merged.\n+ # float(fi.smallest_normal),\n+ )\n \n def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:\n \"\"\"\n@@ -52,7 +84,8 @@ def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:\n \n See its docstring for more information.\n \"\"\"\n- return np.iinfo(type)\n+ ii = np.iinfo(type)\n+ return iinfo_object(ii.bits, ii.max, ii.min)\n \n def result_type(*arrays_and_dtypes: Sequence[Union[Array, Dtype]]) -> Dtype:\n \"\"\"\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_manipulation_functions.py", "new_path": "numpy/_array_api/_manipulation_functions.py", "diff": "@@ -7,7 +7,7 @@ from typing import List, Optional, Tuple, Union\n import numpy as np\n \n # Note: the function name is different here\n-def concat(arrays: Tuple[Array, ...], /, *, axis: Optional[int] = 0) -> Array:\n+def concat(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: Optional[int] = 0) -> Array:\n \"\"\"\n Array API compatible wrapper for :py:func:`np.concatenate <numpy.concatenate>`.\n \n@@ -56,7 +56,7 @@ def squeeze(x: Array, /, axis: Optional[Union[int, Tuple[int, ...]]] = None) ->\n \"\"\"\n return Array._new(np.squeeze(x._array, axis=axis))\n \n-def stack(arrays: Tuple[Array, ...], /, *, axis: int = 0) -> Array:\n+def stack(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: int = 0) -> Array:\n \"\"\"\n Array API compatible wrapper for :py:func:`np.stack <numpy.stack>`.\n \n" } ]
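To make the new finfo/iinfo wrappers above concrete, here is a standalone toy version of the iinfo path; it only assumes NumPy, and the values in the final comment are those reported by np.iinfo(np.int8).

    from dataclasses import dataclass
    import numpy as np

    @dataclass
    class iinfo_object:
        # Only the fields required by the array API spec, unlike np.iinfo.
        bits: int
        max: int
        min: int

    def iinfo(type):
        ii = np.iinfo(type)
        return iinfo_object(ii.bits, ii.max, ii.min)

    print(iinfo(np.int8))  # iinfo_object(bits=8, max=127, min=-128)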
783d157701ea6afa16a620669f89720864e62e9e
cupy/cupy
09.07.2021 18:08:22
MIT License
Make the array API left and right shift do type promotion The spec previously said it should return the type of the left argument, but this was changed to do type promotion to be consistent with all the other elementwise functions/operators.
[ { "change_type": "MODIFY", "old_path": "numpy/_array_api/_array_object.py", "new_path": "numpy/_array_api/_array_object.py", "diff": "@@ -410,11 +410,8 @@ class Array:\n \"\"\"\n if isinstance(other, (int, float, bool)):\n other = self._promote_scalar(other)\n- # Note: The spec requires the return dtype of bitwise_left_shift, and\n- # hence also __lshift__, to be the same as the first argument.\n- # np.ndarray.__lshift__ returns a type that is the type promotion of\n- # the two input types.\n- res = self._array.__lshift__(other._array).astype(self.dtype)\n+ self, other = self._normalize_two_args(self, other)\n+ res = self._array.__lshift__(other._array)\n return self.__class__._new(res)\n \n def __lt__(self: Array, other: Union[int, float, Array], /) -> Array:\n@@ -517,11 +514,8 @@ class Array:\n \"\"\"\n if isinstance(other, (int, float, bool)):\n other = self._promote_scalar(other)\n- # Note: The spec requires the return dtype of bitwise_right_shift, and\n- # hence also __rshift__, to be the same as the first argument.\n- # np.ndarray.__rshift__ returns a type that is the type promotion of\n- # the two input types.\n- res = self._array.__rshift__(other._array).astype(self.dtype)\n+ self, other = self._normalize_two_args(self, other)\n+ res = self._array.__rshift__(other._array)\n return self.__class__._new(res)\n \n def __setitem__(self, key, value, /):\n@@ -646,11 +640,8 @@ class Array:\n \"\"\"\n if isinstance(other, (int, float, bool)):\n other = self._promote_scalar(other)\n- # Note: The spec requires the return dtype of bitwise_left_shift, and\n- # hence also __lshift__, to be the same as the first argument.\n- # np.ndarray.__lshift__ returns a type that is the type promotion of\n- # the two input types.\n- res = self._array.__rlshift__(other._array).astype(other.dtype)\n+ self, other = self._normalize_two_args(self, other)\n+ res = self._array.__rlshift__(other._array)\n return self.__class__._new(res)\n \n def __imatmul__(self: Array, other: Array, /) -> Array:\n@@ -787,11 +778,8 @@ class Array:\n \"\"\"\n if isinstance(other, (int, float, bool)):\n other = self._promote_scalar(other)\n- # Note: The spec requires the return dtype of bitwise_right_shift, and\n- # hence also __rshift__, to be the same as the first argument.\n- # np.ndarray.__rshift__ returns a type that is the type promotion of\n- # the two input types.\n- res = self._array.__rrshift__(other._array).astype(other.dtype)\n+ self, other = self._normalize_two_args(self, other)\n+ res = self._array.__rrshift__(other._array)\n return self.__class__._new(res)\n \n @np.errstate(all='ignore')\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_elementwise_functions.py", "new_path": "numpy/_array_api/_elementwise_functions.py", "diff": "@@ -136,10 +136,7 @@ def bitwise_left_shift(x1: Array, x2: Array, /) -> Array:\n # Note: bitwise_left_shift is only defined for x2 nonnegative.\n if np.any(x2._array < 0):\n raise ValueError('bitwise_left_shift(x1, x2) is only defined for x2 >= 0')\n- # Note: The spec requires the return dtype of bitwise_left_shift to be the\n- # same as the first argument. 
np.left_shift() returns a type that is the\n- # type promotion of the two input types.\n- return Array._new(np.left_shift(x1._array, x2._array).astype(x1.dtype))\n+ return Array._new(np.left_shift(x1._array, x2._array))\n \n # Note: the function name is different here\n def bitwise_invert(x: Array, /) -> Array:\n@@ -176,10 +173,7 @@ def bitwise_right_shift(x1: Array, x2: Array, /) -> Array:\n # Note: bitwise_right_shift is only defined for x2 nonnegative.\n if np.any(x2._array < 0):\n raise ValueError('bitwise_right_shift(x1, x2) is only defined for x2 >= 0')\n- # Note: The spec requires the return dtype of bitwise_left_shift to be the\n- # same as the first argument. np.left_shift() returns a type that is the\n- # type promotion of the two input types.\n- return Array._new(np.right_shift(x1._array, x2._array).astype(x1.dtype))\n+ return Array._new(np.right_shift(x1._array, x2._array))\n \n def bitwise_xor(x1: Array, x2: Array, /) -> Array:\n \"\"\"\n" } ]
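The behaviour change above is easy to observe in plain NumPy, where the shift ufuncs already promote their operand dtypes; the old wrapper forced the result back to the first argument's dtype with astype. A small check, assuming a NumPy version with standard array promotion rules:

    import numpy as np

    a = np.array([1, 2, 3], dtype=np.uint8)
    b = np.array([2, 2, 2], dtype=np.uint32)

    promoted = np.left_shift(a, b)
    print(promoted.dtype)                              # uint32 (type promotion)
    print(np.left_shift(a, b).astype(a.dtype).dtype)   # uint8 (old wrapper behaviour)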
29535ad693507084ff3691fefd637a6b7292674f
cupy/cupy
21.07.2021 15:45:36
MIT License
Implement the array API result_type() manually np.result_type() has too many behaviors that we want to avoid in the array API namespace, like value-based casting and unwanted type promotions. Instead, we implement the exact type promotion table from the spec.
[ { "change_type": "MODIFY", "old_path": "numpy/_array_api/_data_type_functions.py", "new_path": "numpy/_array_api/_data_type_functions.py", "diff": "@@ -1,7 +1,7 @@\n from __future__ import annotations\n \n from ._array_object import Array\n-from ._dtypes import _all_dtypes\n+from ._dtypes import _all_dtypes, _result_type\n \n from dataclasses import dataclass\n from typing import TYPE_CHECKING, List, Tuple, Union\n@@ -94,12 +94,24 @@ def result_type(*arrays_and_dtypes: Sequence[Union[Array, Dtype]]) -> Dtype:\n \n See its docstring for more information.\n \"\"\"\n+ # Note: we use a custom implementation that gives only the type promotions\n+ # required by the spec rather than using np.result_type. NumPy implements\n+ # too many extra type promotions like int64 + uint64 -> float64, and does\n+ # value-based casting on scalar arrays.\n A = []\n for a in arrays_and_dtypes:\n if isinstance(a, Array):\n- a = a._array\n+ a = a.dtype\n elif isinstance(a, np.ndarray) or a not in _all_dtypes:\n raise TypeError(\"result_type() inputs must be array_api arrays or dtypes\")\n A.append(a)\n \n- return np.result_type(*A)\n+ if len(A) == 0:\n+ raise ValueError(\"at least one array or dtype is required\")\n+ elif len(A) == 1:\n+ return A[0]\n+ else:\n+ t = A[0]\n+ for t2 in A[1:]:\n+ t = _result_type(t, t2)\n+ return t\n" }, { "change_type": "MODIFY", "old_path": "numpy/_array_api/_dtypes.py", "new_path": "numpy/_array_api/_dtypes.py", "diff": "@@ -22,3 +22,72 @@ _floating_dtypes = (float32, float64)\n _integer_dtypes = (int8, int16, int32, int64, uint8, uint16, uint32, uint64)\n _integer_or_boolean_dtypes = (bool, int8, int16, int32, int64, uint8, uint16, uint32, uint64)\n _numeric_dtypes = (float32, float64, int8, int16, int32, int64, uint8, uint16, uint32, uint64)\n+\n+_promotion_table = {\n+ (int8, int8): int8,\n+ (int8, int16): int16,\n+ (int8, int32): int32,\n+ (int8, int64): int64,\n+ (int16, int8): int16,\n+ (int16, int16): int16,\n+ (int16, int32): int32,\n+ (int16, int64): int64,\n+ (int32, int8): int32,\n+ (int32, int16): int32,\n+ (int32, int32): int32,\n+ (int32, int64): int64,\n+ (int64, int8): int64,\n+ (int64, int16): int64,\n+ (int64, int32): int64,\n+ (int64, int64): int64,\n+ (uint8, uint8): uint8,\n+ (uint8, uint16): uint16,\n+ (uint8, uint32): uint32,\n+ (uint8, uint64): uint64,\n+ (uint16, uint8): uint16,\n+ (uint16, uint16): uint16,\n+ (uint16, uint32): uint32,\n+ (uint16, uint64): uint64,\n+ (uint32, uint8): uint32,\n+ (uint32, uint16): uint32,\n+ (uint32, uint32): uint32,\n+ (uint32, uint64): uint64,\n+ (uint64, uint8): uint64,\n+ (uint64, uint16): uint64,\n+ (uint64, uint32): uint64,\n+ (uint64, uint64): uint64,\n+ (int8, uint8): int16,\n+ (int8, uint16): int32,\n+ (int8, uint32): int64,\n+ (int16, uint8): int16,\n+ (int16, uint16): int32,\n+ (int16, uint32): int64,\n+ (int32, uint8): int32,\n+ (int32, uint16): int32,\n+ (int32, uint32): int64,\n+ (int64, uint8): int64,\n+ (int64, uint16): int64,\n+ (int64, uint32): int64,\n+ (uint8, int8): int16,\n+ (uint16, int8): int32,\n+ (uint32, int8): int64,\n+ (uint8, int16): int16,\n+ (uint16, int16): int32,\n+ (uint32, int16): int64,\n+ (uint8, int32): int32,\n+ (uint16, int32): int32,\n+ (uint32, int32): int64,\n+ (uint8, int64): int64,\n+ (uint16, int64): int64,\n+ (uint32, int64): int64,\n+ (float32, float32): float32,\n+ (float32, float64): float64,\n+ (float64, float32): float64,\n+ (float64, float64): float64,\n+ (bool, bool): bool,\n+}\n+\n+def _result_type(type1, type2):\n+ if (type1, type2) in _promotion_table:\n+ return 
_promotion_table[type1, type2]\n+ raise TypeError(f\"{type1} and {type2} cannot be type promoted together\")\n" } ]
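For contrast with the table-driven _result_type above: plain np.result_type applies the extra promotions the commit message mentions, for example turning an int64/uint64 pair into float64, while the spec-only table simply rejects unlisted pairs. The two-entry table below is an abbreviated excerpt for illustration, not the full table from the diff.

    import numpy as np

    # NumPy's own rule that the array API namespace wants to avoid:
    print(np.result_type(np.int64, np.uint64))   # float64

    # Spec-only approach: unlisted pairs raise instead of promoting.
    _promotion_table = {
        (np.int64, np.int64): np.int64,
        (np.uint64, np.uint64): np.uint64,
    }

    def _result_type(type1, type2):
        if (type1, type2) in _promotion_table:
            return _promotion_table[type1, type2]
        raise TypeError(f"{type1} and {type2} cannot be type promoted together")

    print(_result_type(np.int64, np.int64))      # <class 'numpy.int64'>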
4877478d275959f746dab4f7b91bfe68956f26f1
netflix/security_monkey
26.01.2018 18:59:26
Apache License 2.0
Fix for orphaned items that may develop from a failed watcher event. - Also added optional (but on by default) silencing of verbose and useless botocore logs.
[ { "change_type": "MODIFY", "old_path": "security_monkey/datastore_utils.py", "new_path": "security_monkey/datastore_utils.py", "diff": "@@ -95,7 +95,6 @@ def create_item(item, technology, account):\n )\n \n \n-\n def detect_change(item, account, technology, complete_hash, durable_hash):\n \"\"\"\n Checks the database to see if the latest revision of the specified\n" }, { "change_type": "MODIFY", "old_path": "security_monkey/task_scheduler/tasks.py", "new_path": "security_monkey/task_scheduler/tasks.py", "diff": "@@ -12,7 +12,7 @@ import traceback\n \n from security_monkey import app, db, jirasync, sentry\n from security_monkey.alerter import Alerter\n-from security_monkey.datastore import store_exception, clear_old_exceptions\n+from security_monkey.datastore import store_exception, clear_old_exceptions, Technology, Account, Item, ItemRevision\n from security_monkey.monitors import get_monitors, get_monitors_and_dependencies\n from security_monkey.reporter import Reporter\n from security_monkey.task_scheduler.util import CELERY, setup\n@@ -70,9 +70,57 @@ def clear_expired_exceptions():\n app.logger.info(\"[-] Completed clearing out exceptions that have an expired TTL.\")\n \n \n+def fix_orphaned_deletions(account_name, technology_name):\n+ \"\"\"\n+ Possible issue with orphaned items. This will check if there are any, and will assume that the item\n+ was deleted. This will create a deletion change record to it.\n+\n+ :param account_name:\n+ :param technology_name:\n+ :return:\n+ \"\"\"\n+ # If technology doesn't exist, then create it:\n+ technology = Technology.query.filter(Technology.name == technology_name).first()\n+ if not technology:\n+ technology = Technology(name=technology_name)\n+ db.session.add(technology)\n+ db.session.commit()\n+ app.logger.info(\"Technology: {} did not exist... created it...\".format(technology_name))\n+\n+ account = Account.query.filter(Account.name == account_name).one()\n+\n+ # Query for orphaned items of the given technology/account pair:\n+ orphaned_items = Item.query.filter(Item.account_id == account.id, Item.tech_id == technology.id,\n+ Item.latest_revision_id == None).all() # noqa\n+\n+ if not orphaned_items:\n+ app.logger.info(\"[@] No orphaned items have been found. (This is good)\")\n+ return\n+\n+ # Fix the orphaned items:\n+ for oi in orphaned_items:\n+ app.logger.error(\"[?] Found an orphaned item: {}. Creating a deletion record for it\".format(oi.name))\n+ revision = ItemRevision(active=False, config={})\n+ oi.revisions.append(revision)\n+ db.session.add(revision)\n+ db.session.add(oi)\n+ db.session.commit()\n+\n+ # Update the latest revision id:\n+ db.session.refresh(revision)\n+ oi.latest_revision_id = revision.id\n+ db.session.add(oi)\n+\n+ db.session.commit()\n+ app.logger.info(\"[-] Created deletion record for item: {}.\".format(oi.name))\n+\n+\n def reporter_logic(account_name, technology_name):\n \"\"\"Logic for the run change reporter\"\"\"\n try:\n+ # Before doing anything... Look for orphaned items for this given technology. If they exist, then delete them:\n+ fix_orphaned_deletions(account_name, technology_name)\n+\n # Watch and Audit:\n monitors = find_changes(account_name, technology_name)\n \n@@ -140,6 +188,9 @@ def find_changes(account_name, monitor_name, debug=True):\n Runs the watcher and stores the result, re-audits all types to account\n for downstream dependencies.\n \"\"\"\n+ # Before doing anything... Look for orphaned items for this given technology. 
If they exist, then delete them:\n+ fix_orphaned_deletions(account_name, monitor_name)\n+\n monitors = get_monitors(account_name, [monitor_name], debug)\n for mon in monitors:\n cw = mon.watcher\n" }, { "change_type": "MODIFY", "old_path": "security_monkey/tests/scheduling/test_celery_scheduler.py", "new_path": "security_monkey/tests/scheduling/test_celery_scheduler.py", "diff": "@@ -84,7 +84,8 @@ class CelerySchedulerTestCase(SecurityMonkeyTestCase):\n \n db.session.commit()\n \n- def test_find_batch_changes(self):\n+ @patch(\"security_monkey.task_scheduler.tasks.fix_orphaned_deletions\")\n+ def test_find_batch_changes(self, mock_fix_orphaned):\n \"\"\"\n Runs through a full find job via the IAMRole watcher, as that supports batching.\n \n@@ -92,7 +93,7 @@ class CelerySchedulerTestCase(SecurityMonkeyTestCase):\n not going to do any boto work and that will instead be mocked out.\n :return:\n \"\"\"\n- from security_monkey.task_scheduler.tasks import manual_run_change_finder, setup\n+ from security_monkey.task_scheduler.tasks import manual_run_change_finder\n from security_monkey.monitors import Monitor\n from security_monkey.watchers.iam.iam_role import IAMRole\n from security_monkey.auditors.iam.iam_role import IAMRoleAuditor\n@@ -142,6 +143,7 @@ class CelerySchedulerTestCase(SecurityMonkeyTestCase):\n watcher.slurp = mock_slurp\n \n manual_run_change_finder([test_account.name], [watcher.index])\n+ assert mock_fix_orphaned.called\n \n # Check that all items were added to the DB:\n assert len(Item.query.all()) == 11\n@@ -271,8 +273,9 @@ class CelerySchedulerTestCase(SecurityMonkeyTestCase):\n client.put_role_policy(RoleName=\"roleNumber{}\".format(x), PolicyName=\"testpolicy\",\n PolicyDocument=json.dumps(OPEN_POLICY, indent=4))\n \n- def test_report_batch_changes(self):\n- from security_monkey.task_scheduler.tasks import manual_run_change_reporter, setup\n+ @patch(\"security_monkey.task_scheduler.tasks.fix_orphaned_deletions\")\n+ def test_report_batch_changes(self, mock_fix_orphaned):\n+ from security_monkey.task_scheduler.tasks import manual_run_change_reporter\n from security_monkey.datastore import Item, ItemRevision, ItemAudit\n from security_monkey.monitors import Monitor\n from security_monkey.watchers.iam.iam_role import IAMRole\n@@ -327,6 +330,8 @@ class CelerySchedulerTestCase(SecurityMonkeyTestCase):\n \n manual_run_change_reporter([test_account.name])\n \n+ assert mock_fix_orphaned.called\n+\n # Check that all items were added to the DB:\n assert len(Item.query.all()) == 11\n \n@@ -348,6 +353,32 @@ class CelerySchedulerTestCase(SecurityMonkeyTestCase):\n purge_it()\n assert mock.control.purge.called\n \n+ def test_fix_orphaned_deletions(self):\n+ test_account = Account.query.filter(Account.name == \"TEST_ACCOUNT1\").one()\n+ technology = Technology(name=\"orphaned\")\n+\n+ db.session.add(technology)\n+ db.session.commit()\n+\n+ orphaned_item = Item(name=\"orphaned\", region=\"us-east-1\", tech_id=technology.id, account_id=test_account.id)\n+ db.session.add(orphaned_item)\n+ db.session.commit()\n+\n+ assert not orphaned_item.latest_revision_id\n+ assert not orphaned_item.revisions.count()\n+ assert len(Item.query.filter(Item.account_id == test_account.id, Item.tech_id == technology.id,\n+ Item.latest_revision_id == None).all()) == 1 # noqa\n+\n+ from security_monkey.task_scheduler.tasks import fix_orphaned_deletions\n+ fix_orphaned_deletions(test_account.name, technology.name)\n+\n+ assert not Item.query.filter(Item.account_id == test_account.id, Item.tech_id == 
technology.id,\n+ Item.latest_revision_id == None).all() # noqa\n+\n+ assert orphaned_item.latest_revision_id\n+ assert orphaned_item.revisions.count() == 1\n+ assert orphaned_item.latest_config == {}\n+\n @patch(\"security_monkey.task_scheduler.beat.setup\")\n @patch(\"security_monkey.task_scheduler.beat.purge_it\")\n @patch(\"security_monkey.task_scheduler.tasks.task_account_tech\")\n" }, { "change_type": "MODIFY", "old_path": "security_monkey/watcher.py", "new_path": "security_monkey/watcher.py", "diff": "@@ -26,10 +26,17 @@ from copy import deepcopy\n import dpath.util\n from dpath.exceptions import PathNotFound\n \n+import logging\n+\n watcher_registry = {}\n abstract_classes = set(['Watcher', 'CloudAuxWatcher', 'CloudAuxBatchedWatcher'])\n \n \n+if not app.config.get(\"DONT_IGNORE_BOTO_VERBOSE_LOGGERS\"):\n+ logging.getLogger('botocore.vendored.requests.packages.urllib3').setLevel(logging.WARNING)\n+ logging.getLogger('botocore.credentials').setLevel(logging.WARNING)\n+\n+\n class WatcherType(type):\n def __init__(cls, name, bases, attrs):\n super(WatcherType, cls).__init__(name, bases, attrs)\n" }, { "change_type": "MODIFY", "old_path": "security_monkey/watchers/sqs.py", "new_path": "security_monkey/watchers/sqs.py", "diff": "@@ -67,10 +67,15 @@ class SQS(CloudAuxBatchedWatcher):\n \n # Offset by the existing items in the list (from other regions)\n offset = len(self.corresponding_items)\n+ queue_count = -1\n \n- for i in range(0, len(queues)):\n- items.append({\"Url\": queues[i], \"Region\": kwargs[\"region\"]})\n- self.corresponding_items[queues[i]] = i + offset\n+ for item_count in range(0, len(queues)):\n+ if self.corresponding_items.get(queues[item_count]):\n+ app.logger.error(\"[?] Received a duplicate item in the SQS list: {}. Skipping it.\".format(queues[item_count]))\n+ continue\n+ queue_count += 1\n+ items.append({\"Url\": queues[item_count], \"Region\": kwargs[\"region\"]})\n+ self.corresponding_items[queues[item_count]] = queue_count + offset\n \n return items\n \n" } ]
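The heart of the orphaned-item fix above is a query for items whose latest_revision_id is NULL plus a synthetic inactive revision. The following is a self-contained toy reconstruction with drastically simplified models; an in-memory SQLite database and SQLAlchemy 1.4+ are assumed, and the real Security Monkey schema has many more columns and relationships.

    from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.orm import declarative_base, sessionmaker

    Base = declarative_base()

    class Item(Base):
        __tablename__ = 'item'
        id = Column(Integer, primary_key=True)
        name = Column(String)
        latest_revision_id = Column(Integer, nullable=True)  # plain column here to keep the toy schema simple

    class ItemRevision(Base):
        __tablename__ = 'item_revision'
        id = Column(Integer, primary_key=True)
        active = Column(Boolean, default=False)
        config = Column(String, default='{}')  # stands in for the real JSON config column
        item_id = Column(Integer, ForeignKey('item.id'))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    session.add(Item(name='orphaned'))
    session.commit()

    # The fix: every item with no latest revision gets an inactive, empty
    # "deletion" revision, and latest_revision_id is pointed at it.
    for item in session.query(Item).filter(Item.latest_revision_id == None).all():  # noqa: E711
        revision = ItemRevision(active=False, config='{}', item_id=item.id)
        session.add(revision)
        session.commit()                       # populates revision.id
        item.latest_revision_id = revision.id
        session.commit()

    print(session.query(Item).filter(Item.latest_revision_id == None).count())  # 0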
84fd14194ddaa5b890e4479def071ce53a93b9d4
netflix/security_monkey
07.05.2018 10:58:36
Apache License 2.0
Add options to post metrics to queue This commit adds an option to SM to post metrics to CloudWatch. Metric data will be posted whenever scan queue items are added or removed.
[ { "change_type": "MODIFY", "old_path": "docs/autostarting.md", "new_path": "docs/autostarting.md", "diff": "@@ -5,6 +5,7 @@ This document outlines how to configure Security Monkey to:\n \n 1. Automatically run the API\n 1. Automatically scan for changes in your environment.\n+1. Configure Security Monkey to send scanning performance metrics\n \n Each section is important, please read them thoroughly.\n \n@@ -180,6 +181,11 @@ Supervisor will run the Celery `worker` command, which is:\n so keep the supervisor configurations on these instances separate.\n \n \n+Configure Security Monkey to send scanning performance metrics\n+--------------------------------------------------------------\n+Security Monkey can be configured to send metrics when objects are added or removed from the scanning queue. This allows operators to check Security Monkey performance and ensure that items are being processed from the queue in a timely manner. To do so set `METRICS_ENABLED` to `True`. You will need `cloudwatch:PutMetricData` permission. Metrics will be posted with the namespace `securitymonkey` unless configured using the variable `METRICS_NAMESPACE`. You will also want to set `METRICS_POST_REGION` with the region you want to post CloudWatch Metrics to (default: `us-east-1`).\n+\n+\n Deployment Strategies\n --------------------\n A typical deployment strategy is:\n" }, { "change_type": "MODIFY", "old_path": "security_monkey/task_scheduler/tasks.py", "new_path": "security_monkey/task_scheduler/tasks.py", "diff": "@@ -26,6 +26,7 @@ from security_monkey.datastore import store_exception, clear_old_exceptions, Tec\n from security_monkey.monitors import get_monitors, get_monitors_and_dependencies\n from security_monkey.reporter import Reporter\n from security_monkey.task_scheduler.util import CELERY, setup\n+import boto3\n from sqlalchemy.exc import OperationalError, InvalidRequestError, StatementError\n \n \n@@ -216,6 +217,8 @@ def find_changes(account_name, monitor_name, debug=True):\n fix_orphaned_deletions(account_name, monitor_name)\n \n monitors = get_monitors(account_name, [monitor_name], debug)\n+\n+ items = []\n for mon in monitors:\n cw = mon.watcher\n app.logger.info(\"[-->] Looking for changes in account: {}, technology: {}\".format(account_name, cw.index))\n@@ -224,17 +227,26 @@ def find_changes(account_name, monitor_name, debug=True):\n else:\n # Just fetch normally...\n (items, exception_map) = cw.slurp()\n+\n+ _post_metric(\n+ 'queue_items_added',\n+ len(items),\n+ account_name=account_name,\n+ tech=cw.i_am_singular\n+ )\n+\n cw.find_changes(current=items, exception_map=exception_map)\n+\n cw.save()\n \n # Batched monitors have already been monitored, and they will be skipped over.\n- audit_changes([account_name], [monitor_name], False, debug)\n+ audit_changes([account_name], [monitor_name], False, debug, items_count=len(items))\n db.session.close()\n \n return monitors\n \n \n-def audit_changes(accounts, monitor_names, send_report, debug=True, skip_batch=True):\n+def audit_changes(accounts, monitor_names, send_report, debug=True, skip_batch=True, items_count=None):\n \"\"\"\n Audits changes in the accounts\n :param accounts:\n@@ -254,6 +266,13 @@ def audit_changes(accounts, monitor_names, send_report, debug=True, skip_batch=T\n app.logger.debug(\"[-->] Auditing account: {}, technology: {}\".format(account, monitor.watcher.index))\n _audit_changes(account, monitor.auditors, send_report, debug)\n \n+ _post_metric(\n+ 'queue_items_completed',\n+ items_count,\n+ account_name=account,\n+ 
tech=monitor.watcher.i_am_singular\n+ )\n+\n \n def batch_logic(monitor, current_watcher, account_name, debug):\n \"\"\"\n@@ -293,9 +312,23 @@ def batch_logic(monitor, current_watcher, account_name, debug):\n ))\n (items, exception_map) = current_watcher.slurp()\n \n+ _post_metric(\n+ 'queue_items_added',\n+ len(items),\n+ account_name=account_name,\n+ tech=current_watcher.i_am_singular\n+ )\n+\n audit_items = current_watcher.find_changes(current=items, exception_map=exception_map)\n _audit_specific_changes(monitor, audit_items, False, debug)\n \n+ _post_metric(\n+ 'queue_items_completed',\n+ len(items),\n+ account_name=account_name,\n+ tech=current_watcher.i_am_singular\n+ )\n+\n # Delete the items that no longer exist:\n app.logger.debug(\"[-->] Deleting all items for {technology}/{account} that no longer exist.\".format(\n technology=current_watcher.i_am_plural, account=account_name\n@@ -349,3 +382,31 @@ def _audit_specific_changes(monitor, audit_items, send_report, debug=True):\n monitor.watcher.accounts[0])\n db.session.remove()\n store_exception(\"scheduler-audit-changes\", None, e)\n+\n+\n+def _post_metric(event_type, amount, account_name=None, tech=None):\n+ if not app.config.get('METRICS_ENABLED', False):\n+ return\n+\n+ cw_client = boto3.client('cloudwatch', region_name=app.config.get('METRICS_POST_REGION', 'us-east-1'))\n+ cw_client.put_metric_data(\n+ Namespace=app.config.get('METRICS_NAMESPACE', 'securitymonkey'),\n+ MetricData=[\n+ {\n+ 'MetricName': event_type,\n+ 'Timestamp': int(time.time()),\n+ 'Value': amount,\n+ 'Unit': 'Count',\n+ 'Dimensions': [\n+ {\n+ 'Name': 'tech',\n+ 'Value': tech\n+ },\n+ {\n+ 'Name': 'account_number',\n+ 'Value': Account.query.filter(Account.name == account_name).first().identifier\n+ }\n+ ]\n+ }\n+ ]\n+ )\n" } ]
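As a complement to the put_metric_data call added above, the same boto3 client can read the metrics back to verify they are arriving. The namespace, metric name, and dimension names below match the diff's defaults, while the dimension values, region, time window, and period are arbitrary placeholders; valid AWS credentials with CloudWatch read access are assumed.

    import boto3
    from datetime import datetime, timedelta

    cw = boto3.client('cloudwatch', region_name='us-east-1')
    stats = cw.get_metric_statistics(
        Namespace='securitymonkey',
        MetricName='queue_items_added',
        Dimensions=[{'Name': 'tech', 'Value': 'iamrole'},              # placeholder value
                    {'Name': 'account_number', 'Value': '123456789012'}],  # placeholder value
        StartTime=datetime.utcnow() - timedelta(hours=1),
        EndTime=datetime.utcnow(),
        Period=300,
        Statistics=['Sum'],
    )
    for point in sorted(stats['Datapoints'], key=lambda p: p['Timestamp']):
        print(point['Timestamp'], point['Sum'])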
0b2146c8f794d5642a0a4feb9152916b49fd4be8
mesonbuild/meson
06.02.2017 11:51:46
Apache License 2.0
Use named fields for command_template when generating the ninja command. The command template becomes easier to read with named fields.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/backend/ninjabackend.py", "new_path": "mesonbuild/backend/ninjabackend.py", "diff": "@@ -1232,15 +1232,16 @@ int dummy;\n return\n rule = 'rule STATIC%s_LINKER\\n' % crstr\n if mesonlib.is_windows():\n- command_templ = ''' command = %s @$out.rsp\n+ command_template = ''' command = {executable} @$out.rsp\n rspfile = $out.rsp\n- rspfile_content = $LINK_ARGS %s $in\n+ rspfile_content = $LINK_ARGS {output_args} $in\n '''\n else:\n- command_templ = ' command = %s $LINK_ARGS %s $in\\n'\n- command = command_templ % (\n- ' '.join(static_linker.get_exelist()),\n- ' '.join(static_linker.get_output_args('$out')))\n+ command_template = ' command = {executable} $LINK_ARGS {output_args} $in\\n'\n+ command = command_template.format(\n+ executable=' '.join(static_linker.get_exelist()),\n+ output_args=' '.join(static_linker.get_output_args('$out'))\n+ )\n description = ' description = Static linking library $out\\n\\n'\n outfile.write(rule)\n outfile.write(command)\n@@ -1273,16 +1274,17 @@ int dummy;\n pass\n rule = 'rule %s%s_LINKER\\n' % (langname, crstr)\n if mesonlib.is_windows():\n- command_template = ''' command = %s @$out.rsp\n+ command_template = ''' command = {executable} @$out.rsp\n rspfile = $out.rsp\n- rspfile_content = %s $ARGS %s $in $LINK_ARGS $aliasing\n+ rspfile_content = {cross_args} $ARGS {output_args} $in $LINK_ARGS $aliasing\n '''\n else:\n- command_template = ' command = %s %s $ARGS %s $in $LINK_ARGS $aliasing\\n'\n- command = command_template % (\n- ' '.join(compiler.get_linker_exelist()),\n- ' '.join(cross_args),\n- ' '.join(compiler.get_linker_output_args('$out')))\n+ command_template = ' command = {executable} {cross_args} $ARGS {output_args} $in $LINK_ARGS $aliasing\\n'\n+ command = command_template.format(\n+ executable=' '.join(compiler.get_linker_exelist()),\n+ cross_args=' '.join(cross_args),\n+ output_args=' '.join(compiler.get_linker_output_args('$out'))\n+ )\n description = ' description = Linking target $out'\n outfile.write(rule)\n outfile.write(command)\n@@ -1386,17 +1388,18 @@ rule FORTRAN_DEP_HACK\n if getattr(self, 'created_llvm_ir_rule', False):\n return\n rule = 'rule llvm_ir{}_COMPILER\\n'.format('_CROSS' if is_cross else '')\n- args = [' '.join([ninja_quote(i) for i in compiler.get_exelist()]),\n- ' '.join(self.get_cross_info_lang_args(compiler.language, is_cross)),\n- ' '.join(compiler.get_output_args('$out')),\n- ' '.join(compiler.get_compile_only_args())]\n if mesonlib.is_windows():\n- command_template = ' command = {} @$out.rsp\\n' \\\n+ command_template = ' command = {executable} @$out.rsp\\n' \\\n ' rspfile = $out.rsp\\n' \\\n- ' rspfile_content = {} $ARGS {} {} $in\\n'\n+ ' rspfile_content = {cross_args} $ARGS {output_args} {compile_only_args} $in\\n'\n else:\n- command_template = ' command = {} {} $ARGS {} {} $in\\n'\n- command = command_template.format(*args)\n+ command_template = ' command = {executable} {cross_args} $ARGS {output_args} {compile_only_args} $in\\n'\n+ command = command_template.format(\n+ executable=' '.join([ninja_quote(i) for i in compiler.get_exelist()]),\n+ cross_args=' '.join(self.get_cross_info_lang_args(compiler.language, is_cross)),\n+ output_args=' '.join(compiler.get_output_args('$out')),\n+ compile_only_args=' '.join(compiler.get_compile_only_args())\n+ )\n description = ' description = Compiling LLVM IR object $in.\\n'\n outfile.write(rule)\n outfile.write(command)\n@@ -1448,18 +1451,19 @@ rule FORTRAN_DEP_HACK\n quoted_depargs.append(d)\n cross_args = 
self.get_cross_info_lang_args(langname, is_cross)\n if mesonlib.is_windows():\n- command_template = ''' command = %s @$out.rsp\n+ command_template = ''' command = {executable} @$out.rsp\n rspfile = $out.rsp\n- rspfile_content = %s $ARGS %s %s %s $in\n+ rspfile_content = {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in\n '''\n else:\n- command_template = ' command = %s %s $ARGS %s %s %s $in\\n'\n- command = command_template % (\n- ' '.join([ninja_quote(i) for i in compiler.get_exelist()]),\n- ' '.join(cross_args),\n- ' '.join(quoted_depargs),\n- ' '.join(compiler.get_output_args('$out')),\n- ' '.join(compiler.get_compile_only_args()))\n+ command_template = ' command = {executable} {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in\\n'\n+ command = command_template.format(\n+ executable=' '.join([ninja_quote(i) for i in compiler.get_exelist()]),\n+ cross_args=' '.join(cross_args),\n+ dep_args=' '.join(quoted_depargs),\n+ output_args=' '.join(compiler.get_output_args('$out')),\n+ compile_only_args=' '.join(compiler.get_compile_only_args())\n+ )\n description = ' description = Compiling %s object $out\\n' % langname\n if compiler.get_id() == 'msvc':\n deps = ' deps = msvc\\n'\n@@ -1497,12 +1501,13 @@ rule FORTRAN_DEP_HACK\n output = ''\n else:\n output = ' '.join(compiler.get_output_args('$out'))\n- command = \" command = %s %s $ARGS %s %s %s $in\\n\" % (\n- ' '.join(compiler.get_exelist()),\n- ' '.join(cross_args),\n- ' '.join(quoted_depargs),\n- output,\n- ' '.join(compiler.get_compile_only_args()))\n+ command = \" command = {executable} {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in\\n\".format(\n+ executable=' '.join(compiler.get_exelist()),\n+ cross_args=' '.join(cross_args),\n+ dep_args=' '.join(quoted_depargs),\n+ output_args=output,\n+ compile_only_args=' '.join(compiler.get_compile_only_args())\n+ )\n description = ' description = Precompiling header %s\\n' % '$in'\n if compiler.get_id() == 'msvc':\n deps = ' deps = msvc\\n'\n" } ]
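The readability gain described in the commit above is simply the difference between positional %-substitution and str.format with named fields; a minimal before/after with placeholder values:

    executable = 'x86_64-linux-gnu-ar'   # placeholder
    output_args = '$out'                 # placeholder

    # Before: positional substitution; argument order is easy to get wrong.
    command = ' command = %s $LINK_ARGS %s $in\n' % (executable, output_args)

    # After: each slot names the value it expects.
    command_template = ' command = {executable} $LINK_ARGS {output_args} $in\n'
    command = command_template.format(executable=executable, output_args=output_args)
    print(command)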
73b2ee08a884d6baa7b8e3c35c6da8f17aa9a875
mesonbuild/meson
13.02.2017 20:59:03
Apache License 2.0
Rewrite custom_target template string substitution Factor it out into a function in mesonlib.py. This will allow us to reuse it for generators and for configure_file(). The latter doesn't implement this at all right now. Also includes unit tests.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/backend/backends.py", "new_path": "mesonbuild/backend/backends.py", "diff": "@@ -603,19 +603,15 @@ class Backend:\n return srcs\n \n def eval_custom_target_command(self, target, absolute_outputs=False):\n- # We only want the outputs to be absolute when using the VS backend\n- if not absolute_outputs:\n- ofilenames = [os.path.join(self.get_target_dir(target), i) for i in target.output]\n- else:\n- ofilenames = [os.path.join(self.environment.get_build_dir(), self.get_target_dir(target), i)\n- for i in target.output]\n- srcs = self.get_custom_target_sources(target)\n+ # We want the outputs to be absolute only when using the VS backend\n outdir = self.get_target_dir(target)\n- # Many external programs fail on empty arguments.\n- if outdir == '':\n- outdir = '.'\n- if target.absolute_paths:\n+ if absolute_outputs:\n outdir = os.path.join(self.environment.get_build_dir(), outdir)\n+ outputs = []\n+ for i in target.output:\n+ outputs.append(os.path.join(outdir, i))\n+ inputs = self.get_custom_target_sources(target)\n+ # Evaluate the command list\n cmd = []\n for i in target.command:\n if isinstance(i, build.Executable):\n@@ -631,37 +627,10 @@ class Backend:\n if target.absolute_paths:\n i = os.path.join(self.environment.get_build_dir(), i)\n # FIXME: str types are blindly added ignoring 'target.absolute_paths'\n+ # because we can't know if they refer to a file or just a string\n elif not isinstance(i, str):\n err_msg = 'Argument {0} is of unknown type {1}'\n raise RuntimeError(err_msg.format(str(i), str(type(i))))\n- for (j, src) in enumerate(srcs):\n- i = i.replace('@INPUT%d@' % j, src)\n- for (j, res) in enumerate(ofilenames):\n- i = i.replace('@OUTPUT%d@' % j, res)\n- if '@INPUT@' in i:\n- msg = 'Custom target {} has @INPUT@ in the command, but'.format(target.name)\n- if len(srcs) == 0:\n- raise MesonException(msg + ' no input files')\n- if i == '@INPUT@':\n- cmd += srcs\n- continue\n- else:\n- if len(srcs) > 1:\n- raise MesonException(msg + ' more than one input file')\n- i = i.replace('@INPUT@', srcs[0])\n- elif '@OUTPUT@' in i:\n- msg = 'Custom target {} has @OUTPUT@ in the command, but'.format(target.name)\n- if len(ofilenames) == 0:\n- raise MesonException(msg + ' no output files')\n- if i == '@OUTPUT@':\n- cmd += ofilenames\n- continue\n- else:\n- if len(ofilenames) > 1:\n- raise MesonException(msg + ' more than one output file')\n- i = i.replace('@OUTPUT@', ofilenames[0])\n- elif '@OUTDIR@' in i:\n- i = i.replace('@OUTDIR@', outdir)\n elif '@DEPFILE@' in i:\n if target.depfile is None:\n msg = 'Custom target {!r} has @DEPFILE@ but no depfile ' \\\n@@ -680,10 +649,11 @@ class Backend:\n lead_dir = ''\n else:\n lead_dir = self.environment.get_build_dir()\n- i = i.replace(source,\n- os.path.join(lead_dir,\n- outdir))\n+ i = i.replace(source, os.path.join(lead_dir, outdir))\n cmd.append(i)\n+ # Substitute the rest of the template strings\n+ values = mesonlib.get_filenames_templates_dict(inputs, outputs)\n+ cmd = mesonlib.substitute_values(cmd, values)\n # This should not be necessary but removing it breaks\n # building GStreamer on Windows. 
The underlying issue\n # is problems with quoting backslashes on Windows\n@@ -703,7 +673,7 @@ class Backend:\n #\n # https://github.com/mesonbuild/meson/pull/737\n cmd = [i.replace('\\\\', '/') for i in cmd]\n- return srcs, ofilenames, cmd\n+ return inputs, outputs, cmd\n \n def run_postconf_scripts(self):\n env = {'MESON_SOURCE_ROOT': self.environment.get_source_dir(),\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/build.py", "new_path": "mesonbuild/build.py", "diff": "@@ -1530,3 +1530,22 @@ class TestSetup:\n self.gdb = gdb\n self.timeout_multiplier = timeout_multiplier\n self.env = env\n+\n+def get_sources_output_names(sources):\n+ '''\n+ For the specified list of @sources which can be strings, Files, or targets,\n+ get all the output basenames.\n+ '''\n+ names = []\n+ for s in sources:\n+ if hasattr(s, 'held_object'):\n+ s = s.held_object\n+ if isinstance(s, str):\n+ names.append(s)\n+ elif isinstance(s, (BuildTarget, CustomTarget, GeneratedList)):\n+ names += s.get_outputs()\n+ elif isinstance(s, File):\n+ names.append(s.fname)\n+ else:\n+ raise AssertionError('Unknown source type: {!r}'.format(s))\n+ return names\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/mesonlib.py", "new_path": "mesonbuild/mesonlib.py", "diff": "@@ -521,3 +521,154 @@ def commonpath(paths):\n new = os.path.join(*new)\n common = pathlib.PurePath(new)\n return str(common)\n+\n+def iter_regexin_iter(regexiter, initer):\n+ '''\n+ Takes each regular expression in @regexiter and tries to search for it in\n+ every item in @initer. If there is a match, returns that match.\n+ Else returns False.\n+ '''\n+ for regex in regexiter:\n+ for ii in initer:\n+ if not isinstance(ii, str):\n+ continue\n+ match = re.search(regex, ii)\n+ if match:\n+ return match.group()\n+ return False\n+\n+def _substitute_values_check_errors(command, values):\n+ # Error checking\n+ inregex = ('@INPUT([0-9]+)?@', '@PLAINNAME@', '@BASENAME@')\n+ outregex = ('@OUTPUT([0-9]+)?@', '@OUTDIR@')\n+ if '@INPUT@' not in values:\n+ # Error out if any input-derived templates are present in the command\n+ match = iter_regexin_iter(inregex, command)\n+ if match:\n+ m = 'Command cannot have {!r}, since no input files were specified'\n+ raise MesonException(m.format(match))\n+ else:\n+ if len(values['@INPUT@']) > 1:\n+ # Error out if @PLAINNAME@ or @BASENAME@ is present in the command\n+ match = iter_regexin_iter(inregex[1:], command)\n+ if match:\n+ raise MesonException('Command cannot have {!r} when there is '\n+ 'more than one input file'.format(match))\n+ # Error out if an invalid @INPUTnn@ template was specified\n+ for each in command:\n+ if not isinstance(each, str):\n+ continue\n+ match = re.search(inregex[0], each)\n+ if match and match.group() not in values:\n+ m = 'Command cannot have {!r} since there are only {!r} inputs'\n+ raise MesonException(m.format(match.group(), len(values['@INPUT@'])))\n+ if '@OUTPUT@' not in values:\n+ # Error out if any output-derived templates are present in the command\n+ match = iter_regexin_iter(outregex, command)\n+ if match:\n+ m = 'Command cannot have {!r} since there are no outputs'\n+ raise MesonException(m.format(match))\n+ else:\n+ # Error out if an invalid @OUTPUTnn@ template was specified\n+ for each in command:\n+ if not isinstance(each, str):\n+ continue\n+ match = re.search(outregex[0], each)\n+ if match and match.group() not in values:\n+ m = 'Command cannot have {!r} since there are only {!r} outputs'\n+ raise MesonException(m.format(match.group(), len(values['@OUTPUT@'])))\n+\n+def 
substitute_values(command, values):\n+ '''\n+ Substitute the template strings in the @values dict into the list of\n+ strings @command and return a new list. For a full list of the templates,\n+ see get_filenames_templates_dict()\n+\n+ If multiple inputs/outputs are given in the @values dictionary, we\n+ substitute @INPUT@ and @OUTPUT@ only if they are the entire string, not\n+ just a part of it, and in that case we substitute *all* of them.\n+ '''\n+ # Error checking\n+ _substitute_values_check_errors(command, values)\n+ # Substitution\n+ outcmd = []\n+ for vv in command:\n+ if not isinstance(vv, str):\n+ outcmd.append(vv)\n+ elif '@INPUT@' in vv:\n+ inputs = values['@INPUT@']\n+ if vv == '@INPUT@':\n+ outcmd += inputs\n+ elif len(inputs) == 1:\n+ outcmd.append(vv.replace('@INPUT@', inputs[0]))\n+ else:\n+ raise MesonException(\"Command has '@INPUT@' as part of a \"\n+ \"string and more than one input file\")\n+ elif '@OUTPUT@' in vv:\n+ outputs = values['@OUTPUT@']\n+ if vv == '@OUTPUT@':\n+ outcmd += outputs\n+ elif len(outputs) == 1:\n+ outcmd.append(vv.replace('@OUTPUT@', outputs[0]))\n+ else:\n+ raise MesonException(\"Command has '@OUTPUT@' as part of a \"\n+ \"string and more than one output file\")\n+ # Append values that are exactly a template string.\n+ # This is faster than a string replace.\n+ elif vv in values:\n+ outcmd.append(values[vv])\n+ # Substitute everything else with replacement\n+ else:\n+ for key, value in values.items():\n+ if key in ('@INPUT@', '@OUTPUT@'):\n+ # Already done above\n+ continue\n+ vv = vv.replace(key, value)\n+ outcmd.append(vv)\n+ return outcmd\n+\n+def get_filenames_templates_dict(inputs, outputs):\n+ '''\n+ Create a dictionary with template strings as keys and values as values for\n+ the following templates:\n+\n+ @INPUT@ - the full path to one or more input files, from @inputs\n+ @OUTPUT@ - the full path to one or more output files, from @outputs\n+ @OUTDIR@ - the full path to the directory containing the output files\n+\n+ If there is only one input file, the following keys are also created:\n+\n+ @PLAINNAME@ - the filename of the input file\n+ @BASENAME@ - the filename of the input file with the extension removed\n+\n+ If there is more than one input file, the following keys are also created:\n+\n+ @INPUT0@, @INPUT1@, ... one for each input file\n+\n+ If there is more than one output file, the following keys are also created:\n+\n+ @OUTPUT0@, @OUTPUT1@, ... 
one for each output file\n+ '''\n+ values = {}\n+ # Gather values derived from the input\n+ if inputs:\n+ # We want to substitute all the inputs.\n+ values['@INPUT@'] = inputs\n+ for (ii, vv) in enumerate(inputs):\n+ # Write out @INPUT0@, @INPUT1@, ...\n+ values['@INPUT{}@'.format(ii)] = vv\n+ if len(inputs) == 1:\n+ # Just one value, substitute @PLAINNAME@ and @BASENAME@\n+ values['@PLAINNAME@'] = plain = os.path.split(inputs[0])[1]\n+ values['@BASENAME@'] = os.path.splitext(plain)[0]\n+ if outputs:\n+ # Gather values derived from the outputs, similar to above.\n+ values['@OUTPUT@'] = outputs\n+ for (ii, vv) in enumerate(outputs):\n+ values['@OUTPUT{}@'.format(ii)] = vv\n+ # Outdir should be the same for all outputs\n+ values['@OUTDIR@'] = os.path.split(outputs[0])[0]\n+ # Many external programs fail on empty arguments.\n+ if values['@OUTDIR@'] == '':\n+ values['@OUTDIR@'] = '.'\n+ return values\n" }, { "change_type": "MODIFY", "old_path": "run_unittests.py", "new_path": "run_unittests.py", "diff": "@@ -174,6 +174,157 @@ class InternalTests(unittest.TestCase):\n libdir = '/some/path/to/prefix/libdir'\n self.assertEqual(commonpath([prefix, libdir]), str(pathlib.PurePath(prefix)))\n \n+ def test_string_templates_substitution(self):\n+ dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict\n+ substfunc = mesonbuild.mesonlib.substitute_values\n+ ME = mesonbuild.mesonlib.MesonException\n+\n+ # Identity\n+ self.assertEqual(dictfunc([], []), {})\n+\n+ # One input, no outputs\n+ inputs = ['bar/foo.c.in']\n+ outputs = []\n+ ret = dictfunc(inputs, outputs)\n+ d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],\n+ '@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}\n+ # Check dictionary\n+ self.assertEqual(ret, d)\n+ # Check substitutions\n+ cmd = ['some', 'ordinary', 'strings']\n+ self.assertEqual(substfunc(cmd, d), cmd)\n+ cmd = ['@INPUT@.out', 'ordinary', 'strings']\n+ self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])\n+ cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']\n+ self.assertEqual(substfunc(cmd, d),\n+ [inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])\n+ cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']\n+ self.assertEqual(substfunc(cmd, d),\n+ inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])\n+ cmd = ['@OUTPUT@']\n+ self.assertRaises(ME, substfunc, cmd, d)\n+\n+ # One input, one output\n+ inputs = ['bar/foo.c.in']\n+ outputs = ['out.c']\n+ ret = dictfunc(inputs, outputs)\n+ d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],\n+ '@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',\n+ '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}\n+ # Check dictionary\n+ self.assertEqual(ret, d)\n+ # Check substitutions\n+ cmd = ['some', 'ordinary', 'strings']\n+ self.assertEqual(substfunc(cmd, d), cmd)\n+ cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']\n+ self.assertEqual(substfunc(cmd, d),\n+ [inputs[0] + '.out'] + outputs + cmd[2:])\n+ cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']\n+ self.assertEqual(substfunc(cmd, d),\n+ [inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)\n+ cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']\n+ self.assertEqual(substfunc(cmd, d),\n+ inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])\n+\n+ # One input, one output with a subdir\n+ outputs = ['dir/out.c']\n+ ret = dictfunc(inputs, outputs)\n+ d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],\n+ '@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',\n+ '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}\n+ # Check dictionary\n+ self.assertEqual(ret, d)\n+\n+ # Two inputs, no 
outputs\n+ inputs = ['bar/foo.c.in', 'baz/foo.c.in']\n+ outputs = []\n+ ret = dictfunc(inputs, outputs)\n+ d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}\n+ # Check dictionary\n+ self.assertEqual(ret, d)\n+ # Check substitutions\n+ cmd = ['some', 'ordinary', 'strings']\n+ self.assertEqual(substfunc(cmd, d), cmd)\n+ cmd = ['@INPUT@', 'ordinary', 'strings']\n+ self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])\n+ cmd = ['@INPUT0@.out', 'ordinary', 'strings']\n+ self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])\n+ cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']\n+ self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])\n+ cmd = ['@INPUT0@', '@INPUT1@', 'strings']\n+ self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])\n+ # Many inputs, can't use @INPUT@ like this\n+ cmd = ['@INPUT@.out', 'ordinary', 'strings']\n+ # Not enough inputs\n+ cmd = ['@INPUT2@.out', 'ordinary', 'strings']\n+ self.assertRaises(ME, substfunc, cmd, d)\n+ # Too many inputs\n+ cmd = ['@PLAINNAME@']\n+ self.assertRaises(ME, substfunc, cmd, d)\n+ cmd = ['@BASENAME@']\n+ self.assertRaises(ME, substfunc, cmd, d)\n+ # No outputs\n+ cmd = ['@OUTPUT@']\n+ self.assertRaises(ME, substfunc, cmd, d)\n+ cmd = ['@OUTPUT0@']\n+ self.assertRaises(ME, substfunc, cmd, d)\n+ cmd = ['@OUTDIR@']\n+ self.assertRaises(ME, substfunc, cmd, d)\n+\n+ # Two inputs, one output\n+ outputs = ['dir/out.c']\n+ ret = dictfunc(inputs, outputs)\n+ d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],\n+ '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}\n+ # Check dictionary\n+ self.assertEqual(ret, d)\n+ # Check substitutions\n+ cmd = ['some', 'ordinary', 'strings']\n+ self.assertEqual(substfunc(cmd, d), cmd)\n+ cmd = ['@OUTPUT@', 'ordinary', 'strings']\n+ self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])\n+ cmd = ['@OUTPUT@.out', 'ordinary', 'strings']\n+ self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])\n+ cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']\n+ self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])\n+ # Many inputs, can't use @INPUT@ like this\n+ cmd = ['@INPUT@.out', 'ordinary', 'strings']\n+ # Not enough inputs\n+ cmd = ['@INPUT2@.out', 'ordinary', 'strings']\n+ self.assertRaises(ME, substfunc, cmd, d)\n+ # Not enough outputs\n+ cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']\n+ self.assertRaises(ME, substfunc, cmd, d)\n+\n+ # Two inputs, two outputs\n+ outputs = ['dir/out.c', 'dir/out2.c']\n+ ret = dictfunc(inputs, outputs)\n+ d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],\n+ '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],\n+ '@OUTDIR@': 'dir'}\n+ # Check dictionary\n+ self.assertEqual(ret, d)\n+ # Check substitutions\n+ cmd = ['some', 'ordinary', 'strings']\n+ self.assertEqual(substfunc(cmd, d), cmd)\n+ cmd = ['@OUTPUT@', 'ordinary', 'strings']\n+ self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])\n+ cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']\n+ self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])\n+ cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']\n+ self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])\n+ # Many inputs, can't use @INPUT@ like this\n+ cmd = ['@INPUT@.out', 'ordinary', 'strings']\n+ # Not enough inputs\n+ cmd = ['@INPUT2@.out', 'ordinary', 'strings']\n+ self.assertRaises(ME, substfunc, cmd, d)\n+ # Not enough outputs\n+ cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']\n+ 
self.assertRaises(ME, substfunc, cmd, d)\n+ # Many outputs, can't use @OUTPUT@ like this\n+ cmd = ['@OUTPUT@.out', 'ordinary', 'strings']\n+ self.assertRaises(ME, substfunc, cmd, d)\n+\n \n class LinuxlikeTests(unittest.TestCase):\n def setUp(self):\n" } ]
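A short usage sketch of the two helpers added by the commit above, mirroring the unit tests in its diff. It assumes the mesonbuild package from this commit is importable; the command list itself is invented.

from mesonbuild.mesonlib import get_filenames_templates_dict, substitute_values

inputs = ['bar/foo.c.in']
outputs = ['out.c']
values = get_filenames_templates_dict(inputs, outputs)
# values maps '@INPUT@', '@INPUT0@', '@PLAINNAME@', '@BASENAME@',
# '@OUTPUT@', '@OUTPUT0@' and '@OUTDIR@' to the corresponding file names.
cmd = ['mycompiler', '@INPUT@', '-o', '@OUTPUT0@']
print(substitute_values(cmd, values))
# -> ['mycompiler', 'bar/foo.c.in', '-o', 'out.c']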
003e0a0610582020d1b213e0c8d16fe63bc6eabe
mesonbuild/meson
20.02.2017 07:06:13
Apache License 2.0
Use the same function for detection of C and C++ compilers. The mechanism is identical, which means there's a high likelihood of unintended divergence. In fact, a slight divergence was already there.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/environment.py", "new_path": "mesonbuild/environment.py", "diff": "@@ -400,9 +400,9 @@ class Environment:\n errmsg += '\\nRunning \"{0}\" gave \"{1}\"'.format(c, e)\n raise EnvironmentException(errmsg)\n \n- def detect_c_compiler(self, want_cross):\n+ def _detect_c_or_cpp_compiler(self, lang, evar, want_cross):\n popen_exceptions = {}\n- compilers, ccache, is_cross, exe_wrap = self._get_compilers('c', 'CC', want_cross)\n+ compilers, ccache, is_cross, exe_wrap = self._get_compilers(lang, evar, want_cross)\n for compiler in compilers:\n if isinstance(compiler, str):\n compiler = [compiler]\n@@ -424,24 +424,34 @@ class Environment:\n continue\n gtype = self.get_gnu_compiler_type(defines)\n version = self.get_gnu_version_from_defines(defines)\n- return GnuCCompiler(ccache + compiler, version, gtype, is_cross, exe_wrap, defines)\n+ cls = GnuCCompiler if lang == 'c' else GnuCPPCompiler\n+ return cls(ccache + compiler, version, gtype, is_cross, exe_wrap, defines)\n if 'clang' in out:\n if 'Apple' in out or for_darwin(want_cross, self):\n cltype = CLANG_OSX\n else:\n cltype = CLANG_STANDARD\n- return ClangCCompiler(ccache + compiler, version, cltype, is_cross, exe_wrap)\n+ cls = ClangCCompiler if lang == 'c' else ClangCPPCompiler\n+ return cls(ccache + compiler, version, cltype, is_cross, exe_wrap)\n if 'Microsoft' in out or 'Microsoft' in err:\n # Visual Studio prints version number to stderr but\n # everything else to stdout. Why? Lord only knows.\n version = search_version(err)\n- return VisualStudioCCompiler(compiler, version, is_cross, exe_wrap)\n+ cls = VisualStudioCCompiler if lang == 'c' else VisualStudioCPPCompiler\n+ return cls(compiler, version, is_cross, exe_wrap)\n if '(ICC)' in out:\n # TODO: add microsoft add check OSX\n inteltype = ICC_STANDARD\n- return IntelCCompiler(ccache + compiler, version, inteltype, is_cross, exe_wrap)\n+ cls = IntelCCompiler if lang == 'c' else IntelCPPCompiler\n+ return cls(ccache + compiler, version, inteltype, is_cross, exe_wrap)\n self._handle_compiler_exceptions(popen_exceptions, compilers)\n \n+ def detect_c_compiler(self, want_cross):\n+ return self._detect_c_or_cpp_compiler('c', 'CC', want_cross)\n+\n+ def detect_cpp_compiler(self, want_cross):\n+ return self._detect_c_or_cpp_compiler('cpp', 'CXX', want_cross)\n+\n def detect_fortran_compiler(self, want_cross):\n popen_exceptions = {}\n compilers, ccache, is_cross, exe_wrap = self._get_compilers('fortran', 'FC', want_cross)\n@@ -496,46 +506,6 @@ class Environment:\n path = os.path.split(__file__)[0]\n return os.path.join(path, 'depfixer.py')\n \n- def detect_cpp_compiler(self, want_cross):\n- popen_exceptions = {}\n- compilers, ccache, is_cross, exe_wrap = self._get_compilers('cpp', 'CXX', want_cross)\n- for compiler in compilers:\n- if isinstance(compiler, str):\n- compiler = [compiler]\n- basename = os.path.basename(compiler[-1]).lower()\n- if basename == 'cl' or basename == 'cl.exe':\n- arg = '/?'\n- else:\n- arg = '--version'\n- try:\n- p, out, err = Popen_safe(compiler + [arg])\n- except OSError as e:\n- popen_exceptions[' '.join(compiler + [arg])] = e\n- continue\n- version = search_version(out)\n- if 'Free Software Foundation' in out:\n- defines = self.get_gnu_compiler_defines(compiler)\n- if not defines:\n- popen_exceptions[compiler] = 'no pre-processor defines'\n- continue\n- gtype = self.get_gnu_compiler_type(defines)\n- version = self.get_gnu_version_from_defines(defines)\n- return GnuCPPCompiler(ccache + compiler, version, gtype, 
is_cross, exe_wrap, defines)\n- if 'clang' in out:\n- if 'Apple' in out:\n- cltype = CLANG_OSX\n- else:\n- cltype = CLANG_STANDARD\n- return ClangCPPCompiler(ccache + compiler, version, cltype, is_cross, exe_wrap)\n- if 'Microsoft' in out or 'Microsoft' in err:\n- version = search_version(err)\n- return VisualStudioCPPCompiler(compiler, version, is_cross, exe_wrap)\n- if '(ICC)' in out:\n- # TODO: add microsoft add check OSX\n- inteltype = ICC_STANDARD\n- return IntelCPPCompiler(ccache + compiler, version, inteltype, is_cross, exe_wrap)\n- self._handle_compiler_exceptions(popen_exceptions, compilers)\n-\n def detect_objc_compiler(self, want_cross):\n popen_exceptions = {}\n compilers, ccache, is_cross, exe_wrap = self._get_compilers('objc', 'OBJC', want_cross)\n" } ]
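An illustrative standalone sketch of the pattern used in the diff above: one shared detection routine that selects the language-specific class, instead of two near-identical copies. The classes here are stand-ins, not the real Meson compiler classes.

class GnuCCompiler: pass
class GnuCPPCompiler: pass
class ClangCCompiler: pass
class ClangCPPCompiler: pass

def detect_c_or_cpp_compiler(lang, vendor):
    # Shared logic lives here once; only the final class choice depends on lang.
    if vendor == 'gnu':
        cls = GnuCCompiler if lang == 'c' else GnuCPPCompiler
    else:
        cls = ClangCCompiler if lang == 'c' else ClangCPPCompiler
    return cls()

print(type(detect_c_or_cpp_compiler('cpp', 'clang')).__name__)  # ClangCPPCompiler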
1fbf6300c5d38b12a4347a9327e54a9a315ef8de
mesonbuild/meson
10.04.2017 23:36:06
Apache License 2.0
Use an enum instead of strings for method names. If a non-string value is passed as a method, reject this explicitly with a clear error message rather than trying to match with it and failing.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/dependencies.py", "new_path": "mesonbuild/dependencies.py", "diff": "@@ -24,6 +24,7 @@ import sys\n import os, stat, glob, shutil\n import subprocess\n import sysconfig\n+from enum import Enum\n from collections import OrderedDict\n from . mesonlib import MesonException, version_compare, version_compare_many, Popen_safe\n from . import mlog\n@@ -33,21 +34,35 @@ from .environment import detect_cpu_family, for_windows\n class DependencyException(MesonException):\n '''Exceptions raised while trying to find dependencies'''\n \n+class DependencyMethods(Enum):\n+ # Auto means to use whatever dependency checking mechanisms in whatever order meson thinks is best.\n+ AUTO = 'auto'\n+ PKGCONFIG = 'pkg-config'\n+ QMAKE = 'qmake'\n+ # Just specify the standard link arguments, assuming the operating system provides the library.\n+ SYSTEM = 'system'\n+ # Detect using sdl2-config\n+ SDLCONFIG = 'sdlconfig'\n+ # This is only supported on OSX - search the frameworks directory by name.\n+ EXTRAFRAMEWORK = 'extraframework'\n+ # Detect using the sysconfig module.\n+ SYSCONFIG = 'sysconfig'\n+\n class Dependency:\n def __init__(self, type_name, kwargs):\n self.name = \"null\"\n self.is_found = False\n self.type_name = type_name\n- method = kwargs.get('method', 'auto')\n+ method = DependencyMethods(kwargs.get('method', 'auto'))\n \n # Set the detection method. If the method is set to auto, use any available method.\n # If method is set to a specific string, allow only that detection method.\n- if method == \"auto\":\n+ if method == DependencyMethods.AUTO:\n self.methods = self.get_methods()\n elif method in self.get_methods():\n self.methods = [method]\n else:\n- raise MesonException('Unsupported detection method: {}, allowed methods are {}'.format(method, mlog.format_list([\"auto\"] + self.get_methods())))\n+ raise MesonException('Unsupported detection method: {}, allowed methods are {}'.format(method.value, mlog.format_list(map(lambda x: x.value, [DependencyMethods.AUTO] + self.get_methods()))))\n \n def __repr__(self):\n s = '<{0} {1}: {2}>'\n@@ -68,7 +83,7 @@ class Dependency:\n return []\n \n def get_methods(self):\n- return ['auto']\n+ return [DependencyMethods.AUTO]\n \n def get_name(self):\n return self.name\n@@ -268,7 +283,7 @@ class PkgConfigDependency(Dependency):\n return self.libs\n \n def get_methods(self):\n- return ['pkg-config']\n+ return [DependencyMethods.PKGCONFIG]\n \n def check_pkgconfig(self):\n evar = 'PKG_CONFIG'\n@@ -985,10 +1000,10 @@ class QtBaseDependency(Dependency):\n # Keep track of the detection methods used, for logging purposes.\n methods = []\n # Prefer pkg-config, then fallback to `qmake -query`\n- if 'pkg-config' in self.methods:\n+ if DependencyMethods.PKGCONFIG in self.methods:\n self._pkgconfig_detect(mods, env, kwargs)\n methods.append('pkgconfig')\n- if not self.is_found and 'qmake' in self.methods:\n+ if not self.is_found and DependencyMethods.QMAKE in self.methods:\n from_text = self._qmake_detect(mods, env, kwargs)\n methods.append('qmake-' + self.name)\n methods.append('qmake')\n@@ -1137,7 +1152,7 @@ class QtBaseDependency(Dependency):\n return self.largs\n \n def get_methods(self):\n- return ['pkg-config', 'qmake']\n+ return [DependencyMethods.PKGCONFIG, DependencyMethods.QMAKE]\n \n def found(self):\n return self.is_found\n@@ -1301,7 +1316,7 @@ class GLDependency(Dependency):\n self.is_found = False\n self.cargs = []\n self.linkargs = []\n- if 'pkg-config' in self.methods:\n+ if DependencyMethods.PKGCONFIG in 
self.methods:\n try:\n pcdep = PkgConfigDependency('gl', environment, kwargs)\n if pcdep.found():\n@@ -1313,7 +1328,7 @@ class GLDependency(Dependency):\n return\n except Exception:\n pass\n- if 'system' in self.methods:\n+ if DependencyMethods.SYSTEM in self.methods:\n if mesonlib.is_osx():\n self.is_found = True\n self.linkargs = ['-framework', 'OpenGL']\n@@ -1333,9 +1348,9 @@ class GLDependency(Dependency):\n \n def get_methods(self):\n if mesonlib.is_osx() or mesonlib.is_windows():\n- return ['pkg-config', 'system']\n+ return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM]\n else:\n- return ['pkg-config']\n+ return [DependencyMethods.PKGCONFIG]\n \n # There are three different ways of depending on SDL2:\n # sdl2-config, pkg-config and OSX framework\n@@ -1345,7 +1360,7 @@ class SDL2Dependency(Dependency):\n self.is_found = False\n self.cargs = []\n self.linkargs = []\n- if 'pkg-config' in self.methods:\n+ if DependencyMethods.PKGCONFIG in self.methods:\n try:\n pcdep = PkgConfigDependency('sdl2', environment, kwargs)\n if pcdep.found():\n@@ -1358,7 +1373,7 @@ class SDL2Dependency(Dependency):\n except Exception as e:\n mlog.debug('SDL 2 not found via pkgconfig. Trying next, error was:', str(e))\n pass\n- if 'sdlconfig' in self.methods:\n+ if DependencyMethods.SDLCONFIG in self.methods:\n sdlconf = shutil.which('sdl2-config')\n if sdlconf:\n stdo = Popen_safe(['sdl2-config', '--cflags'])[1]\n@@ -1372,7 +1387,7 @@ class SDL2Dependency(Dependency):\n self.version, '(%s)' % sdlconf)\n return\n mlog.debug('Could not find sdl2-config binary, trying next.')\n- if 'extraframework' in self.methods:\n+ if DependencyMethods.EXTRAFRAMEWORK in self.methods:\n if mesonlib.is_osx():\n fwdep = ExtraFrameworkDependency('sdl2', kwargs.get('required', True), None, kwargs)\n if fwdep.found():\n@@ -1397,9 +1412,9 @@ class SDL2Dependency(Dependency):\n \n def get_methods(self):\n if mesonlib.is_osx():\n- return ['pkg-config', 'sdlconfig', 'extraframework']\n+ return [DependencyMethods.PKGCONFIG, DependencyMethods.SDLCONFIG, DependencyMethods.EXTRAFRAMEWORK]\n else:\n- return ['pkg-config', 'sdlconfig']\n+ return [DependencyMethods.PKGCONFIG, DependencyMethods.SDLCONFIG]\n \n class ExtraFrameworkDependency(Dependency):\n def __init__(self, name, required, path, kwargs):\n@@ -1465,7 +1480,7 @@ class Python3Dependency(Dependency):\n self.is_found = False\n # We can only be sure that it is Python 3 at this point\n self.version = '3'\n- if 'pkg-config' in self.methods:\n+ if DependencyMethods.PKGCONFIG in self.methods:\n try:\n pkgdep = PkgConfigDependency('python3', environment, kwargs)\n if pkgdep.found():\n@@ -1477,9 +1492,9 @@ class Python3Dependency(Dependency):\n except Exception:\n pass\n if not self.is_found:\n- if mesonlib.is_windows() and 'sysconfig' in self.methods:\n+ if mesonlib.is_windows() and DependencyMethods.SYSCONFIG in self.methods:\n self._find_libpy3_windows(environment)\n- elif mesonlib.is_osx() and 'extraframework' in self.methods:\n+ elif mesonlib.is_osx() and DependencyMethods.EXTRAFRAMEWORK in self.methods:\n # In OSX the Python 3 framework does not have a version\n # number in its name.\n fw = ExtraFrameworkDependency('python', False, None, kwargs)\n@@ -1536,11 +1551,11 @@ class Python3Dependency(Dependency):\n \n def get_methods(self):\n if mesonlib.is_windows():\n- return ['pkg-config', 'sysconfig']\n+ return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSCONFIG]\n elif mesonlib.is_osx():\n- return ['pkg-config', 'extraframework']\n+ return [DependencyMethods.PKGCONFIG, 
DependencyMethods.EXTRAFRAMEWORK]\n else:\n- return ['pkg-config']\n+ return [DependencyMethods.PKGCONFIG]\n \n def get_version(self):\n return self.version\n@@ -1574,6 +1589,8 @@ def find_external_dependency(name, environment, kwargs):\n required = kwargs.get('required', True)\n if not isinstance(required, bool):\n raise DependencyException('Keyword \"required\" must be a boolean.')\n+ if not isinstance(kwargs.get('method', ''), str):\n+ raise DependencyException('Keyword \"method\" must be a string.')\n lname = name.lower()\n if lname in packages:\n dep = packages[lname](environment, kwargs)\n" } ]
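A standalone sketch of the enum pattern introduced in the commit above: a str-valued Enum lets DependencyMethods('pkg-config') parse user input and reject unknown names with a ValueError. The member list is abbreviated here for brevity.

from enum import Enum

class DependencyMethods(Enum):
    AUTO = 'auto'
    PKGCONFIG = 'pkg-config'
    QMAKE = 'qmake'

method = DependencyMethods('pkg-config')
print(method, method.value)            # DependencyMethods.PKGCONFIG pkg-config
try:
    DependencyMethods('no-such-method')
except ValueError as e:
    print('rejected:', e)              # unknown method names fail loudly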
fab5634916191816ddecf1a2a958fa7ed2eac1ec
mesonbuild/meson
24.06.2017 20:16:30
Apache License 2.0
Add 'Compiler.get_display_language'. Use this when we print language-related information to the console and via the Ninja backend.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/backend/ninjabackend.py", "new_path": "mesonbuild/backend/ninjabackend.py", "diff": "@@ -1606,7 +1606,7 @@ rule FORTRAN_DEP_HACK\n output_args=' '.join(compiler.get_output_args('$out')),\n compile_only_args=' '.join(compiler.get_compile_only_args())\n )\n- description = ' description = Compiling %s object $out.\\n' % langname.title()\n+ description = ' description = Compiling %s object $out.\\n' % compiler.get_display_language()\n if compiler.get_id() == 'msvc':\n deps = ' deps = msvc\\n'\n else:\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/c.py", "new_path": "mesonbuild/compilers/c.py", "diff": "@@ -179,7 +179,7 @@ class CCompiler(Compiler):\n return ['-Wl,--out-implib=' + implibname]\n \n def sanity_check_impl(self, work_dir, environment, sname, code):\n- mlog.debug('Sanity testing ' + self.language + ' compiler:', ' '.join(self.exelist))\n+ mlog.debug('Sanity testing ' + self.get_display_language() + ' compiler:', ' '.join(self.exelist))\n mlog.debug('Is cross compiler: %s.' % str(self.is_cross))\n \n extra_flags = []\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/compilers.py", "new_path": "mesonbuild/compilers/compilers.py", "diff": "@@ -584,6 +584,9 @@ class Compiler:\n def get_language(self):\n return self.language\n \n+ def get_display_language(self):\n+ return self.language.capitalize()\n+\n def get_default_suffix(self):\n return self.default_suffix\n \n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/cpp.py", "new_path": "mesonbuild/compilers/cpp.py", "diff": "@@ -32,6 +32,9 @@ class CPPCompiler(CCompiler):\n self.language = 'cpp'\n CCompiler.__init__(self, exelist, version, is_cross, exe_wrap)\n \n+ def get_display_language(self):\n+ return 'C++'\n+\n def get_no_stdinc_args(self):\n return ['-nostdinc++']\n \n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/cs.py", "new_path": "mesonbuild/compilers/cs.py", "diff": "@@ -25,6 +25,9 @@ class MonoCompiler(Compiler):\n self.id = 'mono'\n self.monorunner = 'mono'\n \n+ def get_display_language(self):\n+ return 'C#'\n+\n def get_output_args(self, fname):\n return ['-out:' + fname]\n \n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/objc.py", "new_path": "mesonbuild/compilers/objc.py", "diff": "@@ -24,6 +24,9 @@ class ObjCCompiler(CCompiler):\n self.language = 'objc'\n CCompiler.__init__(self, exelist, version, is_cross, exe_wrap)\n \n+ def get_display_language(self):\n+ return 'Objective-C'\n+\n def sanity_check(self, work_dir, environment):\n # TODO try to use sanity_check_impl instead of duplicated code\n source_name = os.path.join(work_dir, 'sanitycheckobjc.m')\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/objcpp.py", "new_path": "mesonbuild/compilers/objcpp.py", "diff": "@@ -24,6 +24,9 @@ class ObjCPPCompiler(CPPCompiler):\n self.language = 'objcpp'\n CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap)\n \n+ def get_display_language(self):\n+ return 'Objective-C++'\n+\n def sanity_check(self, work_dir, environment):\n # TODO try to use sanity_check_impl instead of duplicated code\n source_name = os.path.join(work_dir, 'sanitycheckobjcpp.mm')\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/interpreter.py", "new_path": "mesonbuild/interpreter.py", "diff": "@@ -741,7 +741,7 @@ class CompilerHolder(InterpreterObject):\n def unittest_args_method(self, args, kwargs):\n # At time, only D compilers have this feature.\n if not hasattr(self.compiler, 
'get_unittest_args'):\n- raise InterpreterException('This {} compiler has no unittest arguments.'.format(self.compiler.language))\n+ raise InterpreterException('This {} compiler has no unittest arguments.'.format(self.compiler.get_display_language()))\n return self.compiler.get_unittest_args()\n \n def has_member_method(self, args, kwargs):\n@@ -971,8 +971,7 @@ class CompilerHolder(InterpreterObject):\n raise InvalidCode('Search directory %s is not an absolute path.' % i)\n linkargs = self.compiler.find_library(libname, self.environment, search_dirs)\n if required and not linkargs:\n- l = self.compiler.language.capitalize()\n- raise InterpreterException('{} library {!r} not found'.format(l, libname))\n+ raise InterpreterException('{} library {!r} not found'.format(self.compiler.get_display_language(), libname))\n lib = dependencies.ExternalLibrary(libname, linkargs, self.environment,\n self.compiler.language)\n return ExternalLibraryHolder(lib)\n@@ -986,7 +985,7 @@ class CompilerHolder(InterpreterObject):\n h = mlog.green('YES')\n else:\n h = mlog.red('NO')\n- mlog.log('Compiler for {} supports argument {}:'.format(self.compiler.language, args[0]), h)\n+ mlog.log('Compiler for {} supports argument {}:'.format(self.compiler.get_display_language(), args[0]), h)\n return result\n \n def has_multi_arguments_method(self, args, kwargs):\n@@ -998,7 +997,7 @@ class CompilerHolder(InterpreterObject):\n h = mlog.red('NO')\n mlog.log(\n 'Compiler for {} supports arguments {}:'.format(\n- self.compiler.language, ' '.join(args)),\n+ self.compiler.get_display_language(), ' '.join(args)),\n h)\n return result\n \n@@ -1794,7 +1793,7 @@ class Interpreter(InterpreterBase):\n continue\n else:\n raise\n- mlog.log('Native %s compiler: ' % lang, mlog.bold(' '.join(comp.get_exelist())), ' (%s %s)' % (comp.id, comp.version), sep='')\n+ mlog.log('Native %s compiler: ' % comp.get_display_language(), mlog.bold(' '.join(comp.get_exelist())), ' (%s %s)' % (comp.id, comp.version), sep='')\n if not comp.get_language() in self.coredata.external_args:\n (preproc_args, compile_args, link_args) = environment.get_args_from_envvars(comp)\n self.coredata.external_preprocess_args[comp.get_language()] = preproc_args\n@@ -1802,7 +1801,7 @@ class Interpreter(InterpreterBase):\n self.coredata.external_link_args[comp.get_language()] = link_args\n self.build.add_compiler(comp)\n if need_cross_compiler:\n- mlog.log('Cross %s compiler: ' % lang, mlog.bold(' '.join(cross_comp.get_exelist())), ' (%s %s)' % (cross_comp.id, cross_comp.version), sep='')\n+ mlog.log('Cross %s compiler: ' % cross_comp.get_display_language(), mlog.bold(' '.join(cross_comp.get_exelist())), ' (%s %s)' % (cross_comp.id, cross_comp.version), sep='')\n self.build.add_cross_compiler(cross_comp)\n if self.environment.is_cross_build() and not need_cross_compiler:\n self.build.add_cross_compiler(comp)\n" } ]
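A minimal sketch of the pattern this commit introduces: a default display name derived from the internal language id, overridden where plain capitalize() is not good enough. The classes are simplified stand-ins for the real compiler hierarchy.

class Compiler:
    language = 'c'

    def get_display_language(self):
        return self.language.capitalize()

class CPPCompiler(Compiler):
    language = 'cpp'

    def get_display_language(self):
        return 'C++'

print(Compiler().get_display_language())     # C
print(CPPCompiler().get_display_language())  # C++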
cda0e33650341f0a82c7d4164607fd74805e670f
mesonbuild/meson
18.10.2017 22:39:05
Apache License 2.0
Add ConfigToolDependency class. This class is meant to abstract away some of the tedium of writing a config-tool wrapper dependency, and to allow these instances to share some basic code that they all need.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/dependencies/base.py", "new_path": "mesonbuild/dependencies/base.py", "diff": "@@ -24,7 +24,9 @@ from enum import Enum\n \n from .. import mlog\n from .. import mesonlib\n-from ..mesonlib import MesonException, Popen_safe, version_compare_many, listify\n+from ..mesonlib import (\n+ MesonException, Popen_safe, version_compare_many, version_compare, listify\n+)\n \n \n # These must be defined in this file to avoid cyclical references.\n@@ -55,6 +57,8 @@ class DependencyMethods(Enum):\n EXTRAFRAMEWORK = 'extraframework'\n # Detect using the sysconfig module.\n SYSCONFIG = 'sysconfig'\n+ # Specify using a \"program\"-config style tool\n+ CONFIG_TOOL = 'config-tool'\n \n \n class Dependency:\n@@ -167,6 +171,94 @@ class ExternalDependency(Dependency):\n return self.compiler\n \n \n+class ConfigToolDependency(ExternalDependency):\n+\n+ \"\"\"Class representing dependencies found using a config tool.\"\"\"\n+\n+ tools = None\n+ tool_name = None\n+\n+ def __init__(self, name, environment, language, kwargs):\n+ super().__init__('config-tool', environment, language, kwargs)\n+ self.name = name\n+ self.tools = listify(kwargs.get('tools', self.tools))\n+\n+ req_version = kwargs.get('version', None)\n+ tool, version = self.find_config(req_version)\n+ self.config = tool\n+ self.is_found = self.report_config(version, req_version)\n+ if not self.is_found:\n+ self.config = None\n+ return\n+ self.version = version\n+\n+ def find_config(self, versions=None):\n+ \"\"\"Helper method that searchs for config tool binaries in PATH and\n+ returns the one that best matches the given version requirements.\n+ \"\"\"\n+ if not isinstance(versions, list) and versions is not None:\n+ versions = listify(versions)\n+\n+ best_match = (None, None)\n+ for tool in self.tools:\n+ try:\n+ p, out = Popen_safe([tool, '--version'])[:2]\n+ except (FileNotFoundError, PermissionError):\n+ continue\n+ if p.returncode != 0:\n+ continue\n+\n+ out = out.strip()\n+ # Some tools, like pcap-config don't supply a version, but also\n+ # dont fail with --version, in that case just assume that there is\n+ # only one verison and return it.\n+ if not out:\n+ return (tool, 'none')\n+ if versions:\n+ is_found = version_compare_many(out, versions)[0]\n+ # This allows returning a found version without a config tool,\n+ # which is useful to inform the user that you found version x,\n+ # but y was required.\n+ if not is_found:\n+ tool = None\n+ if best_match[1]:\n+ if version_compare(out, '> {}'.format(best_match[1])):\n+ best_match = (tool, out)\n+ else:\n+ best_match = (tool, out)\n+\n+ return best_match\n+\n+ def report_config(self, version, req_version):\n+ \"\"\"Helper method to print messages about the tool.\"\"\"\n+ if self.config is None:\n+ if version is not None:\n+ mlog.log('found {} {!r} but need:'.format(self.tool_name, version),\n+ req_version)\n+ else:\n+ mlog.log(\"No {} found; can't detect dependency\".format(self.tool_name))\n+ mlog.log('Dependency {} found:'.format(self.name), mlog.red('NO'))\n+ if self.required:\n+ raise DependencyException('Dependency {} not found'.format(self.name))\n+ return False\n+ mlog.log('Found {}:'.format(self.tool_name), mlog.bold(shutil.which(self.config)),\n+ '({})'.format(version))\n+ mlog.log('Dependency {} found:'.format(self.name), mlog.green('YES'))\n+ return True\n+\n+ def get_config_value(self, args, stage):\n+ p, out, _ = Popen_safe([self.config] + args)\n+ if p.returncode != 0:\n+ if self.required:\n+ raise DependencyException('Could not 
generate {} for {}'.format(\n+ stage, self.name))\n+ return []\n+ return shlex.split(out)\n+\n+ def get_methods(self):\n+ return [DependencyMethods.AUTO, DependencyMethods.CONFIG_TOOL]\n+\n+\n class PkgConfigDependency(ExternalDependency):\n # The class's copy of the pkg-config path. Avoids having to search for it\n # multiple times in the same Meson invocation.\n" } ]
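A rough standalone sketch of the "config tool" idea described above: probe candidate binaries with --version, keep the first one that answers, then split its output into flags. This is not the Meson class itself, and the tool names are only examples.

import shlex
import shutil
import subprocess

def find_config(tools):
    for tool in tools:
        path = shutil.which(tool)
        if path is None:
            continue
        p = subprocess.run([path, '--version'], capture_output=True, text=True)
        if p.returncode == 0:
            # Some tools print nothing for --version; treat that as 'none'.
            return path, p.stdout.strip() or 'none'
    return None, None

def get_config_value(config, args):
    out = subprocess.run([config] + args, capture_output=True, text=True, check=True).stdout
    return shlex.split(out)

tool, version = find_config(['sdl2-config', 'pcap-config'])
print(tool, version)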
cf98f5e3705603ae21bef9b0a577bcd001a8c92e
mesonbuild/meson
21.02.2018 13:39:52
Apache License 2.0
Enable searching system crossfile locations on more platforms. There's no reason not to also look in these places on Cygwin or OSX. Don't do this on Windows, as these paths aren't meaningful there. Move test_cross_file_system_paths from LinuxlikeTests to AllPlatformTests.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/coredata.py", "new_path": "mesonbuild/coredata.py", "diff": "@@ -222,17 +222,17 @@ class CoreData:\n (after resolving variables and ~), return that absolute path. Next,\n check if the file is relative to the current source dir. If the path\n still isn't resolved do the following:\n- Linux + BSD:\n+ Windows:\n+ - Error\n+ *:\n - $XDG_DATA_HOME/meson/cross (or ~/.local/share/meson/cross if\n undefined)\n - $XDG_DATA_DIRS/meson/cross (or\n /usr/local/share/meson/cross:/usr/share/meson/cross if undefined)\n - Error\n- *:\n- - Error\n- BSD follows the Linux path and will honor XDG_* if set. This simplifies\n- the implementation somewhat, especially since most BSD users wont set\n- those environment variables.\n+\n+ Non-Windows follows the Linux path and will honor XDG_* if set. This\n+ simplifies the implementation somewhat.\n \"\"\"\n if filename is None:\n return None\n@@ -242,7 +242,7 @@ class CoreData:\n path_to_try = os.path.abspath(filename)\n if os.path.exists(path_to_try):\n return path_to_try\n- if sys.platform == 'linux' or 'bsd' in sys.platform.lower():\n+ if sys.platform != 'win32':\n paths = [\n os.environ.get('XDG_DATA_HOME', os.path.expanduser('~/.local/share')),\n ] + os.environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share').split(':')\n" }, { "change_type": "MODIFY", "old_path": "run_unittests.py", "new_path": "run_unittests.py", "diff": "@@ -1749,6 +1749,53 @@ int main(int argc, char **argv) {\n self._run(ninja,\n workdir=os.path.join(tmpdir, 'builddir'))\n \n+ def test_cross_file_system_paths(self):\n+ if is_windows():\n+ raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')\n+\n+ testdir = os.path.join(self.common_test_dir, '1 trivial')\n+ cross_content = textwrap.dedent(\"\"\"\\\n+ [binaries]\n+ c = '/usr/bin/cc'\n+ ar = '/usr/bin/ar'\n+ strip = '/usr/bin/ar'\n+\n+ [properties]\n+\n+ [host_machine]\n+ system = 'linux'\n+ cpu_family = 'x86'\n+ cpu = 'i686'\n+ endian = 'little'\n+ \"\"\")\n+\n+ with tempfile.TemporaryDirectory() as d:\n+ dir_ = os.path.join(d, 'meson', 'cross')\n+ os.makedirs(dir_)\n+ with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:\n+ f.write(cross_content)\n+ name = os.path.basename(f.name)\n+\n+ with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):\n+ self.init(testdir, ['--cross-file=' + name], inprocess=True)\n+ self.wipe()\n+\n+ with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):\n+ os.environ.pop('XDG_DATA_HOME', None)\n+ self.init(testdir, ['--cross-file=' + name], inprocess=True)\n+ self.wipe()\n+\n+ with tempfile.TemporaryDirectory() as d:\n+ dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')\n+ os.makedirs(dir_)\n+ with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:\n+ f.write(cross_content)\n+ name = os.path.basename(f.name)\n+\n+ with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):\n+ self.init(testdir, ['--cross-file=' + name], inprocess=True)\n+ self.wipe()\n+\n \n class FailureTests(BasePlatformTests):\n '''\n@@ -2546,50 +2593,6 @@ endian = 'little'\n self.init(testdir, ['-Db_lto=true'], default_args=False)\n self.build('reconfigure')\n \n- def test_cross_file_system_paths(self):\n- testdir = os.path.join(self.common_test_dir, '1 trivial')\n- cross_content = textwrap.dedent(\"\"\"\\\n- [binaries]\n- c = '/usr/bin/cc'\n- ar = '/usr/bin/ar'\n- strip = '/usr/bin/ar'\n-\n- [properties]\n-\n- [host_machine]\n- system = 'linux'\n- cpu_family = 'x86'\n- cpu = 'i686'\n- endian = 
'little'\n- \"\"\")\n-\n- with tempfile.TemporaryDirectory() as d:\n- dir_ = os.path.join(d, 'meson', 'cross')\n- os.makedirs(dir_)\n- with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:\n- f.write(cross_content)\n- name = os.path.basename(f.name)\n-\n- with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):\n- self.init(testdir, ['--cross-file=' + name], inprocess=True)\n- self.wipe()\n-\n- with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):\n- os.environ.pop('XDG_DATA_HOME', None)\n- self.init(testdir, ['--cross-file=' + name], inprocess=True)\n- self.wipe()\n-\n- with tempfile.TemporaryDirectory() as d:\n- dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')\n- os.makedirs(dir_)\n- with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:\n- f.write(cross_content)\n- name = os.path.basename(f.name)\n-\n- with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):\n- self.init(testdir, ['--cross-file=' + name], inprocess=True)\n- self.wipe()\n-\n def test_vala_generated_source_buildir_inside_source_tree(self):\n '''\n Test that valac outputs generated C files in the expected location when\n" } ]
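A sketch of the lookup order this commit enables on non-Windows platforms, following the docstring in the diff: try the path as given, then $XDG_DATA_HOME/meson/cross, then each entry of $XDG_DATA_DIRS. The function name is made up; it is not the Meson API.

import os

def resolve_cross_file(filename):
    filename = os.path.expanduser(os.path.expandvars(filename))
    if os.path.exists(filename):
        return os.path.abspath(filename)
    paths = [os.environ.get('XDG_DATA_HOME', os.path.expanduser('~/.local/share'))]
    paths += os.environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share').split(':')
    for p in paths:
        path_to_try = os.path.join(p, 'meson', 'cross', filename)
        if os.path.exists(path_to_try):
            return path_to_try
    raise FileNotFoundError('Cross file %s not found.' % filename)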
ea3b54d40252fcb87eb1852223f125398b1edbdf
mesonbuild/meson
25.02.2018 15:49:58
Apache License 2.0
Use include_directories for D impdirs. Change the code to store D properties as plain data. Only convert them to compiler flags in the backend. This also means we can fully parse D arguments without needing to know the compiler being used.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/backend/ninjabackend.py", "new_path": "mesonbuild/backend/ninjabackend.py", "diff": "@@ -2257,6 +2257,9 @@ rule FORTRAN_DEP_HACK\n depelem.write(outfile)\n commands += compiler.get_module_outdir_args(self.get_target_private_dir(target))\n \n+ if compiler.language == 'd':\n+ commands += compiler.get_feature_args(target.d_features, self.build_to_src)\n+\n element = NinjaBuildElement(self.all_outputs, rel_obj, compiler_name, rel_src)\n for d in header_deps:\n if isinstance(d, File):\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/build.py", "new_path": "mesonbuild/build.py", "diff": "@@ -355,6 +355,7 @@ class BuildTarget(Target):\n self.extra_args = {}\n self.generated = []\n self.extra_files = []\n+ self.d_features = {}\n # Sources can be:\n # 1. Pre-existing source files in the source tree\n # 2. Pre-existing sources generated by configure_file in the build tree\n@@ -682,12 +683,15 @@ just like those detected with the dependency() function.''')\n dfeature_versions = kwargs.get('d_module_versions', None)\n if dfeature_versions:\n dfeatures['versions'] = dfeature_versions\n- dfeature_import_dirs = kwargs.get('d_import_dirs', None)\n- if dfeature_import_dirs:\n+ if 'd_import_dirs' in kwargs:\n+ dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)\n+ for d in dfeature_import_dirs:\n+ if not isinstance(d, IncludeDirs):\n+ raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')\n dfeatures['import_dirs'] = dfeature_import_dirs\n if dfeatures:\n if 'd' in self.compilers:\n- self.add_compiler_args('d', self.compilers['d'].get_feature_args(dfeatures))\n+ self.d_features = dfeatures\n \n self.link_args = extract_as_list(kwargs, 'link_args')\n for i in self.link_args:\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/d.py", "new_path": "mesonbuild/compilers/d.py", "diff": "@@ -93,7 +93,7 @@ class DCompiler(Compiler):\n # FIXME: Make this work for Windows, MacOS and cross-compiling\n return get_gcc_soname_args(GCC_STANDARD, prefix, shlib_name, suffix, path, soversion, is_shared_module)\n \n- def get_feature_args(self, kwargs):\n+ def get_feature_args(self, kwargs, build_to_src):\n res = []\n if 'unittest' in kwargs:\n unittest = kwargs.pop('unittest')\n@@ -122,8 +122,16 @@ class DCompiler(Compiler):\n import_dir_arg = d_feature_args[self.id]['import_dir']\n if not import_dir_arg:\n raise EnvironmentException('D compiler %s does not support the \"string import directories\" feature.' % self.name_string())\n- for d in import_dirs:\n- res.append('{0}{1}'.format(import_dir_arg, d))\n+ for idir_obj in import_dirs:\n+ basedir = idir_obj.get_curdir()\n+ for idir in idir_obj.get_incdirs():\n+ # Avoid superfluous '/.' 
at the end of paths when d is '.'\n+ if idir not in ('', '.'):\n+ expdir = os.path.join(basedir, idir)\n+ else:\n+ expdir = basedir\n+ srctreedir = os.path.join(build_to_src, expdir)\n+ res.append('{0}{1}'.format(import_dir_arg, srctreedir))\n \n if kwargs:\n raise EnvironmentException('Unknown D compiler feature(s) selected: %s' % ', '.join(kwargs.keys()))\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/interpreter.py", "new_path": "mesonbuild/interpreter.py", "diff": "@@ -37,6 +37,7 @@ from pathlib import PurePath\n \n import importlib\n \n+\n def stringifyUserArguments(args):\n if isinstance(args, list):\n return '[%s]' % ', '.join([stringifyUserArguments(x) for x in args])\n@@ -247,7 +248,7 @@ class ConfigurationDataHolder(MutableInterpreterObject, ObjectHolder):\n return val\n \n def get(self, name):\n- return self.held_object.values[name] # (val, desc)\n+ return self.held_object.values[name] # (val, desc)\n \n def keys(self):\n return self.held_object.values.keys()\n@@ -816,7 +817,8 @@ class CompilerHolder(InterpreterObject):\n '''\n if not hasattr(self.compiler, 'get_feature_args'):\n raise InterpreterException('This {} compiler has no feature arguments.'.format(self.compiler.get_display_language()))\n- return self.compiler.get_feature_args({'unittest': 'true'})\n+ build_to_src = os.path.relpath(self.environment.get_source_dir(), self.environment.get_build_dir())\n+ return self.compiler.get_feature_args({'unittest': 'true'}, build_to_src)\n \n def has_member_method(self, args, kwargs):\n if len(args) != 2:\n@@ -1309,6 +1311,7 @@ class MesonMain(InterpreterObject):\n return args[1]\n raise InterpreterException('Unknown cross property: %s.' % propname)\n \n+\n pch_kwargs = set(['c_pch', 'cpp_pch'])\n \n lang_arg_kwargs = set([\n@@ -2847,12 +2850,17 @@ root and issuing %s.\n @permittedKwargs(permitted_kwargs['include_directories'])\n @stringArgs\n def func_include_directories(self, node, args, kwargs):\n+ return self.build_incdir_object(args, kwargs.get('is_system', False))\n+\n+ def build_incdir_object(self, incdir_strings, is_system=False):\n+ if not isinstance(is_system, bool):\n+ raise InvalidArguments('Is_system must be boolean.')\n src_root = self.environment.get_source_dir()\n build_root = self.environment.get_build_dir()\n absbase_src = os.path.join(src_root, self.subdir)\n absbase_build = os.path.join(build_root, self.subdir)\n \n- for a in args:\n+ for a in incdir_strings:\n if a.startswith(src_root):\n raise InvalidArguments('''Tried to form an absolute path to a source dir. You should not do that but use\n relative paths instead.\n@@ -2875,10 +2883,7 @@ different subdirectory.\n absdir_build = os.path.join(absbase_build, a)\n if not os.path.isdir(absdir_src) and not os.path.isdir(absdir_build):\n raise InvalidArguments('Include dir %s does not exist.' 
% a)\n- is_system = kwargs.get('is_system', False)\n- if not isinstance(is_system, bool):\n- raise InvalidArguments('Is_system must be boolean.')\n- i = IncludeDirsHolder(build.IncludeDirs(self.subdir, args, is_system))\n+ i = IncludeDirsHolder(build.IncludeDirs(self.subdir, incdir_strings, is_system))\n return i\n \n @permittedKwargs(permitted_kwargs['add_test_setup'])\n@@ -3106,6 +3111,7 @@ different subdirectory.\n else:\n mlog.debug('Unknown target type:', str(targetholder))\n raise RuntimeError('Unreachable code')\n+ self.kwarg_strings_to_includedirs(kwargs)\n target = targetclass(name, self.subdir, self.subproject, is_cross, sources, objs, self.environment, kwargs)\n if is_cross:\n self.add_cross_stdlib_info(target)\n@@ -3114,6 +3120,23 @@ different subdirectory.\n self.project_args_frozen = True\n return l\n \n+ def kwarg_strings_to_includedirs(self, kwargs):\n+ if 'd_import_dirs' in kwargs:\n+ items = mesonlib.extract_as_list(kwargs, 'd_import_dirs')\n+ cleaned_items = []\n+ for i in items:\n+ if isinstance(i, str):\n+ # BW compatibility. This was permitted so we must support it\n+ # for a few releases so people can transition to \"correct\"\n+ # path declarations.\n+ if i.startswith(self.environment.get_source_dir()):\n+ mlog.warning('''Building a path to the source dir is not supported. Use a relative path instead.\n+This will become a hard error in the future.''')\n+ i = os.path.relpath(i, os.path.join(self.environment.get_source_dir(), self.subdir))\n+ i = self.build_incdir_object([i])\n+ cleaned_items.append(i)\n+ kwargs['d_import_dirs'] = cleaned_items\n+\n def get_used_languages(self, target):\n result = {}\n for i in target.sources:\n@@ -3152,6 +3175,7 @@ different subdirectory.\n if idx >= len(arg_strings):\n raise InterpreterException('Format placeholder @{}@ out of range.'.format(idx))\n return arg_strings[idx]\n+\n return re.sub(r'@(\\d+)@', arg_replace, templ)\n \n # Only permit object extraction from the same subproject\n" }, { "change_type": "MODIFY", "old_path": "test cases/d/9 features/meson.build", "new_path": "test cases/d/9 features/meson.build", "diff": "@@ -1,8 +1,22 @@\n project('D Features', 'd')\n \n-# directory for data\n+# ONLY FOR BACKWARDS COMPATIBILITY.\n+# DO NOT DO THIS IN NEW CODE!\n+# USE include_directories() INSTEAD OF BUILDING\n+# STRINGS TO PATHS MANUALLY!\n data_dir = join_paths(meson.current_source_dir(), 'data')\n \n+e_plain_bcompat = executable('dapp_menu_bcompat',\n+ 'app.d',\n+ d_import_dirs: [data_dir]\n+)\n+test('dapp_menu_t_fail_bcompat', e_plain_bcompat, should_fail: true)\n+test('dapp_menu_t_bcompat', e_plain_bcompat, args: ['menu'])\n+\n+# directory for data\n+# This is the correct way to do this.\n+data_dir = include_directories('data')\n+\n e_plain = executable('dapp_menu',\n 'app.d',\n d_import_dirs: [data_dir]\n@@ -10,6 +24,7 @@ e_plain = executable('dapp_menu',\n test('dapp_menu_t_fail', e_plain, should_fail: true)\n test('dapp_menu_t', e_plain, args: ['menu'])\n \n+\n # test feature versions and string imports\n e_versions = executable('dapp_versions',\n 'app.d',\n" } ]
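A sketch of what the backend now does with D string-import directories taken from include_directories(): each entry is expanded relative to the source tree and only then turned into a compiler flag. The '-J' prefix here is just an example (it is DMD's string-import flag); the helper name is invented.

import os

def import_dir_args(basedir, incdirs, build_to_src, prefix='-J'):
    args = []
    for idir in incdirs:
        # Avoid a superfluous '/.' at the end of paths when idir is '.'
        expdir = os.path.join(basedir, idir) if idir not in ('', '.') else basedir
        args.append(prefix + os.path.join(build_to_src, expdir))
    return args

print(import_dir_args('test cases/d/9 features', ['data'], '..'))
# on POSIX: ['-J../test cases/d/9 features/data']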
060560bf6250fcf9b2b528226176d322af93711b
mesonbuild/meson
11.09.2017 20:36:38
Apache License 2.0
Use test setups from the active (sub)project by default. Replace the logic where a test setup with no project specifier defaults to the main project with one that takes the test setup from the same (sub)project from which the to-be-executed test has been read.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/mtest.py", "new_path": "mesonbuild/mtest.py", "diff": "@@ -28,6 +28,7 @@ import concurrent.futures as conc\n import platform\n import signal\n import random\n+from copy import deepcopy\n \n # GNU autotools interprets a return code of 77 from tests it executes to\n # mean that the test should be skipped.\n@@ -89,7 +90,7 @@ parser.add_argument('-v', '--verbose', default=False, action='store_true',\n help='Do not redirect stdout and stderr')\n parser.add_argument('-q', '--quiet', default=False, action='store_true',\n help='Produce less output to the terminal.')\n-parser.add_argument('-t', '--timeout-multiplier', type=float, default=None,\n+parser.add_argument('-t', '--timeout-multiplier', type=float, default=1,\n help='Define a multiplier for test timeout, for example '\n ' when running tests in particular conditions they might take'\n ' more time to execute.')\n@@ -192,7 +193,17 @@ class TestHarness:\n if self.jsonlogfile:\n self.jsonlogfile.close()\n \n- def run_single_test(self, wrap, test):\n+ def get_test_env(self, options, test):\n+ if options.setup:\n+ env = merge_suite_options(options, test)\n+ else:\n+ env = os.environ.copy()\n+ if isinstance(test.env, build.EnvironmentVariables):\n+ test.env = test.env.get_env(env)\n+ env.update(test.env)\n+ return env\n+\n+ def run_single_test(self, test):\n if test.fname[0].endswith('.jar'):\n cmd = ['java', '-jar'] + test.fname\n elif not test.is_cross_built and run_with_mono(test.fname[0]):\n@@ -215,24 +226,26 @@ class TestHarness:\n stde = None\n returncode = GNU_SKIP_RETURNCODE\n else:\n+ test_opts = deepcopy(self.options)\n+ test_env = self.get_test_env(test_opts, test)\n+ wrap = self.get_wrapper(test_opts)\n+\n+ if test_opts.gdb:\n+ test.timeout = None\n+\n cmd = wrap + cmd + test.cmd_args + self.options.test_args\n starttime = time.time()\n- child_env = os.environ.copy()\n- child_env.update(self.options.global_env.get_env(child_env))\n- if isinstance(test.env, build.EnvironmentVariables):\n- test.env = test.env.get_env(child_env)\n \n- child_env.update(test.env)\n if len(test.extra_paths) > 0:\n- child_env['PATH'] = os.pathsep.join(test.extra_paths + ['']) + child_env['PATH']\n+ test_env['PATH'] = os.pathsep.join(test.extra_paths + ['']) + test_env['PATH']\n \n # If MALLOC_PERTURB_ is not set, or if it is set to an empty value,\n # (i.e., the test or the environment don't explicitly set it), set\n # it ourselves. 
We do this unconditionally for regular tests\n # because it is extremely useful to have.\n # Setting MALLOC_PERTURB_=\"0\" will completely disable this feature.\n- if ('MALLOC_PERTURB_' not in child_env or not child_env['MALLOC_PERTURB_']) and not self.options.benchmark:\n- child_env['MALLOC_PERTURB_'] = str(random.randint(1, 255))\n+ if ('MALLOC_PERTURB_' not in test_env or not test_env['MALLOC_PERTURB_']) and not self.options.benchmark:\n+ test_env['MALLOC_PERTURB_'] = str(random.randint(1, 255))\n \n setsid = None\n stdout = None\n@@ -247,7 +260,7 @@ class TestHarness:\n p = subprocess.Popen(cmd,\n stdout=stdout,\n stderr=stderr,\n- env=child_env,\n+ env=test_env,\n cwd=test.workdir,\n preexec_fn=setsid)\n timed_out = False\n@@ -255,7 +268,7 @@ class TestHarness:\n if test.timeout is None:\n timeout = None\n else:\n- timeout = test.timeout * self.options.timeout_multiplier\n+ timeout = test.timeout * test_opts.timeout_multiplier\n try:\n (stdo, stde) = p.communicate(timeout=timeout)\n except subprocess.TimeoutExpired:\n@@ -444,7 +457,7 @@ TIMEOUT: %4d\n logfile_base = os.path.join(self.options.wd, 'meson-logs', self.options.logbase)\n \n if self.options.wrapper:\n- namebase = os.path.basename(self.get_wrapper()[0])\n+ namebase = os.path.basename(self.get_wrapper(self.options)[0])\n elif self.options.setup:\n namebase = self.options.setup.replace(\":\", \"_\")\n \n@@ -459,16 +472,16 @@ TIMEOUT: %4d\n self.logfile.write('Log of Meson test suite run on %s\\n\\n'\n % datetime.datetime.now().isoformat())\n \n- def get_wrapper(self):\n+ def get_wrapper(self, options):\n wrap = []\n- if self.options.gdb:\n+ if options.gdb:\n wrap = ['gdb', '--quiet', '--nh']\n- if self.options.repeat > 1:\n+ if options.repeat > 1:\n wrap += ['-ex', 'run', '-ex', 'quit']\n # Signal the end of arguments to gdb\n wrap += ['--args']\n- if self.options.wrapper:\n- wrap += self.options.wrapper\n+ if options.wrapper:\n+ wrap += options.wrapper\n assert(isinstance(wrap, list))\n return wrap\n \n@@ -487,7 +500,6 @@ TIMEOUT: %4d\n futures = []\n numlen = len('%d' % len(tests))\n self.open_log_files()\n- wrap = self.get_wrapper()\n startdir = os.getcwd()\n if self.options.wd:\n os.chdir(self.options.wd)\n@@ -497,18 +509,15 @@ TIMEOUT: %4d\n for i, test in enumerate(tests):\n visible_name = self.get_pretty_suite(test)\n \n- if self.options.gdb:\n- test.timeout = None\n-\n if not test.is_parallel or self.options.gdb:\n self.drain_futures(futures)\n futures = []\n- res = self.run_single_test(wrap, test)\n+ res = self.run_single_test(test)\n self.print_stats(numlen, tests, visible_name, res, i)\n else:\n if not executor:\n executor = conc.ThreadPoolExecutor(max_workers=self.options.num_processes)\n- f = executor.submit(self.run_single_test, wrap, test)\n+ f = executor.submit(self.run_single_test, test)\n futures.append((f, numlen, tests, visible_name, i))\n if self.options.repeat > 1 and self.fail_count:\n break\n@@ -549,15 +558,19 @@ def list_tests(th):\n for t in tests:\n print(th.get_pretty_suite(t))\n \n-def merge_suite_options(options):\n+def merge_suite_options(options, test):\n buildfile = os.path.join(options.wd, 'meson-private/build.dat')\n with open(buildfile, 'rb') as f:\n build = pickle.load(f)\n- if \":\" not in options.setup:\n- options.setup = (build.subproject if build.subproject else build.project_name) + \":\" + options.setup\n- if options.setup not in build.test_setups:\n- sys.exit('Unknown test setup: %s' % options.setup)\n- current = build.test_setups[options.setup]\n+ if \":\" in options.setup:\n+ if 
options.setup not in build.test_setups:\n+ sys.exit(\"Unknown test setup '%s'.\" % options.setup)\n+ current = build.test_setups[options.setup]\n+ else:\n+ full_name = test.project_name + \":\" + options.setup\n+ if full_name not in build.test_setups:\n+ sys.exit(\"Test setup '%s' not found from project '%s'.\" % (options.setup, test.project_name))\n+ current = build.test_setups[full_name]\n if not options.gdb:\n options.gdb = current.gdb\n if options.timeout_multiplier is None:\n@@ -568,7 +581,7 @@ def merge_suite_options(options):\n sys.exit('Conflict: both test setup and command line specify an exe wrapper.')\n if options.wrapper is None:\n options.wrapper = current.exe_wrapper\n- return current.env\n+ return current.env.get_env(os.environ.copy())\n \n def rebuild_all(wd):\n if not os.path.isfile(os.path.join(wd, 'build.ninja')):\n@@ -595,15 +608,6 @@ def run(args):\n if options.benchmark:\n options.num_processes = 1\n \n- if options.setup is not None:\n- global_env = merge_suite_options(options)\n- else:\n- global_env = build.EnvironmentVariables()\n- if options.timeout_multiplier is None:\n- options.timeout_multiplier = 1\n-\n- setattr(options, 'global_env', global_env)\n-\n if options.verbose and options.quiet:\n print('Can not be both quiet and verbose at the same time.')\n return 1\n" } ]
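The mtest.py diff above replaces the shared wrapper/environment with per-test state: the harness options are deep-copied for each test, the environment is merged in get_test_env(), gdb disables the timeout per test, and the timeout multiplier now defaults to 1. Below is a minimal, self-contained Python sketch of that pattern; the Options and Test classes are simplified stand-ins, not the real mesonbuild.mtest types, and the real code also handles build.EnvironmentVariables and test setups.

    # Per-test copy of options plus merged environment, as in the diff above.
    import os
    from copy import deepcopy

    class Options:
        def __init__(self):
            self.setup = None
            self.gdb = False
            self.timeout_multiplier = 1   # defaults to 1 instead of None now

    class Test:
        def __init__(self, env=None, timeout=30):
            self.env = env or {}
            self.timeout = timeout

    def get_test_env(options, test):
        # Start from the process environment (or the setup's merged env)
        # and overlay the per-test environment variables.
        env = os.environ.copy()
        env.update(test.env)
        return env

    def run_single_test(global_options, test):
        test_opts = deepcopy(global_options)   # per-test copy, never shared
        test_env = get_test_env(test_opts, test)
        if test_opts.gdb:
            test.timeout = None                # gdb sessions must not time out
        timeout = None if test.timeout is None else test.timeout * test_opts.timeout_multiplier
        return test_env, timeout

    if __name__ == '__main__':
        env, timeout = run_single_test(Options(), Test(env={'FOO': '1'}))
        print(timeout, env.get('FOO'))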
d012b5b997e917a971bca1236a065453493c780d
mesonbuild/meson
29.05.2017 20:17:28
Apache License 2.0
Create a helper for checking if a string has a path component This is used in a number of places, and in some places it is incomplete. Use a helper to ensure it's used properly.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/backend/ninjabackend.py", "new_path": "mesonbuild/backend/ninjabackend.py", "diff": "@@ -27,7 +27,7 @@ from .. import compilers\n from ..compilers import CompilerArgs\n from ..linkers import ArLinker\n from ..mesonlib import File, MesonException, OrderedSet\n-from ..mesonlib import get_compiler_for_source\n+from ..mesonlib import get_compiler_for_source, has_path_sep\n from .backends import CleanTrees, InstallData\n from ..build import InvalidArguments\n \n@@ -1335,7 +1335,7 @@ int dummy;\n \n # Set runtime-paths so we can run executables without needing to set\n # LD_LIBRARY_PATH, etc in the environment. Doesn't work on Windows.\n- if '/' in target.name or '\\\\' in target.name:\n+ if has_path_sep(target.name):\n # Target names really should not have slashes in them, but\n # unfortunately we did not check for that and some downstream projects\n # now have them. Once slashes are forbidden, remove this bit.\n@@ -2324,7 +2324,7 @@ rule FORTRAN_DEP_HACK\n # FIXME FIXME: The usage of this is a terrible and unreliable hack\n if isinstance(fname, File):\n return fname.subdir != ''\n- return '/' in fname or '\\\\' in fname\n+ return has_path_sep(fname)\n \n # Fortran is a bit weird (again). When you link against a library, just compiling a source file\n # requires the mod files that are output when single files are built. To do this right we would need to\n@@ -2370,7 +2370,7 @@ rule FORTRAN_DEP_HACK\n pch = target.get_pch(lang)\n if not pch:\n continue\n- if '/' not in pch[0] or '/' not in pch[-1]:\n+ if not has_path_sep(pch[0]) or not has_path_sep(pch[-1]):\n msg = 'Precompiled header of {!r} must not be in the same ' \\\n 'directory as source, please put it in a subdirectory.' \\\n ''.format(target.get_basename())\n@@ -2547,7 +2547,7 @@ rule FORTRAN_DEP_HACK\n commands += linker.get_option_link_args(self.environment.coredata.compiler_options)\n # Set runtime-paths so we can run executables without needing to set\n # LD_LIBRARY_PATH, etc in the environment. Doesn't work on Windows.\n- if '/' in target.name or '\\\\' in target.name:\n+ if has_path_sep(target.name):\n # Target names really should not have slashes in them, but\n # unfortunately we did not check for that and some downstream projects\n # now have them. Once slashes are forbidden, remove this bit.\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/build.py", "new_path": "mesonbuild/build.py", "diff": "@@ -23,7 +23,7 @@ from . 
import mlog\n from .mesonlib import File, MesonException, listify, extract_as_list\n from .mesonlib import typeslistify, stringlistify, classify_unity_sources\n from .mesonlib import get_filenames_templates_dict, substitute_values\n-from .mesonlib import for_windows, for_darwin, for_cygwin, for_android\n+from .mesonlib import for_windows, for_darwin, for_cygwin, for_android, has_path_sep\n from .compilers import is_object, clike_langs, sort_clike, lang_suffixes\n \n known_basic_kwargs = {'install': True,\n@@ -286,7 +286,7 @@ class EnvironmentVariables:\n \n class Target:\n def __init__(self, name, subdir, subproject, build_by_default):\n- if '/' in name or '\\\\' in name:\n+ if has_path_sep(name):\n # Fix failing test 53 when this becomes an error.\n mlog.warning('''Target \"%s\" has a path separator in its name.\n This is not supported, it can cause unexpected failures and will become\n@@ -1067,7 +1067,7 @@ class Generator:\n raise InvalidArguments('\"output\" may only contain strings.')\n if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:\n raise InvalidArguments('Every element of \"output\" must contain @BASENAME@ or @PLAINNAME@.')\n- if '/' in rule or '\\\\' in rule:\n+ if has_path_sep(rule):\n raise InvalidArguments('\"outputs\" must not contain a directory separator.')\n if len(outputs) > 1:\n for o in outputs:\n@@ -1666,7 +1666,7 @@ class CustomTarget(Target):\n raise InvalidArguments('Output must not be empty.')\n if i.strip() == '':\n raise InvalidArguments('Output must not consist only of whitespace.')\n- if '/' in i:\n+ if has_path_sep(i):\n raise InvalidArguments('Output must not contain a path segment.')\n if '@INPUT@' in i or '@INPUT0@' in i:\n m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \\\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/interpreter.py", "new_path": "mesonbuild/interpreter.py", "diff": "@@ -21,7 +21,7 @@ from . import optinterpreter\n from . import compilers\n from .wrap import wrap, WrapMode\n from . import mesonlib\n-from .mesonlib import FileMode, Popen_safe, listify, extract_as_list\n+from .mesonlib import FileMode, Popen_safe, listify, extract_as_list, has_path_sep\n from .dependencies import ExternalProgram\n from .dependencies import InternalDependency, Dependency, DependencyException\n from .interpreterbase import InterpreterBase\n@@ -1863,7 +1863,7 @@ external dependencies (including libraries) must go to \"dependencies\".''')\n raise InterpreterException('Subproject name must not contain a \"..\" path segment.')\n if os.path.isabs(dirname):\n raise InterpreterException('Subproject name must not be an absolute path.')\n- if '\\\\' in dirname or '/' in dirname:\n+ if has_path_sep(dirname):\n mlog.warning('Subproject name has a path separator. 
This may cause unexpected behaviour.')\n if dirname in self.subproject_stack:\n fullstack = self.subproject_stack + [dirname]\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/mesonlib.py", "new_path": "mesonbuild/mesonlib.py", "diff": "@@ -519,6 +519,12 @@ def get_library_dirs():\n unixdirs += glob('/lib/' + plat + '*')\n return unixdirs\n \n+def has_path_sep(name, sep='/\\\\'):\n+ 'Checks if any of the specified @sep path separators are in @name'\n+ for each in sep:\n+ if each in name:\n+ return True\n+ return False\n \n def do_replacement(regex, line, confdata):\n missing_variables = set()\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/scripts/yelphelper.py", "new_path": "mesonbuild/scripts/yelphelper.py", "diff": "@@ -17,6 +17,7 @@ import subprocess\n import shutil\n import argparse\n from .. import mlog\n+from ..mesonlib import has_path_sep\n from . import destdir_join\n from .gettext import read_linguas\n \n@@ -79,7 +80,7 @@ def install_help(srcdir, blddir, sources, media, langs, install_dir, destdir, pr\n elif symlinks:\n srcfile = os.path.join(c_install_dir, m)\n mlog.log('Symlinking %s to %s.' % (outfile, srcfile))\n- if '/' in m or '\\\\' in m:\n+ if has_path_sep(m):\n os.makedirs(os.path.dirname(outfile), exist_ok=True)\n try:\n try:\n@@ -94,7 +95,7 @@ def install_help(srcdir, blddir, sources, media, langs, install_dir, destdir, pr\n # Lang doesn't have media file so copy it over 'C' one\n infile = os.path.join(srcdir, 'C', m)\n mlog.log('Installing %s to %s' % (infile, outfile))\n- if '/' in m or '\\\\' in m:\n+ if has_path_sep(m):\n os.makedirs(os.path.dirname(outfile), exist_ok=True)\n shutil.copyfile(infile, outfile)\n shutil.copystat(infile, outfile)\n" } ]
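For reference, here is the has_path_sep() helper from the mesonlib.py hunk above as a runnable snippet, together with the kind of hand-written check it replaces; the sample strings are made up for illustration.

    def has_path_sep(name, sep='/\\'):
        'Checks if any of the specified @sep path separators are in @name'
        for each in sep:
            if each in name:
                return True
        return False

    # Before: call sites spelled the check out by hand, and some forgot '\\':
    #     if '/' in target_name or '\\' in target_name: ...
    # After: one helper covers both separators consistently.
    print(has_path_sep('foo/bar'))      # True
    print(has_path_sep('foo\\bar'))     # True
    print(has_path_sep('plainname'))    # False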
2febb99eee8ed71c9122db88ca58dd33be0b9550
mesonbuild/meson
21.03.2018 14:58:50
Apache License 2.0
Fix b_ndebug=if-release option Provide get_{type}_options_for_target() methods that unite {type} and builtin options.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/backend/backends.py", "new_path": "mesonbuild/backend/backends.py", "diff": "@@ -89,12 +89,17 @@ class OptionProxy:\n class OptionOverrideProxy:\n '''Mimic an option list but transparently override\n selected option values.'''\n- def __init__(self, overrides, options):\n+ def __init__(self, overrides, *options):\n self.overrides = overrides\n self.options = options\n \n def __getitem__(self, option_name):\n- base_opt = self.options[option_name]\n+ for opts in self.options:\n+ if option_name in opts:\n+ return self._get_override(option_name, opts[option_name])\n+ raise KeyError('Option not found', option_name)\n+\n+ def _get_override(self, option_name, base_opt):\n if option_name in self.overrides:\n return OptionProxy(base_opt.name, base_opt.validate_value(self.overrides[option_name]))\n return base_opt\n@@ -123,6 +128,20 @@ class Backend:\n def get_target_filename_abs(self, target):\n return os.path.join(self.environment.get_build_dir(), self.get_target_filename(target))\n \n+ def get_builtin_options_for_target(self, target):\n+ return OptionOverrideProxy(target.option_overrides,\n+ self.environment.coredata.builtins)\n+\n+ def get_base_options_for_target(self, target):\n+ return OptionOverrideProxy(target.option_overrides,\n+ self.environment.coredata.builtins,\n+ self.environment.coredata.base_options)\n+\n+ def get_compiler_options_for_target(self, target):\n+ return OptionOverrideProxy(target.option_overrides,\n+ # no code depends on builtins for now\n+ self.environment.coredata.compiler_options)\n+\n def get_option_for_target(self, option_name, target):\n if option_name in target.option_overrides:\n override = target.option_overrides[option_name]\n@@ -444,7 +463,7 @@ class Backend:\n # starting from hard-coded defaults followed by build options and so on.\n commands = CompilerArgs(compiler)\n \n- copt_proxy = OptionOverrideProxy(target.option_overrides, self.environment.coredata.compiler_options)\n+ copt_proxy = self.get_compiler_options_for_target(target)\n # First, the trivial ones that are impossible to override.\n #\n # Add -nostdinc/-nostdinc++ if needed; can't be overridden\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/backend/ninjabackend.py", "new_path": "mesonbuild/backend/ninjabackend.py", "diff": "@@ -2137,8 +2137,7 @@ rule FORTRAN_DEP_HACK\n return incs\n \n def _generate_single_compile(self, target, compiler, is_generated=False):\n- base_proxy = backends.OptionOverrideProxy(target.option_overrides,\n- self.environment.coredata.base_options)\n+ base_proxy = self.get_base_options_for_target(target)\n # Create an empty commands list, and start adding arguments from\n # various sources in the order in which they must override each other\n commands = CompilerArgs(compiler)\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/compilers.py", "new_path": "mesonbuild/compilers/compilers.py", "diff": "@@ -315,7 +315,9 @@ def get_base_compile_args(options, compiler):\n except KeyError:\n pass\n try:\n- if options['b_ndebug'].value == 'true' or (options['b_ndebug'].value == 'if-release' and options['buildtype'] == 'release'):\n+ if (options['b_ndebug'].value == 'true' or\n+ (options['b_ndebug'].value == 'if-release' and\n+ options['buildtype'].value == 'release')):\n args += ['-DNDEBUG']\n except KeyError:\n pass\n" } ]
fa6ca160548d7e8df9c4c724e6c96f5e004e5316
mesonbuild/meson
03.11.2017 13:44:38
Apache License 2.0
Add macOS linker versioning information This patch exploits the information residing in ltversion to set the -compatibility_version and -current_version flags that are passed to the linker on macOS.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/backend/ninjabackend.py", "new_path": "mesonbuild/backend/ninjabackend.py", "diff": "@@ -2392,7 +2392,7 @@ rule FORTRAN_DEP_HACK\n commands += linker.get_pic_args()\n # Add -Wl,-soname arguments on Linux, -install_name on OS X\n commands += linker.get_soname_args(target.prefix, target.name, target.suffix,\n- abspath, target.soversion,\n+ abspath, target.soversion, target.ltversion,\n isinstance(target, build.SharedModule))\n # This is only visited when building for Windows using either GCC or Visual Studio\n if target.vs_module_defs and hasattr(linker, 'gen_vs_module_defs_args'):\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/c.py", "new_path": "mesonbuild/compilers/c.py", "diff": "@@ -85,7 +85,7 @@ class CCompiler(Compiler):\n # Almost every compiler uses this for disabling warnings\n return ['-w']\n \n- def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):\n+ def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, version, is_shared_module):\n return []\n \n def split_shlib_to_parts(self, fname):\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/compilers.py", "new_path": "mesonbuild/compilers/compilers.py", "diff": "@@ -939,7 +939,7 @@ ICC_WIN = 2\n GNU_LD_AS_NEEDED = '-Wl,--as-needed'\n APPLE_LD_AS_NEEDED = '-Wl,-dead_strip_dylibs'\n \n-def get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, path, soversion, is_shared_module):\n+def get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, path, soversion, version, is_shared_module):\n if soversion is None:\n sostr = ''\n else:\n@@ -956,7 +956,15 @@ def get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, path, soversion, i\n if soversion is not None:\n install_name += '.' 
+ soversion\n install_name += '.dylib'\n- return ['-install_name', os.path.join('@rpath', install_name)]\n+ args = ['-install_name', os.path.join('@rpath', install_name)]\n+ if version and len(version.split('.')) == 3:\n+ splitted = version.split('.')\n+ major = int(splitted[0])\n+ minor = int(splitted[1])\n+ revision = int(splitted[2])\n+ args += ['-compatibility_version', '%d' % (major + minor + 1)]\n+ args += ['-current_version', '%d.%d' % (major + minor + 1, revision)]\n+ return args\n else:\n raise RuntimeError('Not implemented yet.')\n \n@@ -1094,8 +1102,8 @@ class GnuCompiler:\n def split_shlib_to_parts(self, fname):\n return os.path.dirname(fname), fname\n \n- def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):\n- return get_gcc_soname_args(self.gcc_type, prefix, shlib_name, suffix, path, soversion, is_shared_module)\n+ def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, version, is_shared_module):\n+ return get_gcc_soname_args(self.gcc_type, prefix, shlib_name, suffix, path, soversion, version, is_shared_module)\n \n def get_std_shared_lib_link_args(self):\n return ['-shared']\n@@ -1193,7 +1201,7 @@ class ClangCompiler:\n # so it might change semantics at any time.\n return ['-include-pch', os.path.join(pch_dir, self.get_pch_name(header))]\n \n- def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):\n+ def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, version, is_shared_module):\n if self.clang_type == CLANG_STANDARD:\n gcc_type = GCC_STANDARD\n elif self.clang_type == CLANG_OSX:\n@@ -1202,7 +1210,7 @@ class ClangCompiler:\n gcc_type = GCC_MINGW\n else:\n raise MesonException('Unreachable code when converting clang type to gcc type.')\n- return get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, path, soversion, is_shared_module)\n+ return get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, path, soversion, version, is_shared_module)\n \n def has_multi_arguments(self, args, env):\n myargs = ['-Werror=unknown-warning-option', '-Werror=unused-command-line-argument']\n@@ -1276,7 +1284,7 @@ class IntelCompiler:\n def split_shlib_to_parts(self, fname):\n return os.path.dirname(fname), fname\n \n- def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):\n+ def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, version, is_shared_module):\n if self.icc_type == ICC_STANDARD:\n gcc_type = GCC_STANDARD\n elif self.icc_type == ICC_OSX:\n@@ -1285,7 +1293,7 @@ class IntelCompiler:\n gcc_type = GCC_MINGW\n else:\n raise MesonException('Unreachable code when converting icc type to gcc type.')\n- return get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, path, soversion, is_shared_module)\n+ return get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, path, soversion, version, is_shared_module)\n \n # TODO: centralise this policy more globally, instead\n # of fragmenting it into GnuCompiler and ClangCompiler\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/cs.py", "new_path": "mesonbuild/compilers/cs.py", "diff": "@@ -41,7 +41,7 @@ class CsCompiler(Compiler):\n def get_link_args(self, fname):\n return ['-r:' + fname]\n \n- def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):\n+ def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, version, is_shared_module):\n return []\n \n def get_werror_args(self):\n" }, { "change_type": "MODIFY", "old_path": 
"mesonbuild/compilers/d.py", "new_path": "mesonbuild/compilers/d.py", "diff": "@@ -89,9 +89,9 @@ class DCompiler(Compiler):\n def get_std_shared_lib_link_args(self):\n return ['-shared']\n \n- def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):\n+ def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, version, is_shared_module):\n # FIXME: Make this work for Windows, MacOS and cross-compiling\n- return get_gcc_soname_args(GCC_STANDARD, prefix, shlib_name, suffix, path, soversion, is_shared_module)\n+ return get_gcc_soname_args(GCC_STANDARD, prefix, shlib_name, suffix, path, soversion, version, is_shared_module)\n \n def get_feature_args(self, kwargs, build_to_src):\n res = []\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/fortran.py", "new_path": "mesonbuild/compilers/fortran.py", "diff": "@@ -94,8 +94,8 @@ end program prog\n def split_shlib_to_parts(self, fname):\n return os.path.dirname(fname), fname\n \n- def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):\n- return get_gcc_soname_args(self.gcc_type, prefix, shlib_name, suffix, path, soversion, is_shared_module)\n+ def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, version, is_shared_module):\n+ return get_gcc_soname_args(self.gcc_type, prefix, shlib_name, suffix, path, soversion, version, is_shared_module)\n \n def get_dependency_gen_args(self, outtarget, outfile):\n # Disabled until this is fixed:\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/java.py", "new_path": "mesonbuild/compilers/java.py", "diff": "@@ -25,7 +25,7 @@ class JavaCompiler(Compiler):\n self.id = 'unknown'\n self.javarunner = 'java'\n \n- def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):\n+ def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, version, is_shared_module):\n return []\n \n def get_werror_args(self):\n" } ]
05c43cdcd19db98d53d5c9f1b50028d881471c2f
mesonbuild/meson
24.04.2018 21:39:59
Apache License 2.0
Add 'install_mode' to all installable targets This makes it possible to customize permissions of all installable targets, such as executable(), libraries, man pages, header files and custom or generated targets. This is useful, for instance, to install setuid/setgid binaries, which was hard to accomplish without access to this attribute.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/backend/ninjabackend.py", "new_path": "mesonbuild/backend/ninjabackend.py", "diff": "@@ -724,6 +724,7 @@ int dummy;\n \"Pass 'false' for outputs that should not be installed and 'true' for\\n\" \\\n 'using the default installation directory for an output.'\n raise MesonException(m.format(t.name, num_out, t.get_outputs(), num_outdirs))\n+ install_mode = t.get_custom_install_mode()\n # Install the target output(s)\n if isinstance(t, build.BuildTarget):\n should_strip = self.get_option_for_target('strip', t)\n@@ -731,7 +732,7 @@ int dummy;\n # Done separately because of strip/aliases/rpath\n if outdirs[0] is not False:\n i = [self.get_target_filename(t), outdirs[0],\n- t.get_aliases(), should_strip, t.install_rpath]\n+ t.get_aliases(), should_strip, t.install_rpath, install_mode]\n d.targets.append(i)\n # On toolchains/platforms that use an import library for\n # linking (separate from the shared library with all the\n@@ -749,7 +750,7 @@ int dummy;\n implib_install_dir,\n # It has no aliases, should not be stripped, and\n # doesn't have an install_rpath\n- {}, False, '']\n+ {}, False, '', install_mode]\n d.targets.append(i)\n # Install secondary outputs. Only used for Vala right now.\n if num_outdirs > 1:\n@@ -758,7 +759,7 @@ int dummy;\n if outdir is False:\n continue\n f = os.path.join(self.get_target_dir(t), output)\n- d.targets.append([f, outdir, {}, False, None])\n+ d.targets.append([f, outdir, {}, False, None, install_mode])\n elif isinstance(t, build.CustomTarget):\n # If only one install_dir is specified, assume that all\n # outputs will be installed into it. This is for\n@@ -770,14 +771,14 @@ int dummy;\n if num_outdirs == 1 and num_out > 1:\n for output in t.get_outputs():\n f = os.path.join(self.get_target_dir(t), output)\n- d.targets.append([f, outdirs[0], {}, False, None])\n+ d.targets.append([f, outdirs[0], {}, False, None, install_mode])\n else:\n for output, outdir in zip(t.get_outputs(), outdirs):\n # User requested that we not install this output\n if outdir is False:\n continue\n f = os.path.join(self.get_target_dir(t), output)\n- d.targets.append([f, outdir, {}, False, None])\n+ d.targets.append([f, outdir, {}, False, None, install_mode])\n \n def generate_custom_install_script(self, d):\n result = []\n@@ -809,7 +810,7 @@ int dummy;\n msg = 'Invalid header type {!r} can\\'t be installed'\n raise MesonException(msg.format(f))\n abspath = f.absolute_path(srcdir, builddir)\n- i = [abspath, outdir]\n+ i = [abspath, outdir, h.get_custom_install_mode()]\n d.headers.append(i)\n \n def generate_man_install(self, d):\n@@ -823,7 +824,7 @@ int dummy;\n subdir = os.path.join(manroot, 'man' + num)\n srcabs = f.absolute_path(self.environment.get_source_dir(), self.environment.get_build_dir())\n dstabs = os.path.join(subdir, os.path.basename(f.fname) + '.gz')\n- i = [srcabs, dstabs]\n+ i = [srcabs, dstabs, m.get_custom_install_mode()]\n d.man.append(i)\n \n def generate_data_install(self, d):\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/build.py", "new_path": "mesonbuild/build.py", "diff": "@@ -64,6 +64,7 @@ buildtarget_kwargs = set([\n 'install',\n 'install_rpath',\n 'install_dir',\n+ 'install_mode',\n 'name_prefix',\n 'name_suffix',\n 'native',\n@@ -668,6 +669,9 @@ class BuildTarget(Target):\n def get_custom_install_dir(self):\n return self.install_dir\n \n+ def get_custom_install_mode(self):\n+ return self.install_mode\n+\n def process_kwargs(self, kwargs, environment):\n super().process_kwargs(kwargs)\n 
self.copy_kwargs(kwargs)\n@@ -745,6 +749,7 @@ This will become a hard error in a future Meson release.''')\n # the list index of that item will not be installed\n self.install_dir = typeslistify(kwargs.get('install_dir', [None]),\n (str, bool))\n+ self.install_mode = kwargs.get('install_mode', None)\n main_class = kwargs.get('main_class', '')\n if not isinstance(main_class, str):\n raise InvalidArguments('Main class must be a string')\n@@ -1626,6 +1631,7 @@ class CustomTarget(Target):\n 'capture',\n 'install',\n 'install_dir',\n+ 'install_mode',\n 'build_always',\n 'depends',\n 'depend_files',\n@@ -1774,9 +1780,11 @@ class CustomTarget(Target):\n # If an item in this list is False, the output corresponding to\n # the list index of that item will not be installed\n self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))\n+ self.install_mode = kwargs.get('install_mode', None)\n else:\n self.install = False\n self.install_dir = [None]\n+ self.install_mode = None\n self.build_always = kwargs.get('build_always', False)\n if not isinstance(self.build_always, bool):\n raise InvalidArguments('Argument build_always must be a boolean.')\n@@ -1803,6 +1811,9 @@ class CustomTarget(Target):\n def get_custom_install_dir(self):\n return self.install_dir\n \n+ def get_custom_install_mode(self):\n+ return self.install_mode\n+\n def get_outputs(self):\n return self.outputs\n \n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/interpreter.py", "new_path": "mesonbuild/interpreter.py", "diff": "@@ -577,6 +577,7 @@ class Headers(InterpreterObject):\n self.sources = sources\n self.install_subdir = kwargs.get('subdir', '')\n self.custom_install_dir = kwargs.get('install_dir', None)\n+ self.custom_install_mode = kwargs.get('install_mode', None)\n if self.custom_install_dir is not None:\n if not isinstance(self.custom_install_dir, str):\n raise InterpreterException('Custom_install_dir must be a string.')\n@@ -593,6 +594,9 @@ class Headers(InterpreterObject):\n def get_custom_install_dir(self):\n return self.custom_install_dir\n \n+ def get_custom_install_mode(self):\n+ return self.custom_install_mode\n+\n class DataHolder(InterpreterObject, ObjectHolder):\n def __init__(self, data):\n InterpreterObject.__init__(self)\n@@ -624,6 +628,7 @@ class Man(InterpreterObject):\n self.sources = sources\n self.validate_sources()\n self.custom_install_dir = kwargs.get('install_dir', None)\n+ self.custom_install_mode = kwargs.get('install_mode', None)\n if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str):\n raise InterpreterException('Custom_install_dir must be a string.')\n \n@@ -639,6 +644,9 @@ class Man(InterpreterObject):\n def get_custom_install_dir(self):\n return self.custom_install_dir\n \n+ def get_custom_install_mode(self):\n+ return self.custom_install_mode\n+\n def get_sources(self):\n return self.sources\n \n@@ -1716,8 +1724,8 @@ permitted_kwargs = {'add_global_arguments': {'language'},\n 'add_test_setup': {'exe_wrapper', 'gdb', 'timeout_multiplier', 'env'},\n 'benchmark': {'args', 'env', 'should_fail', 'timeout', 'workdir', 'suite'},\n 'build_target': known_build_target_kwargs,\n- 'configure_file': {'input', 'output', 'configuration', 'command', 'copy', 'install_dir', 'capture', 'install', 'format', 'output_format'},\n- 'custom_target': {'input', 'output', 'command', 'install', 'install_dir', 'build_always', 'capture', 'depends', 'depend_files', 'depfile', 'build_by_default'},\n+ 'configure_file': {'input', 'output', 'configuration', 'command', 'copy', 
'install_dir', 'install_mode', 'capture', 'install', 'format', 'output_format'},\n+ 'custom_target': {'input', 'output', 'command', 'install', 'install_dir', 'install_mode', 'build_always', 'capture', 'depends', 'depend_files', 'depfile', 'build_by_default'},\n 'dependency': {'default_options', 'fallback', 'language', 'main', 'method', 'modules', 'optional_modules', 'native', 'required', 'static', 'version', 'private_headers'},\n 'declare_dependency': {'include_directories', 'link_with', 'sources', 'dependencies', 'compile_args', 'link_args', 'link_whole', 'version'},\n 'executable': build.known_exe_kwargs,\n@@ -1725,8 +1733,8 @@ permitted_kwargs = {'add_global_arguments': {'language'},\n 'generator': {'arguments', 'output', 'depfile', 'capture', 'preserve_path_from'},\n 'include_directories': {'is_system'},\n 'install_data': {'install_dir', 'install_mode', 'rename', 'sources'},\n- 'install_headers': {'install_dir', 'subdir'},\n- 'install_man': {'install_dir'},\n+ 'install_headers': {'install_dir', 'install_mode', 'subdir'},\n+ 'install_man': {'install_dir', 'install_mode'},\n 'install_subdir': {'exclude_files', 'exclude_directories', 'install_dir', 'install_mode', 'strip_directory'},\n 'jar': build.known_jar_kwargs,\n 'project': {'version', 'meson_version', 'default_options', 'license', 'subproject_dir'},\n@@ -2932,6 +2940,7 @@ root and issuing %s.\n if len(args) != 1:\n raise InterpreterException('custom_target: Only one positional argument is allowed, and it must be a string name')\n name = args[0]\n+ kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)\n tg = CustomTargetHolder(build.CustomTarget(name, self.subdir, self.subproject, kwargs), self)\n self.add_target(name, tg.held_object)\n return tg\n@@ -3058,6 +3067,7 @@ root and issuing %s.\n @permittedKwargs(permitted_kwargs['install_headers'])\n def func_install_headers(self, node, args, kwargs):\n source_files = self.source_strings_to_files(args)\n+ kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)\n h = Headers(source_files, kwargs)\n self.build.headers.append(h)\n return h\n@@ -3065,6 +3075,7 @@ root and issuing %s.\n @permittedKwargs(permitted_kwargs['install_man'])\n def func_install_man(self, node, args, kwargs):\n fargs = self.source_strings_to_files(args)\n+ kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)\n m = Man(fargs, kwargs)\n self.build.man.append(m)\n return m\n@@ -3115,7 +3126,7 @@ root and issuing %s.\n self.subdir = prev_subdir\n \n def _get_kwarg_install_mode(self, kwargs):\n- if 'install_mode' not in kwargs:\n+ if kwargs.get('install_mode', None) is None:\n return None\n install_mode = []\n mode = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int))\n@@ -3358,7 +3369,8 @@ root and issuing %s.\n idir = kwargs.get('install_dir', None)\n if isinstance(idir, str) and idir:\n cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname)\n- self.build.data.append(build.Data([cfile], idir))\n+ install_mode = self._get_kwarg_install_mode(kwargs)\n+ self.build.data.append(build.Data([cfile], idir, install_mode))\n return mesonlib.File.from_built_file(self.subdir, output)\n \n @permittedKwargs(permitted_kwargs['include_directories'])\n@@ -3642,6 +3654,7 @@ different subdirectory.\n sources = self.source_strings_to_files(sources)\n objs = extract_as_list(kwargs, 'objects')\n kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies')\n+ kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)\n if 'extra_files' in kwargs:\n ef = extract_as_list(kwargs, 
'extra_files')\n kwargs['extra_files'] = self.source_strings_to_files(ef)\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/scripts/meson_install.py", "new_path": "mesonbuild/scripts/meson_install.py", "diff": "@@ -148,7 +148,7 @@ def do_copyfile(from_file, to_file):\n selinux_updates.append(to_file)\n append_to_log(to_file)\n \n-def do_copydir(data, src_dir, dst_dir, exclude):\n+def do_copydir(data, src_dir, dst_dir, exclude, install_mode):\n '''\n Copies the contents of directory @src_dir into @dst_dir.\n \n@@ -158,7 +158,7 @@ def do_copydir(data, src_dir, dst_dir, exclude):\n excluded\n foobar\n file\n- do_copydir(..., '/foo', '/dst/dir', {'bar/excluded'}) creates\n+ do_copydir(..., '/foo', '/dst/dir', {'bar/excluded'}, None) creates\n /dst/\n dir/\n bar/\n@@ -170,6 +170,7 @@ def do_copydir(data, src_dir, dst_dir, exclude):\n dst_dir: str, absolute path to the destination directory\n exclude: (set(str), set(str)), tuple of (exclude_files, exclude_dirs),\n each element of the set is a path relative to src_dir.\n+ install_mode: FileMode object, or None to use defaults.\n '''\n if not os.path.isabs(src_dir):\n raise ValueError('src_dir must be absolute, got %s' % src_dir)\n@@ -212,7 +213,7 @@ def do_copydir(data, src_dir, dst_dir, exclude):\n os.mkdir(parent_dir)\n shutil.copystat(os.path.dirname(abs_src), parent_dir)\n shutil.copy2(abs_src, abs_dst, follow_symlinks=False)\n- sanitize_permissions(abs_dst, data.install_umask)\n+ set_mode(abs_dst, install_mode, data.install_umask)\n append_to_log(abs_dst)\n \n def get_destdir_path(d, path):\n@@ -263,8 +264,7 @@ def install_subdirs(d):\n full_dst_dir = get_destdir_path(d, dst_dir)\n print('Installing subdir %s to %s' % (src_dir, full_dst_dir))\n d.dirmaker.makedirs(full_dst_dir, exist_ok=True)\n- do_copydir(d, src_dir, full_dst_dir, exclude)\n- set_mode(full_dst_dir, mode, d.install_umask)\n+ do_copydir(d, src_dir, full_dst_dir, exclude, mode)\n \n def install_data(d):\n for i in d.data:\n@@ -283,6 +283,7 @@ def install_man(d):\n outfilename = get_destdir_path(d, m[1])\n outdir = os.path.dirname(outfilename)\n d.dirmaker.makedirs(outdir, exist_ok=True)\n+ install_mode = m[2]\n print('Installing %s to %s' % (full_source_filename, outdir))\n if outfilename.endswith('.gz') and not full_source_filename.endswith('.gz'):\n with open(outfilename, 'wb') as of:\n@@ -294,7 +295,7 @@ def install_man(d):\n append_to_log(outfilename)\n else:\n do_copyfile(full_source_filename, outfilename)\n- sanitize_permissions(outfilename, d.install_umask)\n+ set_mode(outfilename, install_mode, d.install_umask)\n \n def install_headers(d):\n for t in d.headers:\n@@ -302,10 +303,11 @@ def install_headers(d):\n fname = os.path.basename(fullfilename)\n outdir = get_destdir_path(d, t[1])\n outfilename = os.path.join(outdir, fname)\n+ install_mode = t[2]\n print('Installing %s to %s' % (fname, outdir))\n d.dirmaker.makedirs(outdir, exist_ok=True)\n do_copyfile(fullfilename, outfilename)\n- sanitize_permissions(outfilename, d.install_umask)\n+ set_mode(outfilename, install_mode, d.install_umask)\n \n def run_install_script(d):\n env = {'MESON_SOURCE_ROOT': d.source_dir,\n@@ -364,13 +366,14 @@ def install_targets(d):\n aliases = t[2]\n should_strip = t[3]\n install_rpath = t[4]\n+ install_mode = t[5]\n print('Installing %s to %s' % (fname, outname))\n d.dirmaker.makedirs(outdir, exist_ok=True)\n if not os.path.exists(fname):\n raise RuntimeError('File {!r} could not be found'.format(fname))\n elif os.path.isfile(fname):\n do_copyfile(fname, outname)\n- 
sanitize_permissions(outname, d.install_umask)\n+ set_mode(outname, install_mode, d.install_umask)\n if should_strip and d.strip_bin is not None:\n if fname.endswith('.jar'):\n print('Not stripping jar target:', os.path.basename(fname))\n@@ -387,12 +390,11 @@ def install_targets(d):\n pdb_outname = os.path.splitext(outname)[0] + '.pdb'\n print('Installing pdb file %s to %s' % (pdb_filename, pdb_outname))\n do_copyfile(pdb_filename, pdb_outname)\n- sanitize_permissions(pdb_outname, d.install_umask)\n+ set_mode(pdb_outname, install_mode, d.install_umask)\n elif os.path.isdir(fname):\n fname = os.path.join(d.build_dir, fname.rstrip('/'))\n outname = os.path.join(outdir, os.path.basename(fname))\n- do_copydir(d, fname, outname, None)\n- sanitize_permissions(outname, d.install_umask)\n+ do_copydir(d, fname, outname, None, install_mode)\n else:\n raise RuntimeError('Unknown file type for {!r}'.format(fname))\n printed_symlink_error = False\n" } ]
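A hedged sketch of what the extra install_mode slot in each install entry buys at install time: the installer applies the custom mode when one is given and otherwise falls back to umask-based defaults. The real meson_install.set_mode also understands FileMode objects with owner/group; the stand-in below only handles a plain octal mode, and permissions such as setuid (0o4755) are the motivating case from the commit message.

    import os, shutil, stat, tempfile

    def set_mode(path, install_mode, install_umask=0o022):
        if install_mode is None:
            # Default behaviour: sanitize against the install umask.
            os.chmod(path, 0o666 & ~install_umask)
        else:
            os.chmod(path, install_mode)

    def install_file(src, dst, install_mode=None):
        shutil.copyfile(src, dst)
        set_mode(dst, install_mode)

    with tempfile.TemporaryDirectory() as d:
        src = os.path.join(d, 'tool')
        dst = os.path.join(d, 'installed-tool')
        open(src, 'w').close()
        install_file(src, dst, install_mode=0o755)
        print(oct(stat.S_IMODE(os.stat(dst).st_mode)))   # 0o755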
8fb72510c3b7a7a5f08b4b20b7e1480f5868052a
mesonbuild/meson
13.05.2018 10:36:58
Apache License 2.0
Move to coredata some methods handling options Those methods only use the coredata object, so it is better to move them there as coredata methods.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/coredata.py", "new_path": "mesonbuild/coredata.py", "diff": "@@ -351,6 +351,37 @@ class CoreData:\n def get_external_preprocess_args(self, lang):\n return self.external_preprocess_args[lang]\n \n+ def merge_user_options(self, options):\n+ for (name, value) in options.items():\n+ if name not in self.user_options:\n+ self.user_options[name] = value\n+ else:\n+ oldval = self.user_options[name]\n+ if type(oldval) != type(value):\n+ self.user_options[name] = value\n+\n+ def set_options(self, options):\n+ for o in options:\n+ if '=' not in o:\n+ raise MesonException('Value \"%s\" not of type \"a=b\".' % o)\n+ (k, v) = o.split('=', 1)\n+ if is_builtin_option(k):\n+ self.set_builtin_option(k, v)\n+ elif k in self.backend_options:\n+ tgt = self.backend_options[k]\n+ tgt.set_value(v)\n+ elif k in self.user_options:\n+ tgt = self.user_options[k]\n+ tgt.set_value(v)\n+ elif k in self.compiler_options:\n+ tgt = self.compiler_options[k]\n+ tgt.set_value(v)\n+ elif k in self.base_options:\n+ tgt = self.base_options[k]\n+ tgt.set_value(v)\n+ else:\n+ raise MesonException('Unknown option %s.' % k)\n+\n def load(build_dir):\n filename = os.path.join(build_dir, 'meson-private', 'coredata.dat')\n load_fail_msg = 'Coredata file {!r} is corrupted. Try with a fresh build tree.'.format(filename)\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/environment.py", "new_path": "mesonbuild/environment.py", "diff": "@@ -386,15 +386,6 @@ class Environment:\n previous_is_plaind = i == '-D'\n return False\n \n- def merge_options(self, options):\n- for (name, value) in options.items():\n- if name not in self.coredata.user_options:\n- self.coredata.user_options[name] = value\n- else:\n- oldval = self.coredata.user_options[name]\n- if type(oldval) != type(value):\n- self.coredata.user_options[name] = value\n-\n @staticmethod\n def get_gnu_compiler_defines(compiler):\n \"\"\"\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/interpreter.py", "new_path": "mesonbuild/interpreter.py", "diff": "@@ -2306,7 +2306,7 @@ to directly access options of other subprojects.''')\n self.build.environment.cmd_line_options.projectoptions,\n )\n oi.process(self.option_file)\n- self.build.environment.merge_options(oi.options)\n+ self.coredata.merge_user_options(oi.options)\n self.set_backend()\n self.active_projectname = proj_name\n self.project_version = kwargs.get('version', 'undefined')\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/mconf.py", "new_path": "mesonbuild/mconf.py", "diff": "@@ -42,6 +42,9 @@ class Conf:\n def clear_cache(self):\n self.coredata.deps = {}\n \n+ def set_options(self, options):\n+ self.coredata.set_options(options)\n+\n def save(self):\n # Only called if something has changed so overwrite unconditionally.\n coredata.save(self.coredata, self.build_dir)\n@@ -94,28 +97,6 @@ class Conf:\n else:\n print(' {0:{width[0]}} {1:{width[1]}} {3:{width[3]}}'.format(*line, width=col_widths))\n \n- def set_options(self, options):\n- for o in options:\n- if '=' not in o:\n- raise ConfException('Value \"%s\" not of type \"a=b\".' 
% o)\n- (k, v) = o.split('=', 1)\n- if coredata.is_builtin_option(k):\n- self.coredata.set_builtin_option(k, v)\n- elif k in self.coredata.backend_options:\n- tgt = self.coredata.backend_options[k]\n- tgt.set_value(v)\n- elif k in self.coredata.user_options:\n- tgt = self.coredata.user_options[k]\n- tgt.set_value(v)\n- elif k in self.coredata.compiler_options:\n- tgt = self.coredata.compiler_options[k]\n- tgt.set_value(v)\n- elif k in self.coredata.base_options:\n- tgt = self.coredata.base_options[k]\n- tgt.set_value(v)\n- else:\n- raise ConfException('Unknown option %s.' % k)\n-\n def print_conf(self):\n print('Core properties:')\n print(' Source dir', self.build.environment.source_dir)\n" } ]
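A stand-alone sketch of the dispatch loop that moves into CoreData.set_options() above; the option stores here are plain dicts of minimal value holders, and the builtin-option branch (is_builtin_option/set_builtin_option) is omitted for brevity.

    class Opt:
        def __init__(self, value):
            self.value = value
        def set_value(self, v):
            self.value = v

    class CoreData:
        def __init__(self):
            self.backend_options = {}
            self.user_options = {'myfeature': Opt('auto')}
            self.compiler_options = {'c_std': Opt('c99')}
            self.base_options = {'b_ndebug': Opt('false')}

        def set_options(self, options):
            for o in options:
                if '=' not in o:
                    raise ValueError('Value "%s" not of type "a=b".' % o)
                k, v = o.split('=', 1)
                # Try each option store in turn, as the real method does.
                for store in (self.backend_options, self.user_options,
                              self.compiler_options, self.base_options):
                    if k in store:
                        store[k].set_value(v)
                        break
                else:
                    raise ValueError('Unknown option %s.' % k)

    cd = CoreData()
    cd.set_options(['c_std=c11', 'myfeature=enabled'])
    print(cd.compiler_options['c_std'].value, cd.user_options['myfeature'].value)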
58ae2c9a8c52bcf881682286cc4393d85c87a07f
mesonbuild/meson
19.06.2018 09:35:25
Apache License 2.0
Rename clike_langs to clink_langs for clarity D is not a 'c-like' language, but it can link to C libraries. The same might be true of Rust in the future and Go when we add support for it. This contains no functionality changes.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/backend/backends.py", "new_path": "mesonbuild/backend/backends.py", "diff": "@@ -338,7 +338,7 @@ class Backend:\n return self.build.static_cross_linker, []\n else:\n return self.build.static_linker, []\n- l, stdlib_args = target.get_clike_dynamic_linker_and_stdlibs()\n+ l, stdlib_args = target.get_clink_dynamic_linker_and_stdlibs()\n return l, stdlib_args\n \n @staticmethod\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/build.py", "new_path": "mesonbuild/build.py", "diff": "@@ -24,7 +24,7 @@ from .mesonlib import File, MesonException, listify, extract_as_list\n from .mesonlib import typeslistify, stringlistify, classify_unity_sources\n from .mesonlib import get_filenames_templates_dict, substitute_values\n from .mesonlib import for_windows, for_darwin, for_cygwin, for_android, has_path_sep\n-from .compilers import is_object, clike_langs, sort_clike, lang_suffixes\n+from .compilers import is_object, clink_langs, sort_clink, lang_suffixes\n from .interpreterbase import FeatureNew, FeatureNewKwargs\n \n pch_kwargs = set(['c_pch', 'cpp_pch'])\n@@ -490,16 +490,16 @@ class BuildTarget(Target):\n extra = set()\n for t in itertools.chain(self.link_targets, self.link_whole_targets):\n for name, compiler in t.compilers.items():\n- if name in clike_langs:\n+ if name in clink_langs:\n extra.add((name, compiler))\n- for name, compiler in sorted(extra, key=lambda p: sort_clike(p[0])):\n+ for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):\n self.compilers[name] = compiler\n \n if not self.compilers:\n # No source files or parent targets, target consists of only object\n- # files of unknown origin. Just add the first clike compiler\n+ # files of unknown origin. Just add the first clink compiler\n # that we have and hope that it can link these objects\n- for lang in clike_langs:\n+ for lang in clink_langs:\n if lang in compilers:\n self.compilers[lang] = compilers[lang]\n break\n@@ -556,9 +556,9 @@ class BuildTarget(Target):\n if lang not in self.compilers:\n self.compilers[lang] = compiler\n break\n- # Re-sort according to clike_langs\n+ # Re-sort according to clink_langs\n self.compilers = OrderedDict(sorted(self.compilers.items(),\n- key=lambda t: sort_clike(t[0])))\n+ key=lambda t: sort_clink(t[0])))\n \n # If all our sources are Vala, our target also needs the C compiler but\n # it won't get added above.\n@@ -995,7 +995,7 @@ You probably should put it in link_with instead.''')\n Sometimes you want to link to a C++ library that exports C API, which\n means the linker must link in the C++ stdlib, and we must use a C++\n compiler for linking. The same is also applicable for objc/objc++, etc,\n- so we can keep using clike_langs for the priority order.\n+ so we can keep using clink_langs for the priority order.\n \n See: https://github.com/mesonbuild/meson/issues/1653\n '''\n@@ -1014,9 +1014,9 @@ You probably should put it in link_with instead.''')\n langs.append(language)\n return langs\n \n- def get_clike_dynamic_linker_and_stdlibs(self):\n+ def get_clink_dynamic_linker_and_stdlibs(self):\n '''\n- We use the order of languages in `clike_langs` to determine which\n+ We use the order of languages in `clink_langs` to determine which\n linker to use in case the target has sources compiled with multiple\n compilers. 
All languages other than those in this list have their own\n linker.\n@@ -1033,7 +1033,7 @@ You probably should put it in link_with instead.''')\n # Languages used by dependencies\n dep_langs = self.get_langs_used_by_deps()\n # Pick a compiler based on the language priority-order\n- for l in clike_langs:\n+ for l in clink_langs:\n if l in self.compilers or l in dep_langs:\n try:\n linker = all_compilers[l]\n@@ -1071,7 +1071,7 @@ You probably should put it in link_with instead.''')\n 2. If the target contains only objects, process_compilers guesses and\n picks the first compiler that smells right.\n '''\n- linker, _ = self.get_clike_dynamic_linker_and_stdlibs()\n+ linker, _ = self.get_clink_dynamic_linker_and_stdlibs()\n # Mixing many languages with MSVC is not supported yet so ignore stdlibs.\n if linker and linker.get_id() == 'msvc':\n return True\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/__init__.py", "new_path": "mesonbuild/compilers/__init__.py", "diff": "@@ -28,7 +28,7 @@ __all__ = [\n 'all_languages',\n 'base_options',\n 'clib_langs',\n- 'clike_langs',\n+ 'clink_langs',\n 'c_suffixes',\n 'cpp_suffixes',\n 'get_macos_dylib_install_name',\n@@ -42,7 +42,7 @@ __all__ = [\n 'is_source',\n 'lang_suffixes',\n 'sanitizer_compile_args',\n- 'sort_clike',\n+ 'sort_clink',\n \n 'ArmCCompiler',\n 'ArmCPPCompiler',\n@@ -105,7 +105,7 @@ from .compilers import (\n all_languages,\n base_options,\n clib_langs,\n- clike_langs,\n+ clink_langs,\n c_suffixes,\n cpp_suffixes,\n get_macos_dylib_install_name,\n@@ -119,7 +119,7 @@ from .compilers import (\n is_library,\n lang_suffixes,\n sanitizer_compile_args,\n- sort_clike,\n+ sort_clink,\n ClangCompiler,\n CompilerArgs,\n GnuCompiler,\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/compilers.py", "new_path": "mesonbuild/compilers/compilers.py", "diff": "@@ -54,11 +54,11 @@ clib_langs = ('objcpp', 'cpp', 'objc', 'c', 'fortran',)\n # List of languages that can be linked with C code directly by the linker\n # used in build.py:process_compilers() and build.py:get_dynamic_linker()\n # XXX: Add Rust to this?\n-clike_langs = ('d',) + clib_langs\n-clike_suffixes = ()\n-for _l in clike_langs + ('vala',):\n- clike_suffixes += lang_suffixes[_l]\n-clike_suffixes += ('h', 'll', 's')\n+clink_langs = ('d',) + clib_langs\n+clink_suffixes = ()\n+for _l in clink_langs + ('vala',):\n+ clink_suffixes += lang_suffixes[_l]\n+clink_suffixes += ('h', 'll', 's')\n \n soregex = re.compile(r'.*\\.so(\\.[0-9]+)?(\\.[0-9]+)?(\\.[0-9]+)?$')\n \n@@ -72,18 +72,18 @@ cflags_mapping = {'c': 'CFLAGS',\n 'vala': 'VALAFLAGS',\n 'rust': 'RUSTFLAGS'}\n \n-# All these are only for C-like languages; see `clike_langs` above.\n+# All these are only for C-linkable languages; see `clink_langs` above.\n \n-def sort_clike(lang):\n+def sort_clink(lang):\n '''\n Sorting function to sort the list of languages according to\n- reversed(compilers.clike_langs) and append the unknown langs in the end.\n+ reversed(compilers.clink_langs) and append the unknown langs in the end.\n The purpose is to prefer C over C++ for files that can be compiled by\n both such as assembly, C, etc. 
Also applies to ObjC, ObjC++, etc.\n '''\n- if lang not in clike_langs:\n+ if lang not in clink_langs:\n return 1\n- return -clike_langs.index(lang)\n+ return -clink_langs.index(lang)\n \n def is_header(fname):\n if hasattr(fname, 'fname'):\n@@ -95,7 +95,7 @@ def is_source(fname):\n if hasattr(fname, 'fname'):\n fname = fname.fname\n suffix = fname.split('.')[-1].lower()\n- return suffix in clike_suffixes\n+ return suffix in clink_suffixes\n \n def is_assembly(fname):\n if hasattr(fname, 'fname'):\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/dependencies/base.py", "new_path": "mesonbuild/dependencies/base.py", "diff": "@@ -27,7 +27,7 @@ from pathlib import PurePath\n \n from .. import mlog\n from .. import mesonlib\n-from ..compilers import clib_langs, clike_langs\n+from ..compilers import clib_langs\n from ..mesonlib import MesonException, OrderedSet\n from ..mesonlib import Popen_safe, version_compare_many, version_compare, listify\n \n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/dependencies/misc.py", "new_path": "mesonbuild/dependencies/misc.py", "diff": "@@ -470,8 +470,8 @@ class PcapDependency(ExternalDependency):\n \n @staticmethod\n def get_pcap_lib_version(ctdep):\n- return ctdep.compiler.get_return_value('pcap_lib_version', 'string',\n- '#include <pcap.h>', ctdep.env, [], [ctdep])\n+ return ctdep.clib_compiler.get_return_value('pcap_lib_version', 'string',\n+ '#include <pcap.h>', ctdep.env, [], [ctdep])\n \n \n class CupsDependency(ExternalDependency):\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/interpreter.py", "new_path": "mesonbuild/interpreter.py", "diff": "@@ -2433,7 +2433,7 @@ external dependencies (including libraries) must go to \"dependencies\".''')\n def func_add_languages(self, node, args, kwargs):\n disabled, required, feature = extract_required_kwarg(kwargs)\n if disabled:\n- for lang in sorted(args, key=compilers.sort_clike):\n+ for lang in sorted(args, key=compilers.sort_clink):\n mlog.log('Compiler for language', mlog.bold(lang), 'skipped: feature', mlog.bold(feature), 'disabled')\n return False\n return self.add_languages(args, required)\n@@ -2556,7 +2556,7 @@ external dependencies (including libraries) must go to \"dependencies\".''')\n def add_languages(self, args, required):\n success = True\n need_cross_compiler = self.environment.is_cross_build() and self.environment.cross_info.need_cross_compiler()\n- for lang in sorted(args, key=compilers.sort_clike):\n+ for lang in sorted(args, key=compilers.sort_clink):\n lang = lang.lower()\n if lang in self.coredata.compilers:\n comp = self.coredata.compilers[lang]\n" } ]
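The renamed sort key, copied from the compilers.py hunk above and applied to a small example list (the sample language list passed to sorted() is made up):

    clink_langs = ('d', 'objcpp', 'cpp', 'objc', 'c', 'fortran')

    def sort_clink(lang):
        '''Languages later in clink_langs sort first; unknown languages sort
        last, so C is preferred over C++ for sources both can compile.'''
        if lang not in clink_langs:
            return 1
        return -clink_langs.index(lang)

    langs = ['rust', 'cpp', 'c', 'd']
    print(sorted(langs, key=sort_clink))   # ['c', 'cpp', 'd', 'rust']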
c151eb49678be24a75451a327812dd5fa569af73
mesonbuild/meson
22.06.2018 17:49:03
Apache License 2.0
Improve logging when dependency is anonymous Various pieces of logging assume the dependency has a name, and aren't grammatical if the dependency is '', so say it is anonymous instead.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/interpreter.py", "new_path": "mesonbuild/interpreter.py", "diff": "@@ -2805,10 +2805,11 @@ external dependencies (including libraries) must go to \"dependencies\".''')\n def func_dependency(self, node, args, kwargs):\n self.validate_arguments(args, 1, [str])\n name = args[0]\n+ display_name = name if name else '(anonymous)'\n \n disabled, required, feature = extract_required_kwarg(kwargs)\n if disabled:\n- mlog.log('Dependency', mlog.bold(name), 'skipped: feature', mlog.bold(feature), 'disabled')\n+ mlog.log('Dependency', mlog.bold(display_name), 'skipped: feature', mlog.bold(feature), 'disabled')\n return DependencyHolder(NotFoundDependency(self.environment))\n \n # writing just \"dependency('')\" is an error, because it can only fail\n@@ -2823,7 +2824,7 @@ external dependencies (including libraries) must go to \"dependencies\".''')\n if cached_dep:\n if required and not cached_dep.found():\n m = 'Dependency {!r} was already checked and was not found'\n- raise DependencyException(m.format(name))\n+ raise DependencyException(m.format(display_name))\n dep = cached_dep\n else:\n # If the dependency has already been configured, possibly by\n@@ -2905,17 +2906,18 @@ root and issuing %s.\n return fbinfo\n \n def dependency_fallback(self, name, kwargs):\n+ display_name = name if name else '(anonymous)'\n if self.coredata.wrap_mode in (WrapMode.nofallback, WrapMode.nodownload):\n mlog.log('Not looking for a fallback subproject for the dependency',\n- mlog.bold(name), 'because:\\nUse of fallback'\n+ mlog.bold(display_name), 'because:\\nUse of fallback'\n 'dependencies is disabled.')\n return None\n elif self.coredata.wrap_mode == WrapMode.forcefallback:\n mlog.log('Looking for a fallback subproject for the dependency',\n- mlog.bold(name), 'because:\\nUse of fallback dependencies is forced.')\n+ mlog.bold(display_name), 'because:\\nUse of fallback dependencies is forced.')\n else:\n mlog.log('Looking for a fallback subproject for the dependency',\n- mlog.bold(name))\n+ mlog.bold(display_name))\n dirname, varname = self.get_subproject_infos(kwargs)\n # Try to execute the subproject\n try:\n@@ -2933,7 +2935,7 @@ root and issuing %s.\n except Exception as e:\n mlog.log('Couldn\\'t use fallback subproject in',\n mlog.bold(os.path.join(self.subproject_dir, dirname)),\n- 'for the dependency', mlog.bold(name), '\\nReason:', str(e))\n+ 'for the dependency', mlog.bold(display_name), '\\nReason:', str(e))\n return None\n dep = self.get_subproject_dep(name, dirname, varname, kwargs.get('required', True))\n if not dep:\n@@ -2945,10 +2947,10 @@ root and issuing %s.\n found = dep.version_method([], {})\n if not self.check_subproject_version(wanted, found):\n mlog.log('Subproject', mlog.bold(subproj_path), 'dependency',\n- mlog.bold(varname), 'version is', mlog.bold(found),\n+ mlog.bold(display_name), 'version is', mlog.bold(found),\n 'but', mlog.bold(wanted), 'is required.')\n return None\n- mlog.log('Dependency', mlog.bold(name), 'from subproject',\n+ mlog.log('Dependency', mlog.bold(display_name), 'from subproject',\n mlog.bold(subproj_path), 'found:', mlog.green('YES'))\n return dep\n \n" } ]
4fb00ee1d8c79184e3a77f9266e90d42ff595f41
mesonbuild/meson
06.07.2018 23:08:13
Apache License 2.0
Add new method: mlog.deprecation() Instead of constructing it manually, use a helper.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/build.py", "new_path": "mesonbuild/build.py", "diff": "@@ -1792,7 +1792,7 @@ class CustomTarget(Target):\n if 'build_always' in kwargs and 'build_always_stale' in kwargs:\n raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')\n elif 'build_always' in kwargs:\n- mlog.warning('build_always is deprecated. Combine build_by_default and build_always_stale instead.')\n+ mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')\n if 'build_by_default' not in kwargs:\n self.build_by_default = kwargs['build_always']\n self.build_always_stale = kwargs['build_always']\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/coredata.py", "new_path": "mesonbuild/coredata.py", "diff": "@@ -169,7 +169,7 @@ class UserArrayOption(UserOption):\n if len(set(newvalue)) != len(newvalue):\n msg = 'Duplicated values in array option \"%s\" is deprecated. ' \\\n 'This will become a hard error in the future.' % (self.name)\n- mlog.log(mlog.red('DEPRECATION:'), msg)\n+ mlog.deprecation(msg)\n for i in newvalue:\n if not isinstance(i, str):\n raise MesonException('String array element \"{0}\" is not a string.'.format(str(newvalue)))\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/interpreter.py", "new_path": "mesonbuild/interpreter.py", "diff": "@@ -276,8 +276,7 @@ class ConfigurationDataHolder(MutableInterpreterObject, ObjectHolder):\n \n def validate_args(self, args, kwargs):\n if len(args) == 1 and isinstance(args[0], list) and len(args[0]) == 2:\n- mlog.log(mlog.red('DEPRECATION:'),\n- '''Passing a list as the single argument to configuration_data.set is deprecated.\n+ mlog.deprecation('''Passing a list as the single argument to configuration_data.set is deprecated.\n This will become a hard error in the future''')\n args = args[0]\n \n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/interpreterbase.py", "new_path": "mesonbuild/interpreterbase.py", "diff": "@@ -243,8 +243,9 @@ class FeatureDeprecated(FeatureCheckBase):\n return 'Deprecated features used:'\n \n def log_usage_warning(self, tv):\n- mlog.warning('Project targetting \\'{}\\' but tried to use feature deprecated '\n- 'since \\'{}\\': {}'.format(tv, self.feature_version, self.feature_name))\n+ mlog.deprecation('Project targetting \\'{}\\' but tried to use feature '\n+ 'deprecated since \\'{}\\': {}'\n+ ''.format(tv, self.feature_version, self.feature_name))\n \n \n class FeatureCheckKwargsBase:\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/mlog.py", "new_path": "mesonbuild/mlog.py", "diff": "@@ -145,6 +145,8 @@ def _log_error(severity, *args, **kwargs):\n args = (yellow('WARNING:'),) + args\n elif severity == 'error':\n args = (red('ERROR:'),) + args\n+ elif severity == 'deprecation':\n+ args = (red('DEPRECATION:'),) + args\n else:\n assert False, 'Invalid severity ' + severity\n \n@@ -163,6 +165,9 @@ def error(*args, **kwargs):\n def warning(*args, **kwargs):\n return _log_error('warning', *args, **kwargs)\n \n+def deprecation(*args, **kwargs):\n+ return _log_error('deprecation', *args, **kwargs)\n+\n def exception(e):\n log()\n if hasattr(e, 'file') and hasattr(e, 'lineno') and hasattr(e, 'colno'):\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/modules/gnome.py", "new_path": "mesonbuild/modules/gnome.py", "diff": "@@ -683,7 +683,7 @@ class GnomeModule(ExtensionModule):\n \n langs = mesonlib.stringlistify(kwargs.pop('languages', []))\n if 
langs:\n- mlog.log(mlog.red('DEPRECATION:'), '''The \"languages\" argument of gnome.yelp() is deprecated.\n+ mlog.deprecation('''The \"languages\" argument of gnome.yelp() is deprecated.\n Use a LINGUAS file in the sources directory instead.\n This will become a hard error in the future.''')\n \n" } ]
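A self-contained sketch of the severity plumbing behind the new mlog.deprecation() helper; the real mlog module also colours the prefix and writes to the log file, which is skipped here.

    def _log_error(severity, *args):
        prefixes = {'warning': 'WARNING:', 'error': 'ERROR:',
                    'deprecation': 'DEPRECATION:'}
        assert severity in prefixes, 'Invalid severity ' + severity
        print(prefixes[severity], *args)

    def warning(*args):
        return _log_error('warning', *args)

    def deprecation(*args):
        return _log_error('deprecation', *args)

    # Call sites now use the helper instead of hand-building the prefix:
    deprecation('build_always is deprecated. Combine build_by_default '
                'and build_always_stale instead.')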
e0ed1ceae2e00d6c6efab39d4712d2522d89e929
mesonbuild/meson
28.06.2018 17:26:35
Apache License 2.0
Refactor getting the host system of a cross compiler Use mesonlib.for_windows or mesonlib.for_cygwin instead of reimplementing them. Add CrossBuildInfo.get_host_system to shorten the repeated code in the mesonlib.for_<platform> methods.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/environment.py", "new_path": "mesonbuild/environment.py", "diff": "@@ -345,13 +345,11 @@ class Environment:\n # static libraries, and executables.\n # Versioning is added to these names in the backends as-needed.\n cross = self.is_cross_build()\n- if (not cross and mesonlib.is_windows()) \\\n- or (cross and self.cross_info.has_host() and self.cross_info.config['host_machine']['system'] == 'windows'):\n+ if mesonlib.for_windows(cross, self):\n self.exe_suffix = 'exe'\n self.object_suffix = 'obj'\n self.win_libdir_layout = True\n- elif (not cross and mesonlib.is_cygwin()) \\\n- or (cross and self.cross_info.has_host() and self.cross_info.config['host_machine']['system'] == 'cygwin'):\n+ elif mesonlib.for_cygwin(cross, self):\n self.exe_suffix = 'exe'\n self.object_suffix = 'o'\n self.win_libdir_layout = True\n@@ -1039,6 +1037,12 @@ class CrossBuildInfo:\n def get_stdlib(self, language):\n return self.config['properties'][language + '_stdlib']\n \n+ def get_host_system(self):\n+ \"Name of host system like 'linux', or None\"\n+ if self.has_host():\n+ return self.config['host_machine']['system']\n+ return None\n+\n def get_properties(self):\n return self.config['properties']\n \n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/mesonlib.py", "new_path": "mesonbuild/mesonlib.py", "diff": "@@ -299,9 +299,7 @@ def for_windows(is_cross, env):\n \"\"\"\n if not is_cross:\n return is_windows()\n- elif env.cross_info.has_host():\n- return env.cross_info.config['host_machine']['system'] == 'windows'\n- return False\n+ return env.cross_info.get_host_system() == 'windows'\n \n def for_cygwin(is_cross, env):\n \"\"\"\n@@ -311,9 +309,7 @@ def for_cygwin(is_cross, env):\n \"\"\"\n if not is_cross:\n return is_cygwin()\n- elif env.cross_info.has_host():\n- return env.cross_info.config['host_machine']['system'] == 'cygwin'\n- return False\n+ return env.cross_info.get_host_system() == 'cygwin'\n \n def for_linux(is_cross, env):\n \"\"\"\n@@ -323,9 +319,7 @@ def for_linux(is_cross, env):\n \"\"\"\n if not is_cross:\n return is_linux()\n- elif env.cross_info.has_host():\n- return env.cross_info.config['host_machine']['system'] == 'linux'\n- return False\n+ return env.cross_info.get_host_system() == 'linux'\n \n def for_darwin(is_cross, env):\n \"\"\"\n@@ -335,9 +329,7 @@ def for_darwin(is_cross, env):\n \"\"\"\n if not is_cross:\n return is_osx()\n- elif env.cross_info.has_host():\n- return env.cross_info.config['host_machine']['system'] in ('darwin', 'ios')\n- return False\n+ return env.cross_info.get_host_system() in ('darwin', 'ios')\n \n def for_android(is_cross, env):\n \"\"\"\n@@ -347,9 +339,7 @@ def for_android(is_cross, env):\n \"\"\"\n if not is_cross:\n return is_android()\n- elif env.cross_info.has_host():\n- return env.cross_info.config['host_machine']['system'] == 'android'\n- return False\n+ return env.cross_info.get_host_system() == 'android'\n \n def for_haiku(is_cross, env):\n \"\"\"\n@@ -359,9 +349,7 @@ def for_haiku(is_cross, env):\n \"\"\"\n if not is_cross:\n return is_haiku()\n- elif env.cross_info.has_host():\n- return env.cross_info.config['host_machine']['system'] == 'haiku'\n- return False\n+ return env.cross_info.get_host_system() == 'haiku'\n \n def for_openbsd(is_cross, env):\n \"\"\"\n" } ]
28c1f31d7e2b46a8473d8ebe8f029fb7602fde09
mesonbuild/meson
29.08.2018 12:03:47
Apache License 2.0
Make `-std=` fallback remapping more robust * The current version matching logic is brittle with respect to Clang. LLVM and Apple Clang use slightly different but nowadays overlapping version ranges. Instead, we now just check whether the compiler supports the given `-std=` variant and try its respective fallback instead of testing version ranges.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/cpp.py", "new_path": "mesonbuild/compilers/cpp.py", "diff": "@@ -12,10 +12,12 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import functools\n import os.path\n \n from .. import coredata\n-from ..mesonlib import version_compare\n+from .. import mlog\n+from ..mesonlib import MesonException, version_compare\n \n from .c import CCompiler, VisualStudioCCompiler\n from .compilers import (\n@@ -67,6 +69,55 @@ class CPPCompiler(CCompiler):\n int main () {{ return 0; }}'''\n return self.compiles(t.format(**fargs), env, extra_args, dependencies)\n \n+ def _test_cpp_std_arg(self, cpp_std_value):\n+ # Test whether the compiler understands a -std=XY argument\n+ assert(cpp_std_value.startswith('-std='))\n+\n+ # This test does not use has_multi_arguments() for two reasons:\n+ # 1. has_multi_arguments() requires an env argument, which the compiler\n+ # object does not have at this point.\n+ # 2. even if it did have an env object, that might contain another more\n+ # recent -std= argument, which might lead to a cascaded failure.\n+ CPP_TEST = 'int i = static_cast<int>(0);'\n+ with self.compile(code=CPP_TEST, extra_args=[cpp_std_value], mode='compile') as p:\n+ if p.returncode == 0:\n+ mlog.debug('Compiler accepts {}:'.format(cpp_std_value), 'YES')\n+ return True\n+ else:\n+ mlog.debug('Compiler accepts {}:'.format(cpp_std_value), 'NO')\n+ return False\n+\n+ @functools.lru_cache()\n+ def _find_best_cpp_std(self, cpp_std):\n+ # The initial version mapping approach to make falling back\n+ # from '-std=c++14' to '-std=c++1y' was too brittle. For instance,\n+ # Apple's Clang uses a different versioning scheme to upstream LLVM,\n+ # making the whole detection logic awfully brittle. 
Instead, let's\n+ # just see if feeding GCC or Clang our '-std=' setting works, and\n+ # if not, try the fallback argument.\n+ CPP_FALLBACKS = {\n+ 'c++11': 'c++0x',\n+ 'gnu++11': 'gnu++0x',\n+ 'c++14': 'c++1y',\n+ 'gnu++14': 'gnu++1y',\n+ 'c++17': 'c++1z',\n+ 'gnu++17': 'gnu++1z'\n+ }\n+\n+ # Currently, remapping is only supported for Clang and GCC\n+ assert(self.id in frozenset(['clang', 'gcc']))\n+\n+ if cpp_std not in CPP_FALLBACKS:\n+ # 'c++03' and 'c++98' don't have fallback types\n+ return '-std=' + cpp_std\n+\n+ for i in (cpp_std, CPP_FALLBACKS[cpp_std]):\n+ cpp_std_value = '-std=' + i\n+ if self._test_cpp_std_arg(cpp_std_value):\n+ return cpp_std_value\n+\n+ raise MesonException('C++ Compiler does not support -std={}'.format(cpp_std))\n+\n \n class ClangCPPCompiler(ClangCompiler, CPPCompiler):\n def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None, **kwargs):\n@@ -89,11 +140,7 @@ class ClangCPPCompiler(ClangCompiler, CPPCompiler):\n args = []\n std = options['cpp_std']\n if std.value != 'none':\n- cpp_std_value = std.value\n- # Clang 3.2, 3.3, 3.4 only understand -std={c,gnu}++1y and not -std={c,gnu}++14\n- if version_compare(self.version, '>=3.2') and version_compare(self.version, '<3.5'):\n- cpp_std_value = cpp_std_value.replace('++14', '++1y')\n- args.append('-std=' + cpp_std_value)\n+ args.append(self._find_best_cpp_std(std.value))\n return args\n \n def get_option_link_args(self, options):\n@@ -159,11 +206,7 @@ class GnuCPPCompiler(GnuCompiler, CPPCompiler):\n args = []\n std = options['cpp_std']\n if std.value != 'none':\n- cpp_std_value = std.value\n- # GCC 4.8 only understands -std={c,gnu}++1y and not -std={c,gnu}++14\n- if version_compare(self.version, '>=4.8') and version_compare(self.version, '<4.9'):\n- cpp_std_value = cpp_std_value.replace('++14', '++1y')\n- args.append('-std=' + cpp_std_value)\n+ args.append(self._find_best_cpp_std(std.value))\n if options['cpp_debugstl'].value:\n args.append('-D_GLIBCXX_DEBUG=1')\n return args\n" } ]
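The probe-and-fall-back idea described above can be sketched outside Meson as follows. The real _find_best_cpp_std in the diff uses the compiler object's own compile() helper and an lru_cache; this standalone version drives a GCC/Clang-style compiler through subprocess, so the exact command line (-x c++ -fsyntax-only reading from stdin) is an assumption of the sketch, not Meson's code.

    import subprocess

    CPP_FALLBACKS = {'c++14': 'c++1y', 'gnu++14': 'gnu++1y',
                     'c++17': 'c++1z', 'gnu++17': 'gnu++1z'}

    def compiler_accepts(compiler, std):
        # Compile a trivial translation unit with only -std=<std> added; a zero
        # return code means the compiler understands the flag.
        proc = subprocess.run(
            [compiler, '-x', 'c++', '-std=' + std, '-fsyntax-only', '-'],
            input='int i = static_cast<int>(0);', text=True, capture_output=True)
        return proc.returncode == 0

    def find_best_cpp_std(compiler, cpp_std):
        # Try the requested dialect first, then its older spelling (e.g. c++1y for c++14).
        for candidate in (cpp_std, CPP_FALLBACKS.get(cpp_std)):
            if candidate and compiler_accepts(compiler, candidate):
                return '-std=' + candidate
        raise RuntimeError('%s does not support -std=%s' % (compiler, cpp_std))

For example, find_best_cpp_std('g++', 'c++14') returns '-std=c++14' on any recent GCC and '-std=c++1y' on GCC 4.8, without ever consulting version numbers.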
69ec001b0672094ab92c07f5e561c9c0525aef7b
mesonbuild/meson
15.09.2018 13:13:50
Apache License 2.0
Use enum instead of `int` for compiler variants * Enums are strongly typed and make the whole `gcc_type`/`clang_type`/`icc_type` distinction redundant. * Enums also allow extending via member functions, which makes the code more generalisable.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/__init__.py", "new_path": "mesonbuild/compilers/__init__.py", "diff": "@@ -14,16 +14,7 @@\n \n # Public symbols for compilers sub-package when using 'from . import compilers'\n __all__ = [\n- 'CLANG_OSX',\n- 'CLANG_STANDARD',\n- 'CLANG_WIN',\n- 'GCC_CYGWIN',\n- 'GCC_MINGW',\n- 'GCC_OSX',\n- 'GCC_STANDARD',\n- 'ICC_OSX',\n- 'ICC_STANDARD',\n- 'ICC_WIN',\n+ 'CompilerType',\n \n 'all_languages',\n 'base_options',\n@@ -94,16 +85,7 @@ __all__ = [\n \n # Bring symbols from each module into compilers sub-package namespace\n from .compilers import (\n- GCC_OSX,\n- GCC_MINGW,\n- GCC_CYGWIN,\n- GCC_STANDARD,\n- CLANG_OSX,\n- CLANG_WIN,\n- CLANG_STANDARD,\n- ICC_OSX,\n- ICC_WIN,\n- ICC_STANDARD,\n+ CompilerType,\n all_languages,\n base_options,\n clib_langs,\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/c.py", "new_path": "mesonbuild/compilers/c.py", "diff": "@@ -30,7 +30,7 @@ from ..mesonlib import (\n from .c_function_attributes import C_FUNC_ATTRIBUTES\n \n from .compilers import (\n- GCC_MINGW,\n+ CompilerType,\n get_largefile_args,\n gnu_winlibs,\n msvc_winlibs,\n@@ -121,7 +121,7 @@ class CCompiler(Compiler):\n # The default behavior is this, override in MSVC\n @functools.lru_cache(maxsize=None)\n def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):\n- if self.id == 'clang' and self.clang_type == compilers.CLANG_OSX:\n+ if self.id == 'clang' and self.compiler_type == CompilerType.CLANG_OSX:\n return self.build_osx_rpath_args(build_dir, rpath_paths, build_rpath)\n return self.build_unix_rpath_args(build_dir, from_dir, rpath_paths, build_rpath, install_rpath)\n \n@@ -160,15 +160,8 @@ class CCompiler(Compiler):\n '''\n Get args for allowing undefined symbols when linking to a shared library\n '''\n- if self.id == 'clang':\n- if self.clang_type == compilers.CLANG_OSX:\n- # Apple ld\n- return ['-Wl,-undefined,dynamic_lookup']\n- else:\n- # GNU ld and LLVM lld\n- return ['-Wl,--allow-shlib-undefined']\n- elif self.id == 'gcc':\n- if self.gcc_type == compilers.GCC_OSX:\n+ if self.id in ('clang', 'gcc'):\n+ if self.compiler_type.is_osx_compiler:\n # Apple ld\n return ['-Wl,-undefined,dynamic_lookup']\n else:\n@@ -1064,9 +1057,9 @@ class CCompiler(Compiler):\n \n \n class ClangCCompiler(ClangCompiler, CCompiler):\n- def __init__(self, exelist, version, clang_type, is_cross, exe_wrapper=None, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):\n CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)\n- ClangCompiler.__init__(self, clang_type)\n+ ClangCompiler.__init__(self, compiler_type)\n default_warn_args = ['-Wall', '-Winvalid-pch']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n@@ -1092,7 +1085,7 @@ class ClangCCompiler(ClangCompiler, CCompiler):\n \n def get_linker_always_args(self):\n basic = super().get_linker_always_args()\n- if self.clang_type == compilers.CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return basic + ['-Wl,-headerpad_max_install_names']\n return basic\n \n@@ -1126,9 +1119,9 @@ class ArmclangCCompiler(ArmclangCompiler, CCompiler):\n \n \n class GnuCCompiler(GnuCompiler, CCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n CCompiler.__init__(self, exelist, version, is_cross, 
exe_wrapper, **kwargs)\n- GnuCompiler.__init__(self, gcc_type, defines)\n+ GnuCompiler.__init__(self, compiler_type, defines)\n default_warn_args = ['-Wall', '-Winvalid-pch']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n@@ -1140,7 +1133,7 @@ class GnuCCompiler(GnuCompiler, CCompiler):\n ['none', 'c89', 'c99', 'c11',\n 'gnu89', 'gnu99', 'gnu11'],\n 'none')})\n- if self.gcc_type == GCC_MINGW:\n+ if self.compiler_type == CompilerType.GCC_MINGW:\n opts.update({\n 'c_winlibs': coredata.UserArrayOption('c_winlibs', 'Standard Win libraries to link against',\n gnu_winlibs), })\n@@ -1154,7 +1147,7 @@ class GnuCCompiler(GnuCompiler, CCompiler):\n return args\n \n def get_option_link_args(self, options):\n- if self.gcc_type == GCC_MINGW:\n+ if self.compiler_type == CompilerType.GCC_MINGW:\n return options['c_winlibs'].value[:]\n return []\n \n@@ -1166,9 +1159,9 @@ class GnuCCompiler(GnuCompiler, CCompiler):\n \n \n class ElbrusCCompiler(GnuCCompiler, ElbrusCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n- GnuCCompiler.__init__(self, exelist, version, gcc_type, is_cross, exe_wrapper, defines, **kwargs)\n- ElbrusCompiler.__init__(self, gcc_type, defines)\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n+ GnuCCompiler.__init__(self, exelist, version, compiler_type, is_cross, exe_wrapper, defines, **kwargs)\n+ ElbrusCompiler.__init__(self, compiler_type, defines)\n \n # It does support some various ISO standards and c/gnu 90, 9x, 1x in addition to those which GNU CC supports.\n def get_options(self):\n@@ -1190,9 +1183,9 @@ class ElbrusCCompiler(GnuCCompiler, ElbrusCompiler):\n \n \n class IntelCCompiler(IntelCompiler, CCompiler):\n- def __init__(self, exelist, version, icc_type, is_cross, exe_wrapper=None, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):\n CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)\n- IntelCompiler.__init__(self, icc_type)\n+ IntelCompiler.__init__(self, compiler_type)\n self.lang_header = 'c-header'\n default_warn_args = ['-Wall', '-w3', '-diag-disable:remark', '-Wpch-messages']\n self.warn_args = {'1': default_warn_args,\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/compilers.py", "new_path": "mesonbuild/compilers/compilers.py", "diff": "@@ -12,7 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import contextlib, os.path, re, tempfile, shlex\n+import contextlib, enum, os.path, re, tempfile, shlex\n import subprocess\n \n from ..linkers import StaticLinker\n@@ -1141,19 +1141,35 @@ class Compiler:\n raise EnvironmentException(\n 'Language {} does not support function attributes.'.format(self.get_display_language()))\n \n-GCC_STANDARD = 0\n-GCC_OSX = 1\n-GCC_MINGW = 2\n-GCC_CYGWIN = 3\n \n-CLANG_STANDARD = 0\n-CLANG_OSX = 1\n-CLANG_WIN = 2\n-# Possibly clang-cl?\n+@enum.unique\n+class CompilerType(enum.Enum):\n+ GCC_STANDARD = 0\n+ GCC_OSX = 1\n+ GCC_MINGW = 2\n+ GCC_CYGWIN = 3\n+\n+ CLANG_STANDARD = 10\n+ CLANG_OSX = 11\n+ CLANG_MINGW = 12\n+ # Possibly clang-cl?\n+\n+ ICC_STANDARD = 20\n+ ICC_OSX = 21\n+ ICC_WIN = 22\n+\n+ @property\n+ def is_standard_compiler(self):\n+ return self.name in ('GCC_STANDARD', 'CLANG_STANDARD', 'ICC_STANDARD')\n+\n+ @property\n+ def is_osx_compiler(self):\n+ return self.name in ('GCC_OSX', 'CLANG_OSX', 
'ICC_OSX')\n+\n+ @property\n+ def is_windows_compiler(self):\n+ return self.name in ('GCC_MINGW', 'GCC_CYGWIN', 'CLANG_MINGW', 'ICC_WIN')\n \n-ICC_STANDARD = 0\n-ICC_OSX = 1\n-ICC_WIN = 2\n \n # GNU ld cannot be installed on macOS\n # https://github.com/Homebrew/homebrew-core/issues/17794#issuecomment-328174395\n@@ -1169,14 +1185,14 @@ def get_macos_dylib_install_name(prefix, shlib_name, suffix, soversion):\n install_name += '.dylib'\n return '@rpath/' + install_name\n \n-def get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, soversion, darwin_versions, is_shared_module):\n- if gcc_type == GCC_STANDARD:\n+def get_gcc_soname_args(compiler_type, prefix, shlib_name, suffix, soversion, darwin_versions, is_shared_module):\n+ if compiler_type.is_standard_compiler:\n sostr = '' if soversion is None else '.' + soversion\n return ['-Wl,-soname,%s%s.%s%s' % (prefix, shlib_name, suffix, sostr)]\n- elif gcc_type in (GCC_MINGW, GCC_CYGWIN):\n+ elif compiler_type.is_windows_compiler:\n # For PE/COFF the soname argument has no effect with GNU LD\n return []\n- elif gcc_type == GCC_OSX:\n+ elif compiler_type.is_osx_compiler:\n if is_shared_module:\n return []\n name = get_macos_dylib_install_name(prefix, shlib_name, suffix, soversion)\n@@ -1188,20 +1204,21 @@ def get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, soversion, darwin_\n raise RuntimeError('Not implemented yet.')\n \n def get_compiler_is_linuxlike(compiler):\n- if (getattr(compiler, 'gcc_type', None) == GCC_STANDARD) or \\\n- (getattr(compiler, 'clang_type', None) == CLANG_STANDARD) or \\\n- (getattr(compiler, 'icc_type', None) == ICC_STANDARD):\n- return True\n- return False\n+ compiler_type = getattr(compiler, 'compiler_type', None)\n+ return compiler_type and compiler_type.is_standard_compiler\n \n def get_compiler_uses_gnuld(c):\n # FIXME: Perhaps we should detect the linker in the environment?\n # FIXME: Assumes that *BSD use GNU ld, but they might start using lld soon\n- if (getattr(c, 'gcc_type', None) in (GCC_STANDARD, GCC_MINGW, GCC_CYGWIN)) or \\\n- (getattr(c, 'clang_type', None) in (CLANG_STANDARD, CLANG_WIN)) or \\\n- (getattr(c, 'icc_type', None) in (ICC_STANDARD, ICC_WIN)):\n- return True\n- return False\n+ compiler_type = getattr(c, 'compiler_type', None)\n+ return compiler_type in (\n+ CompilerType.GCC_STANDARD,\n+ CompilerType.GCC_MINGW,\n+ CompilerType.GCC_CYGWIN,\n+ CompilerType.CLANG_STANDARD,\n+ CompilerType.CLANG_MINGW,\n+ CompilerType.ICC_STANDARD,\n+ CompilerType.ICC_WIN)\n \n def get_largefile_args(compiler):\n '''\n@@ -1262,13 +1279,13 @@ def gnulike_default_include_dirs(compiler, lang):\n \n class GnuCompiler:\n # Functionality that is common to all GNU family compilers.\n- def __init__(self, gcc_type, defines):\n+ def __init__(self, compiler_type, defines):\n self.id = 'gcc'\n- self.gcc_type = gcc_type\n+ self.compiler_type = compiler_type\n self.defines = defines or {}\n self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',\n 'b_colorout', 'b_ndebug', 'b_staticpic']\n- if self.gcc_type == GCC_OSX:\n+ if self.compiler_type.is_osx_compiler:\n self.base_options.append('b_bitcode')\n else:\n self.base_options.append('b_lundef')\n@@ -1279,7 +1296,7 @@ class GnuCompiler:\n # TODO: centralise this policy more globally, instead\n # of fragmenting it into GnuCompiler and ClangCompiler\n def get_asneeded_args(self):\n- if self.gcc_type == GCC_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return APPLE_LD_AS_NEEDED\n else:\n return GNU_LD_AS_NEEDED\n@@ -1305,7 +1322,7 @@ class 
GnuCompiler:\n return self.defines[define]\n \n def get_pic_args(self):\n- if self.gcc_type in (GCC_CYGWIN, GCC_MINGW, GCC_OSX):\n+ if self.compiler_type in (CompilerType.GCC_CYGWIN, CompilerType.GCC_MINGW, CompilerType.GCC_OSX):\n return [] # On Window and OS X, pic is always on.\n return ['-fPIC']\n \n@@ -1319,7 +1336,7 @@ class GnuCompiler:\n return clike_debug_args[is_debug]\n \n def get_buildtype_linker_args(self, buildtype):\n- if self.gcc_type == GCC_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return apple_buildtype_linker_args[buildtype]\n return gnulike_buildtype_linker_args[buildtype]\n \n@@ -1330,7 +1347,7 @@ class GnuCompiler:\n return os.path.dirname(fname), fname\n \n def get_soname_args(self, *args):\n- return get_gcc_soname_args(self.gcc_type, *args)\n+ return get_gcc_soname_args(self.compiler_type, *args)\n \n def get_std_shared_lib_link_args(self):\n return ['-shared']\n@@ -1343,13 +1360,13 @@ class GnuCompiler:\n raise RuntimeError('Module definitions file should be str')\n # On Windows targets, .def files may be specified on the linker command\n # line like an object file.\n- if self.gcc_type in (GCC_CYGWIN, GCC_MINGW):\n+ if self.compiler_type in (CompilerType.GCC_CYGWIN, CompilerType.GCC_MINGW):\n return [defsfile]\n # For other targets, discard the .def file.\n return []\n \n def get_gui_app_args(self, value):\n- if self.gcc_type in (GCC_CYGWIN, GCC_MINGW) and value:\n+ if self.compiler_type in (CompilerType.GCC_CYGWIN, CompilerType.GCC_MINGW) and value:\n return ['-mwindows']\n return []\n \n@@ -1368,8 +1385,8 @@ class GnuCompiler:\n class ElbrusCompiler(GnuCompiler):\n # Elbrus compiler is nearly like GCC, but does not support\n # PCH, LTO, sanitizers and color output as of version 1.21.x.\n- def __init__(self, gcc_type, defines):\n- GnuCompiler.__init__(self, gcc_type, defines)\n+ def __init__(self, compiler_type, defines):\n+ GnuCompiler.__init__(self, compiler_type, defines)\n self.id = 'lcc'\n self.base_options = ['b_pgo', 'b_coverage',\n 'b_ndebug', 'b_staticpic',\n@@ -1404,12 +1421,12 @@ class ElbrusCompiler(GnuCompiler):\n return paths\n \n class ClangCompiler:\n- def __init__(self, clang_type):\n+ def __init__(self, compiler_type):\n self.id = 'clang'\n- self.clang_type = clang_type\n+ self.compiler_type = compiler_type\n self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',\n 'b_ndebug', 'b_staticpic', 'b_colorout']\n- if self.clang_type == CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n self.base_options.append('b_bitcode')\n else:\n self.base_options.append('b_lundef')\n@@ -1420,13 +1437,13 @@ class ClangCompiler:\n # TODO: centralise this policy more globally, instead\n # of fragmenting it into GnuCompiler and ClangCompiler\n def get_asneeded_args(self):\n- if self.clang_type == CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return APPLE_LD_AS_NEEDED\n else:\n return GNU_LD_AS_NEEDED\n \n def get_pic_args(self):\n- if self.clang_type in (CLANG_WIN, CLANG_OSX):\n+ if self.compiler_type in (CompilerType.CLANG_MINGW, CompilerType.CLANG_OSX):\n return [] # On Window and OS X, pic is always on.\n return ['-fPIC']\n \n@@ -1437,7 +1454,7 @@ class ClangCompiler:\n return gnulike_buildtype_args[buildtype]\n \n def get_buildtype_linker_args(self, buildtype):\n- if self.clang_type == CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return apple_buildtype_linker_args[buildtype]\n return gnulike_buildtype_linker_args[buildtype]\n \n@@ -1457,15 +1474,7 @@ class ClangCompiler:\n return ['-include-pch', 
os.path.join(pch_dir, self.get_pch_name(header))]\n \n def get_soname_args(self, *args):\n- if self.clang_type == CLANG_STANDARD:\n- gcc_type = GCC_STANDARD\n- elif self.clang_type == CLANG_OSX:\n- gcc_type = GCC_OSX\n- elif self.clang_type == CLANG_WIN:\n- gcc_type = GCC_MINGW\n- else:\n- raise MesonException('Unreachable code when converting clang type to gcc type.')\n- return get_gcc_soname_args(gcc_type, *args)\n+ return get_gcc_soname_args(self.compiler_type, *args)\n \n def has_multi_arguments(self, args, env):\n myargs = ['-Werror=unknown-warning-option', '-Werror=unused-command-line-argument']\n@@ -1482,17 +1491,17 @@ class ClangCompiler:\n # visibility to obey OS X and iOS minimum version targets with\n # -mmacosx-version-min, -miphoneos-version-min, etc.\n # https://github.com/Homebrew/homebrew-core/issues/3727\n- if self.clang_type == CLANG_OSX and version_compare(self.version, '>=8.0'):\n+ if self.compiler_type.is_osx_compiler and version_compare(self.version, '>=8.0'):\n extra_args.append('-Wl,-no_weak_imports')\n return super().has_function(funcname, prefix, env, extra_args, dependencies)\n \n def get_std_shared_module_link_args(self, options):\n- if self.clang_type == CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return ['-bundle', '-Wl,-undefined,dynamic_lookup']\n return ['-shared']\n \n def get_link_whole_for(self, args):\n- if self.clang_type == CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n result = []\n for a in args:\n result += ['-Wl,-force_load', a]\n@@ -1593,9 +1602,9 @@ class ArmclangCompiler:\n \n # Tested on linux for ICC 14.0.3, 15.0.6, 16.0.4, 17.0.1\n class IntelCompiler:\n- def __init__(self, icc_type):\n+ def __init__(self, compiler_type):\n self.id = 'intel'\n- self.icc_type = icc_type\n+ self.compiler_type = compiler_type\n self.lang_header = 'none'\n self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',\n 'b_colorout', 'b_ndebug', 'b_staticpic', 'b_lundef', 'b_asneeded']\n@@ -1625,27 +1634,19 @@ class IntelCompiler:\n return os.path.dirname(fname), fname\n \n def get_soname_args(self, *args):\n- if self.icc_type == ICC_STANDARD:\n- gcc_type = GCC_STANDARD\n- elif self.icc_type == ICC_OSX:\n- gcc_type = GCC_OSX\n- elif self.icc_type == ICC_WIN:\n- gcc_type = GCC_MINGW\n- else:\n- raise MesonException('Unreachable code when converting icc type to gcc type.')\n- return get_gcc_soname_args(gcc_type, *args)\n+ return get_gcc_soname_args(self.compiler_type, *args)\n \n # TODO: centralise this policy more globally, instead\n # of fragmenting it into GnuCompiler and ClangCompiler\n def get_asneeded_args(self):\n- if self.icc_type == CLANG_OSX:\n+ if self.compiler_type.is_osx_compiler:\n return APPLE_LD_AS_NEEDED\n else:\n return GNU_LD_AS_NEEDED\n \n def get_std_shared_lib_link_args(self):\n # FIXME: Don't know how icc works on OSX\n- # if self.icc_type == ICC_OSX:\n+ # if self.compiler_type.is_osx_compiler:\n # return ['-bundle']\n return ['-shared']\n \n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/cpp.py", "new_path": "mesonbuild/compilers/cpp.py", "diff": "@@ -21,7 +21,7 @@ from ..mesonlib import MesonException, version_compare\n \n from .c import CCompiler, VisualStudioCCompiler\n from .compilers import (\n- GCC_MINGW,\n+ CompilerType,\n gnu_winlibs,\n msvc_winlibs,\n ClangCompiler,\n@@ -126,9 +126,9 @@ class CPPCompiler(CCompiler):\n \n \n class ClangCPPCompiler(ClangCompiler, CPPCompiler):\n- def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None, **kwargs):\n+ def __init__(self, 
exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):\n CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)\n- ClangCompiler.__init__(self, cltype)\n+ ClangCompiler.__init__(self, compiler_type)\n default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n@@ -185,9 +185,9 @@ class ArmclangCPPCompiler(ArmclangCompiler, CPPCompiler):\n \n \n class GnuCPPCompiler(GnuCompiler, CPPCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrap, defines, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrap, defines, **kwargs):\n CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)\n- GnuCompiler.__init__(self, gcc_type, defines)\n+ GnuCompiler.__init__(self, compiler_type, defines)\n default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n@@ -202,7 +202,7 @@ class GnuCPPCompiler(GnuCompiler, CPPCompiler):\n 'cpp_debugstl': coredata.UserBooleanOption('cpp_debugstl',\n 'STL debug mode',\n False)})\n- if self.gcc_type == GCC_MINGW:\n+ if self.compiler_type == CompilerType.GCC_MINGW:\n opts.update({\n 'cpp_winlibs': coredata.UserArrayOption('cpp_winlibs', 'Standard Win libraries to link against',\n gnu_winlibs), })\n@@ -218,7 +218,7 @@ class GnuCPPCompiler(GnuCompiler, CPPCompiler):\n return args\n \n def get_option_link_args(self, options):\n- if self.gcc_type == GCC_MINGW:\n+ if self.compiler_type == CompilerType.GCC_MINGW:\n return options['cpp_winlibs'].value[:]\n return []\n \n@@ -230,9 +230,9 @@ class GnuCPPCompiler(GnuCompiler, CPPCompiler):\n \n \n class ElbrusCPPCompiler(GnuCPPCompiler, ElbrusCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n- GnuCPPCompiler.__init__(self, exelist, version, gcc_type, is_cross, exe_wrapper, defines, **kwargs)\n- ElbrusCompiler.__init__(self, gcc_type, defines)\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n+ GnuCPPCompiler.__init__(self, exelist, version, compiler_type, is_cross, exe_wrapper, defines, **kwargs)\n+ ElbrusCompiler.__init__(self, compiler_type, defines)\n \n # It does not support c++/gnu++ 17 and 1z, but still does support 0x, 1y, and gnu++98.\n def get_options(self):\n@@ -253,9 +253,9 @@ class ElbrusCPPCompiler(GnuCPPCompiler, ElbrusCompiler):\n \n \n class IntelCPPCompiler(IntelCompiler, CPPCompiler):\n- def __init__(self, exelist, version, icc_type, is_cross, exe_wrap, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrap, **kwargs):\n CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)\n- IntelCompiler.__init__(self, icc_type)\n+ IntelCompiler.__init__(self, compiler_type)\n self.lang_header = 'c++-header'\n default_warn_args = ['-Wall', '-w3', '-diag-disable:remark',\n '-Wpch-messages', '-Wnon-virtual-dtor']\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/d.py", "new_path": "mesonbuild/compilers/d.py", "diff": "@@ -17,8 +17,7 @@ import os.path, subprocess\n from ..mesonlib import EnvironmentException, version_compare, is_windows, is_osx\n \n from .compilers import (\n- GCC_STANDARD,\n- GCC_OSX,\n+ CompilerType,\n d_dmd_buildtype_args,\n d_gdc_buildtype_args,\n d_ldc_buildtype_args,\n@@ -152,12 +151,12 @@ class DCompiler(Compiler):\n if 
is_windows():\n return []\n elif is_osx():\n- soname_args = get_gcc_soname_args(GCC_OSX, *args)\n+ soname_args = get_gcc_soname_args(CompilerType.GCC_OSX, *args)\n if soname_args:\n return ['-Wl,' + ','.join(soname_args)]\n return []\n \n- return get_gcc_soname_args(GCC_STANDARD, *args)\n+ return get_gcc_soname_args(CompilerType.GCC_STANDARD, *args)\n \n def get_feature_args(self, kwargs, build_to_src):\n res = []\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/fortran.py", "new_path": "mesonbuild/compilers/fortran.py", "diff": "@@ -14,7 +14,7 @@\n \n from .c import CCompiler\n from .compilers import (\n- ICC_STANDARD,\n+ CompilerType,\n apple_buildtype_linker_args,\n gnulike_buildtype_args,\n gnulike_buildtype_linker_args,\n@@ -257,9 +257,9 @@ end program prog\n \n \n class GnuFortranCompiler(GnuCompiler, FortranCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n FortranCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)\n- GnuCompiler.__init__(self, gcc_type, defines)\n+ GnuCompiler.__init__(self, compiler_type, defines)\n default_warn_args = ['-Wall']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n@@ -279,9 +279,9 @@ class GnuFortranCompiler(GnuCompiler, FortranCompiler):\n \n \n class ElbrusFortranCompiler(GnuFortranCompiler, ElbrusCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n- GnuFortranCompiler.__init__(self, exelist, version, gcc_type, is_cross, exe_wrapper, defines, **kwargs)\n- ElbrusCompiler.__init__(self, gcc_type, defines)\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):\n+ GnuFortranCompiler.__init__(self, exelist, version, compiler_type, is_cross, exe_wrapper, defines, **kwargs)\n+ ElbrusCompiler.__init__(self, compiler_type, defines)\n \n class G95FortranCompiler(FortranCompiler):\n def __init__(self, exelist, version, is_cross, exe_wrapper=None, **kwags):\n@@ -330,7 +330,7 @@ class IntelFortranCompiler(IntelCompiler, FortranCompiler):\n FortranCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwags)\n # FIXME: Add support for OS X and Windows in detect_fortran_compiler so\n # we are sent the type of compiler\n- IntelCompiler.__init__(self, ICC_STANDARD)\n+ IntelCompiler.__init__(self, CompilerType.ICC_STANDARD)\n self.id = 'intel'\n default_warn_args = ['-warn', 'general', '-warn', 'truncated_source']\n self.warn_args = {'1': default_warn_args,\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/objc.py", "new_path": "mesonbuild/compilers/objc.py", "diff": "@@ -51,17 +51,21 @@ class ObjCCompiler(CCompiler):\n \n \n class GnuObjCCompiler(GnuCompiler, ObjCCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None):\n ObjCCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)\n- GnuCompiler.__init__(self, gcc_type, defines)\n+ GnuCompiler.__init__(self, compiler_type, defines)\n default_warn_args = ['-Wall', '-Winvalid-pch']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n '3': default_warn_args + ['-Wextra', '-Wpedantic']}\n \n \n-class ClangObjCCompiler(ClangCompiler, 
GnuObjCCompiler):\n- def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None):\n- GnuObjCCompiler.__init__(self, exelist, version, cltype, is_cross, exe_wrapper)\n- ClangCompiler.__init__(self, cltype)\n+class ClangObjCCompiler(ClangCompiler, ObjCCompiler):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None):\n+ ObjCCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)\n+ ClangCompiler.__init__(self, compiler_type)\n+ default_warn_args = ['-Wall', '-Winvalid-pch']\n+ self.warn_args = {'1': default_warn_args,\n+ '2': default_warn_args + ['-Wextra'],\n+ '3': default_warn_args + ['-Wextra', '-Wpedantic']}\n self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage']\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/compilers/objcpp.py", "new_path": "mesonbuild/compilers/objcpp.py", "diff": "@@ -52,17 +52,21 @@ class ObjCPPCompiler(CPPCompiler):\n \n \n class GnuObjCPPCompiler(GnuCompiler, ObjCPPCompiler):\n- def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None):\n ObjCPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)\n- GnuCompiler.__init__(self, gcc_type, defines)\n+ GnuCompiler.__init__(self, compiler_type, defines)\n default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n '3': default_warn_args + ['-Wextra', '-Wpedantic']}\n \n \n-class ClangObjCPPCompiler(ClangCompiler, GnuObjCPPCompiler):\n- def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None):\n- GnuObjCPPCompiler.__init__(self, exelist, version, cltype, is_cross, exe_wrapper)\n- ClangCompiler.__init__(self, cltype)\n+class ClangObjCPPCompiler(ClangCompiler, ObjCPPCompiler):\n+ def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None):\n+ ObjCPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)\n+ ClangCompiler.__init__(self, compiler_type)\n+ default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']\n+ self.warn_args = {'1': default_warn_args,\n+ '2': default_warn_args + ['-Wextra'],\n+ '3': default_warn_args + ['-Wextra', '-Wpedantic']}\n self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage']\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/environment.py", "new_path": "mesonbuild/environment.py", "diff": "@@ -22,14 +22,7 @@ from . import mlog\n \n from . 
import compilers\n from .compilers import (\n- CLANG_OSX,\n- CLANG_STANDARD,\n- CLANG_WIN,\n- GCC_CYGWIN,\n- GCC_MINGW,\n- GCC_OSX,\n- GCC_STANDARD,\n- ICC_STANDARD,\n+ CompilerType,\n is_assembly,\n is_header,\n is_library,\n@@ -451,12 +444,12 @@ class Environment:\n def get_gnu_compiler_type(defines):\n # Detect GCC type (Apple, MinGW, Cygwin, Unix)\n if '__APPLE__' in defines:\n- return GCC_OSX\n+ return CompilerType.GCC_OSX\n elif '__MINGW32__' in defines or '__MINGW64__' in defines:\n- return GCC_MINGW\n+ return CompilerType.GCC_MINGW\n elif '__CYGWIN__' in defines:\n- return GCC_CYGWIN\n- return GCC_STANDARD\n+ return CompilerType.GCC_CYGWIN\n+ return CompilerType.GCC_STANDARD\n \n def warn_about_lang_pointing_to_cross(self, compiler_exe, evar):\n evar_str = os.environ.get(evar, 'WHO_WOULD_CALL_THEIR_COMPILER_WITH_THIS_NAME')\n@@ -560,14 +553,14 @@ This is probably wrong, it should always point to the native compiler.''' % evar\n if not defines:\n popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'\n continue\n- gtype = self.get_gnu_compiler_type(defines)\n+ compiler_type = self.get_gnu_compiler_type(defines)\n if guess_gcc_or_lcc == 'lcc':\n version = self.get_lcc_version_from_defines(defines)\n cls = ElbrusCCompiler if lang == 'c' else ElbrusCPPCompiler\n else:\n version = self.get_gnu_version_from_defines(defines)\n cls = GnuCCompiler if lang == 'c' else GnuCPPCompiler\n- return cls(ccache + compiler, version, gtype, is_cross, exe_wrap, defines, full_version=full_version)\n+ return cls(ccache + compiler, version, compiler_type, is_cross, exe_wrap, defines, full_version=full_version)\n \n if 'armclang' in out:\n # The compiler version is not present in the first line of output,\n@@ -587,13 +580,13 @@ This is probably wrong, it should always point to the native compiler.''' % evar\n return cls(ccache + compiler, version, is_cross, exe_wrap, full_version=full_version)\n if 'clang' in out:\n if 'Apple' in out or mesonlib.for_darwin(want_cross, self):\n- cltype = CLANG_OSX\n+ compiler_type = CompilerType.CLANG_OSX\n elif 'windows' in out or mesonlib.for_windows(want_cross, self):\n- cltype = CLANG_WIN\n+ compiler_type = CompilerType.CLANG_MINGW\n else:\n- cltype = CLANG_STANDARD\n+ compiler_type = CompilerType.CLANG_STANDARD\n cls = ClangCCompiler if lang == 'c' else ClangCPPCompiler\n- return cls(ccache + compiler, version, cltype, is_cross, exe_wrap, full_version=full_version)\n+ return cls(ccache + compiler, version, compiler_type, is_cross, exe_wrap, full_version=full_version)\n if 'Microsoft' in out or 'Microsoft' in err:\n # Latest versions of Visual Studio print version\n # number to stderr but earlier ones print version\n@@ -610,9 +603,9 @@ This is probably wrong, it should always point to the native compiler.''' % evar\n return cls(compiler, version, is_cross, exe_wrap, is_64)\n if '(ICC)' in out:\n # TODO: add microsoft add check OSX\n- inteltype = ICC_STANDARD\n+ compiler_type = CompilerType.ICC_STANDARD\n cls = IntelCCompiler if lang == 'c' else IntelCPPCompiler\n- return cls(ccache + compiler, version, inteltype, is_cross, exe_wrap, full_version=full_version)\n+ return cls(ccache + compiler, version, compiler_type, is_cross, exe_wrap, full_version=full_version)\n if 'ARM' in out:\n cls = ArmCCompiler if lang == 'c' else ArmCPPCompiler\n return cls(ccache + compiler, version, is_cross, exe_wrap, full_version=full_version)\n@@ -651,14 +644,14 @@ This is probably wrong, it should always point to the native compiler.''' % evar\n if not defines:\n 
popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'\n continue\n- gtype = self.get_gnu_compiler_type(defines)\n+ compiler_type = self.get_gnu_compiler_type(defines)\n if guess_gcc_or_lcc == 'lcc':\n version = self.get_lcc_version_from_defines(defines)\n cls = ElbrusFortranCompiler\n else:\n version = self.get_gnu_version_from_defines(defines)\n cls = GnuFortranCompiler\n- return cls(compiler, version, gtype, is_cross, exe_wrap, defines, full_version=full_version)\n+ return cls(compiler, version, compiler_type, is_cross, exe_wrap, defines, full_version=full_version)\n \n if 'G95' in out:\n return G95FortranCompiler(compiler, version, is_cross, exe_wrap, full_version=full_version)\n@@ -704,13 +697,13 @@ This is probably wrong, it should always point to the native compiler.''' % evar\n if not defines:\n popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'\n continue\n- gtype = self.get_gnu_compiler_type(defines)\n+ compiler_type = self.get_gnu_compiler_type(defines)\n version = self.get_gnu_version_from_defines(defines)\n- return GnuObjCCompiler(ccache + compiler, version, gtype, is_cross, exe_wrap, defines)\n+ return GnuObjCCompiler(ccache + compiler, version, compiler_type, is_cross, exe_wrap, defines)\n if out.startswith('Apple LLVM'):\n- return ClangObjCCompiler(ccache + compiler, version, CLANG_OSX, is_cross, exe_wrap)\n+ return ClangObjCCompiler(ccache + compiler, version, CompilerType.CLANG_OSX, is_cross, exe_wrap)\n if out.startswith('clang'):\n- return ClangObjCCompiler(ccache + compiler, version, CLANG_STANDARD, is_cross, exe_wrap)\n+ return ClangObjCCompiler(ccache + compiler, version, CompilerType.CLANG_STANDARD, is_cross, exe_wrap)\n self._handle_exceptions(popen_exceptions, compilers)\n \n def detect_objcpp_compiler(self, want_cross):\n@@ -731,13 +724,13 @@ This is probably wrong, it should always point to the native compiler.''' % evar\n if not defines:\n popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'\n continue\n- gtype = self.get_gnu_compiler_type(defines)\n+ compiler_type = self.get_gnu_compiler_type(defines)\n version = self.get_gnu_version_from_defines(defines)\n- return GnuObjCPPCompiler(ccache + compiler, version, gtype, is_cross, exe_wrap, defines)\n+ return GnuObjCPPCompiler(ccache + compiler, version, compiler_type, is_cross, exe_wrap, defines)\n if out.startswith('Apple LLVM'):\n- return ClangObjCPPCompiler(ccache + compiler, version, CLANG_OSX, is_cross, exe_wrap)\n+ return ClangObjCPPCompiler(ccache + compiler, version, CompilerType.CLANG_OSX, is_cross, exe_wrap)\n if out.startswith('clang'):\n- return ClangObjCPPCompiler(ccache + compiler, version, CLANG_STANDARD, is_cross, exe_wrap)\n+ return ClangObjCPPCompiler(ccache + compiler, version, CompilerType.CLANG_STANDARD, is_cross, exe_wrap)\n self._handle_exceptions(popen_exceptions, compilers)\n \n def detect_java_compiler(self):\n" }, { "change_type": "MODIFY", "old_path": "run_unittests.py", "new_path": "run_unittests.py", "diff": "@@ -265,7 +265,7 @@ class InternalTests(unittest.TestCase):\n def test_compiler_args_class_gnuld(self):\n cargsfunc = mesonbuild.compilers.CompilerArgs\n ## Test --start/end-group\n- gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', 0, False)\n+ gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', mesonbuild.compilers.CompilerType.GCC_STANDARD, False)\n ## Test that 'direct' append and extend works\n l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])\n self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', 
'-Wl,--end-group'])\n@@ -1642,30 +1642,30 @@ class AllPlatformTests(BasePlatformTests):\n if isinstance(cc, gnu):\n self.assertIsInstance(linker, ar)\n if is_osx():\n- self.assertEqual(cc.gcc_type, mesonbuild.compilers.GCC_OSX)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_OSX)\n elif is_windows():\n- self.assertEqual(cc.gcc_type, mesonbuild.compilers.GCC_MINGW)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_MINGW)\n elif is_cygwin():\n- self.assertEqual(cc.gcc_type, mesonbuild.compilers.GCC_CYGWIN)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_CYGWIN)\n else:\n- self.assertEqual(cc.gcc_type, mesonbuild.compilers.GCC_STANDARD)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_STANDARD)\n if isinstance(cc, clang):\n self.assertIsInstance(linker, ar)\n if is_osx():\n- self.assertEqual(cc.clang_type, mesonbuild.compilers.CLANG_OSX)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_OSX)\n elif is_windows():\n # Not implemented yet\n- self.assertEqual(cc.clang_type, mesonbuild.compilers.CLANG_WIN)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_MINGW)\n else:\n- self.assertEqual(cc.clang_type, mesonbuild.compilers.CLANG_STANDARD)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_STANDARD)\n if isinstance(cc, intel):\n self.assertIsInstance(linker, ar)\n if is_osx():\n- self.assertEqual(cc.icc_type, mesonbuild.compilers.ICC_OSX)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_OSX)\n elif is_windows():\n- self.assertEqual(cc.icc_type, mesonbuild.compilers.ICC_WIN)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_WIN)\n else:\n- self.assertEqual(cc.icc_type, mesonbuild.compilers.ICC_STANDARD)\n+ self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_STANDARD)\n if isinstance(cc, msvc):\n self.assertTrue(is_windows())\n self.assertIsInstance(linker, lib)\n@@ -3457,11 +3457,11 @@ class LinuxlikeTests(BasePlatformTests):\n for v in compiler.get_options()[lang_std].choices:\n if (compiler.get_id() == 'clang' and '17' in v and\n (version_compare(compiler.version, '<5.0.0') or\n- (compiler.clang_type == mesonbuild.compilers.CLANG_OSX and version_compare(compiler.version, '<9.1')))):\n+ (compiler.compiler_type == mesonbuild.compilers.CompilerType.CLANG_OSX and version_compare(compiler.version, '<9.1')))):\n continue\n if (compiler.get_id() == 'clang' and '2a' in v and\n (version_compare(compiler.version, '<6.0.0') or\n- (compiler.clang_type == mesonbuild.compilers.CLANG_OSX and version_compare(compiler.version, '<9.1')))):\n+ (compiler.compiler_type == mesonbuild.compilers.CompilerType.CLANG_OSX and version_compare(compiler.version, '<9.1')))):\n continue\n if (compiler.get_id() == 'gcc' and '2a' in v and version_compare(compiler.version, '<8.0.0')):\n continue\n" } ]
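The enum refactor above boils down to moving the grouping logic onto the type itself. A condensed illustration with only a few members follows; the real CompilerType carries the full GCC/Clang/ICC matrix, and the linker flags below are representative values rather than a claim about Meson's exact tables.

    import enum

    @enum.unique
    class CompilerType(enum.Enum):
        GCC_STANDARD = 0
        GCC_OSX = 1
        GCC_MINGW = 2
        CLANG_OSX = 11

        @property
        def is_osx_compiler(self):
            # Call sites ask the enum instead of comparing raw integer constants
            # such as GCC_OSX / CLANG_OSX / ICC_OSX separately.
            return self.name.endswith('_OSX')

    def get_asneeded_args(compiler_type):
        return '-Wl,-dead_strip_dylibs' if compiler_type.is_osx_compiler else '-Wl,--as-needed'

    assert CompilerType.CLANG_OSX.is_osx_compiler
    assert get_asneeded_args(CompilerType.GCC_MINGW) == '-Wl,--as-needed'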
37067a53c4b3b99982ef8e1f431ba0c9302b66e8
mesonbuild/meson
13.05.2018 10:36:58
Apache License 2.0
Use a single ArgumentParser for all subcommands This has the advantage that "meson --help" shows a list of all commands, making them discoverable. This also reduces the manual parsing of arguments to the strict minimum needed for backward compatibility.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/mconf.py", "new_path": "mesonbuild/mconf.py", "diff": "@@ -13,17 +13,13 @@\n # limitations under the License.\n \n import os\n-import argparse\n from . import (coredata, mesonlib, build)\n \n-def buildparser():\n- parser = argparse.ArgumentParser(prog='meson configure')\n+def add_arguments(parser):\n coredata.register_builtin_arguments(parser)\n-\n parser.add_argument('builddir', nargs='?', default='.')\n parser.add_argument('--clearcache', action='store_true', default=False,\n help='Clear cached state (e.g. found dependencies)')\n- return parser\n \n \n class ConfException(mesonlib.MesonException):\n@@ -149,9 +145,7 @@ class Conf:\n self.print_options('Testing options', test_options)\n \n \n-def run(args):\n- args = mesonlib.expand_arguments(args)\n- options = buildparser().parse_args(args)\n+def run(options):\n coredata.parse_cmd_line_options(options)\n builddir = os.path.abspath(os.path.realpath(options.builddir))\n try:\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/mesonmain.py", "new_path": "mesonbuild/mesonmain.py", "diff": "@@ -15,11 +15,97 @@\n import sys\n import os.path\n import importlib\n+import traceback\n+import argparse\n \n from . import mesonlib\n from . import mlog\n+from . import mconf, minit, minstall, mintro, msetup, mtest, rewriter\n from .mesonlib import MesonException\n from .environment import detect_msys2_arch\n+from .wrap import wraptool\n+\n+\n+class CommandLineParser:\n+ def __init__(self):\n+ self.commands = {}\n+ self.parser = argparse.ArgumentParser(prog='meson')\n+ self.subparsers = self.parser.add_subparsers(title='Commands',\n+ description='If no command is specified it defaults to setup command.')\n+ self.add_command('setup', msetup.add_arguments, msetup.run,\n+ help='Configure the project')\n+ self.add_command('configure', mconf.add_arguments, mconf.run,\n+ help='Change project options',)\n+ self.add_command('install', minstall.add_arguments, minstall.run,\n+ help='Install the project')\n+ self.add_command('introspect', mintro.add_arguments, mintro.run,\n+ help='Introspect project')\n+ self.add_command('init', minit.add_arguments, minit.run,\n+ help='Create a new project')\n+ self.add_command('test', mtest.add_arguments, mtest.run,\n+ help='Run tests')\n+ self.add_command('rewrite', rewriter.add_arguments, rewriter.run,\n+ help='Edit project files')\n+ self.add_command('wrap', wraptool.add_arguments, wraptool.run,\n+ help='Wrap tools')\n+ self.add_command('runpython', self.add_runpython_arguments, self.run_runpython_command,\n+ help='Run a python script')\n+ self.add_command('help', self.add_help_arguments, self.run_help_command,\n+ help='Print help of a subcommand')\n+\n+ def add_command(self, name, add_arguments_func, run_func, help):\n+ p = self.subparsers.add_parser(name, help=help)\n+ add_arguments_func(p)\n+ p.set_defaults(run_func=run_func)\n+ self.commands[name] = p\n+\n+ def add_runpython_arguments(self, parser):\n+ parser.add_argument('script_file')\n+ parser.add_argument('script_args', nargs=argparse.REMAINDER)\n+\n+ def run_runpython_command(self, options):\n+ import runpy\n+ sys.argv[1:] = options.script_args\n+ runpy.run_path(options.script_file, run_name='__main__')\n+ return 0\n+\n+ def add_help_arguments(self, parser):\n+ parser.add_argument('command', nargs='?')\n+\n+ def run_help_command(self, options):\n+ if options.command:\n+ self.commands[options.command].print_help()\n+ else:\n+ self.parser.print_help()\n+ return 0\n+\n+ def run(self, args):\n+ # If first arg is not 
a known command, assume user wants to run the setup\n+ # command.\n+ known_commands = list(self.commands.keys()) + ['-h', '--help']\n+ if len(args) == 0 or args[0] not in known_commands:\n+ args = ['setup'] + args\n+\n+ args = mesonlib.expand_arguments(args)\n+ options = self.parser.parse_args(args)\n+\n+ try:\n+ return options.run_func(options)\n+ except MesonException as e:\n+ mlog.exception(e)\n+ logfile = mlog.shutdown()\n+ if logfile is not None:\n+ mlog.log(\"\\nA full log can be found at\", mlog.bold(logfile))\n+ if os.environ.get('MESON_FORCE_BACKTRACE'):\n+ raise\n+ return 1\n+ except Exception as e:\n+ if os.environ.get('MESON_FORCE_BACKTRACE'):\n+ raise\n+ traceback.print_exc()\n+ return 2\n+ finally:\n+ mlog.shutdown()\n \n def run_script_command(script_name, script_args):\n # Map script name to module name for those that doesn't match\n@@ -50,6 +136,7 @@ def run(original_args, mainfile):\n print('You have python %s.' % sys.version)\n print('Please update your environment')\n return 1\n+\n # https://github.com/mesonbuild/meson/issues/3653\n if sys.platform.lower() == 'msys':\n mlog.error('This python3 seems to be msys/python on MSYS2 Windows, which is known to have path semantics incompatible with Meson')\n@@ -75,57 +162,7 @@ def run(original_args, mainfile):\n else:\n return run_script_command(args[1], args[2:])\n \n- if len(args) > 0:\n- # First check if we want to run a subcommand.\n- cmd_name = args[0]\n- remaining_args = args[1:]\n- # \"help\" is a special case: Since printing of the help may be\n- # delegated to a subcommand, we edit cmd_name before executing\n- # the rest of the logic here.\n- if cmd_name == 'help':\n- remaining_args += ['--help']\n- args = remaining_args\n- cmd_name = args[0]\n- if cmd_name == 'test':\n- from . import mtest\n- return mtest.run(remaining_args)\n- elif cmd_name == 'install':\n- from . import minstall\n- return minstall.run(remaining_args)\n- elif cmd_name == 'introspect':\n- from . import mintro\n- return mintro.run(remaining_args)\n- elif cmd_name == 'rewrite':\n- from . import rewriter\n- return rewriter.run(remaining_args)\n- elif cmd_name == 'configure':\n- try:\n- from . import mconf\n- return mconf.run(remaining_args)\n- except MesonException as e:\n- mlog.exception(e)\n- sys.exit(1)\n- elif cmd_name == 'wrap':\n- from .wrap import wraptool\n- return wraptool.run(remaining_args)\n- elif cmd_name == 'init':\n- from . import minit\n- return minit.run(remaining_args)\n- elif cmd_name == 'runpython':\n- import runpy\n- script_file = remaining_args[0]\n- sys.argv[1:] = remaining_args[1:]\n- runpy.run_path(script_file, run_name='__main__')\n- sys.exit(0)\n- else:\n- # If cmd_name is not a known command, assume user wants to run the\n- # setup command.\n- from . 
import msetup\n- if cmd_name != 'setup':\n- remaining_args = args\n- return msetup.run(remaining_args)\n-\n- return 0\n+ return CommandLineParser().run(args)\n \n def main():\n # Always resolve the command path so Ninja can find it for regen, tests, etc.\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/minit.py", "new_path": "mesonbuild/minit.py", "diff": "@@ -14,7 +14,7 @@\n \n \"\"\"Code that creates simple startup projects.\"\"\"\n \n-import os, sys, argparse, re, shutil, subprocess\n+import os, sys, re, shutil, subprocess\n from glob import glob\n from mesonbuild import mesonlib\n from mesonbuild.environment import detect_ninja\n@@ -425,8 +425,7 @@ def create_meson_build(options):\n open('meson.build', 'w').write(content)\n print('Generated meson.build file:\\n\\n' + content)\n \n-def run(args):\n- parser = argparse.ArgumentParser(prog='meson')\n+def add_arguments(parser):\n parser.add_argument(\"srcfiles\", metavar=\"sourcefile\", nargs=\"*\",\n help=\"source files. default: all recognized files in current directory\")\n parser.add_argument(\"-n\", \"--name\", help=\"project name. default: name of current directory\")\n@@ -441,7 +440,8 @@ def run(args):\n parser.add_argument('--type', default='executable',\n choices=['executable', 'library'])\n parser.add_argument('--version', default='0.1')\n- options = parser.parse_args(args)\n+\n+def run(options):\n if len(glob('*')) == 0:\n autodetect_options(options, sample=True)\n if not options.language:\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/minstall.py", "new_path": "mesonbuild/minstall.py", "diff": "@@ -14,7 +14,6 @@\n \n import sys, pickle, os, shutil, subprocess, gzip, errno\n import shlex\n-import argparse\n from glob import glob\n from .scripts import depfixer\n from .scripts import destdir_join\n@@ -33,15 +32,13 @@ build definitions so that it will not break when the change happens.'''\n \n selinux_updates = []\n \n-def buildparser():\n- parser = argparse.ArgumentParser(prog='meson install')\n+def add_arguments(parser):\n parser.add_argument('-C', default='.', dest='wd',\n help='directory to cd into before running')\n parser.add_argument('--no-rebuild', default=False, action='store_true',\n help='Do not rebuild before installing.')\n parser.add_argument('--only-changed', default=False, action='store_true',\n help='Only overwrite files that are older than the copied file.')\n- return parser\n \n class DirMaker:\n def __init__(self, lf):\n@@ -501,9 +498,7 @@ class Installer:\n else:\n raise\n \n-def run(args):\n- parser = buildparser()\n- opts = parser.parse_args(args)\n+def run(opts):\n datafilename = 'meson-private/install.dat'\n private_dir = os.path.dirname(datafilename)\n log_dir = os.path.join(private_dir, '../meson-logs')\n@@ -520,6 +515,3 @@ def run(args):\n append_to_log(lf, '# Does not contain files installed by custom scripts.')\n installer.do_install(datafilename)\n return 0\n-\n-if __name__ == '__main__':\n- sys.exit(run(sys.argv[1:]))\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/mintro.py", "new_path": "mesonbuild/mintro.py", "diff": "@@ -23,12 +23,10 @@ import json\n from . import build, mtest, coredata as cdata\n from . 
import mesonlib\n from .backend import ninjabackend\n-import argparse\n import sys, os\n import pathlib\n \n-def buildparser():\n- parser = argparse.ArgumentParser(prog='meson introspect')\n+def add_arguments(parser):\n parser.add_argument('--targets', action='store_true', dest='list_targets', default=False,\n help='List top level targets.')\n parser.add_argument('--installed', action='store_true', dest='list_installed', default=False,\n@@ -48,7 +46,6 @@ def buildparser():\n parser.add_argument('--projectinfo', action='store_true', dest='projectinfo', default=False,\n help='Information about projects.')\n parser.add_argument('builddir', nargs='?', default='.', help='The build directory')\n- return parser\n \n def determine_installed_path(target, installdata):\n install_target = None\n@@ -206,9 +203,8 @@ def list_projinfo(builddata):\n result['subprojects'] = subprojects\n print(json.dumps(result))\n \n-def run(args):\n+def run(options):\n datadir = 'meson-private'\n- options = buildparser().parse_args(args)\n if options.builddir is not None:\n datadir = os.path.join(options.builddir, datadir)\n if not os.path.isdir(datadir):\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/msetup.py", "new_path": "mesonbuild/msetup.py", "diff": "@@ -19,7 +19,6 @@ import os.path\n import platform\n import cProfile as profile\n import argparse\n-import traceback\n \n from . import environment, interpreter, mesonlib\n from . import build\n@@ -27,8 +26,7 @@ from . import mlog, coredata\n from .mesonlib import MesonException\n from .wrap import WrapMode\n \n-def buildparser():\n- parser = argparse.ArgumentParser(prog='meson')\n+def add_arguments(parser):\n coredata.register_builtin_arguments(parser)\n parser.add_argument('--cross-file', default=None,\n help='File describing cross compilation environment.')\n@@ -48,7 +46,6 @@ def buildparser():\n 'is not working.')\n parser.add_argument('builddir', nargs='?', default=None)\n parser.add_argument('sourcedir', nargs='?', default=None)\n- return parser\n \n def wrapmodetype(string):\n try:\n@@ -193,35 +190,8 @@ class MesonApp:\n os.unlink(cdf)\n raise\n \n-def run(args):\n- args = mesonlib.expand_arguments(args)\n- options = buildparser().parse_args(args)\n+def run(options):\n coredata.parse_cmd_line_options(options)\n- try:\n- app = MesonApp(options)\n- except Exception as e:\n- # Log directory does not exist, so just print\n- # to stdout.\n- print('Error during basic setup:\\n')\n- print(e)\n- return 1\n- try:\n- app.generate()\n- except Exception as e:\n- if isinstance(e, MesonException):\n- mlog.exception(e)\n- # Path to log file\n- mlog.shutdown()\n- logfile = os.path.join(app.build_dir, environment.Environment.log_dir, mlog.log_fname)\n- mlog.log(\"\\nA full log can be found at\", mlog.bold(logfile))\n- if os.environ.get('MESON_FORCE_BACKTRACE'):\n- raise\n- return 1\n- else:\n- if os.environ.get('MESON_FORCE_BACKTRACE'):\n- raise\n- traceback.print_exc()\n- return 2\n- finally:\n- mlog.shutdown()\n+ app = MesonApp(options)\n+ app.generate()\n return 0\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/mtest.py", "new_path": "mesonbuild/mtest.py", "diff": "@@ -60,8 +60,7 @@ def determine_worker_count():\n num_workers = 1\n return num_workers\n \n-def buildparser():\n- parser = argparse.ArgumentParser(prog='meson test')\n+def add_arguments(parser):\n parser.add_argument('--repeat', default=1, dest='repeat', type=int,\n help='Number of times to run the tests.')\n parser.add_argument('--no-rebuild', default=False, action='store_true',\n@@ -102,7 
+101,6 @@ def buildparser():\n help='Arguments to pass to the specified test(s) or all tests')\n parser.add_argument('args', nargs='*',\n help='Optional list of tests to run')\n- return parser\n \n \n def returncode_to_status(retcode):\n@@ -737,9 +735,7 @@ def rebuild_all(wd):\n \n return True\n \n-def run(args):\n- options = buildparser().parse_args(args)\n-\n+def run(options):\n if options.benchmark:\n options.num_processes = 1\n \n@@ -784,3 +780,9 @@ def run(args):\n else:\n print(e)\n return 1\n+\n+def run_with_args(args):\n+ parser = argparse.ArgumentParser(prog='meson test')\n+ add_arguments(parser)\n+ options = parser.parse_args(args)\n+ return run(options)\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/rewriter.py", "new_path": "mesonbuild/rewriter.py", "diff": "@@ -27,11 +27,8 @@ import mesonbuild.astinterpreter\n from mesonbuild.mesonlib import MesonException\n from mesonbuild import mlog\n import sys, traceback\n-import argparse\n-\n-def buildparser():\n- parser = argparse.ArgumentParser(prog='meson rewrite')\n \n+def add_arguments(parser):\n parser.add_argument('--sourcedir', default='.',\n help='Path to source directory.')\n parser.add_argument('--target', default=None,\n@@ -39,10 +36,8 @@ def buildparser():\n parser.add_argument('--filename', default=None,\n help='Name of source file to add or remove to target.')\n parser.add_argument('commands', nargs='+')\n- return parser\n \n-def run(args):\n- options = buildparser().parse_args(args)\n+def run(options):\n if options.target is None or options.filename is None:\n sys.exit(\"Must specify both target and filename.\")\n print('This tool is highly experimental, use with care.')\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/wrap/wraptool.py", "new_path": "mesonbuild/wrap/wraptool.py", "diff": "@@ -16,7 +16,6 @@ import json\n import sys, os\n import configparser\n import shutil\n-import argparse\n \n from glob import glob\n \n@@ -208,9 +207,6 @@ def status(options):\n else:\n print('', name, 'not up to date. Have %s %d, but %s %d is available.' % (current_branch, current_revision, latest_branch, latest_revision))\n \n-def run(args):\n- parser = argparse.ArgumentParser(prog='wraptool')\n- add_arguments(parser)\n- options = parser.parse_args(args)\n+def run(options):\n options.wrap_func(options)\n return 0\n" }, { "change_type": "MODIFY", "old_path": "run_project_tests.py", "new_path": "run_project_tests.py", "diff": "@@ -247,12 +247,12 @@ def run_test_inprocess(testdir):\n os.chdir(testdir)\n test_log_fname = Path('meson-logs', 'testlog.txt')\n try:\n- returncode_test = mtest.run(['--no-rebuild'])\n+ returncode_test = mtest.run_with_args(['--no-rebuild'])\n if test_log_fname.exists():\n test_log = test_log_fname.open(errors='ignore').read()\n else:\n test_log = ''\n- returncode_benchmark = mtest.run(['--no-rebuild', '--benchmark', '--logbase', 'benchmarklog'])\n+ returncode_benchmark = mtest.run_with_args(['--no-rebuild', '--benchmark', '--logbase', 'benchmarklog'])\n finally:\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n" }, { "change_type": "MODIFY", "old_path": "run_tests.py", "new_path": "run_tests.py", "diff": "@@ -181,7 +181,7 @@ def run_mtest_inprocess(commandlist):\n old_stderr = sys.stderr\n sys.stderr = mystderr = StringIO()\n try:\n- returncode = mtest.run(commandlist)\n+ returncode = mtest.run_with_args(commandlist)\n finally:\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n" } ]
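The single-parser design reduces to a standard argparse pattern: each subcommand module contributes an add_arguments function and a run function, and the top level only dispatches. Below is a self-contained sketch with a made-up 'setup' command; the names are simplified from the diff above.

    import argparse
    import sys

    def add_setup_arguments(parser):
        parser.add_argument('builddir', nargs='?', default='.')

    def run_setup(options):
        print('configuring', options.builddir)
        return 0

    def build_parser():
        parser = argparse.ArgumentParser(prog='tool')
        subparsers = parser.add_subparsers(title='Commands')
        setup = subparsers.add_parser('setup', help='Configure the project')
        add_setup_arguments(setup)
        # The subcommand registers its own runner; dispatch happens via run_func.
        setup.set_defaults(run_func=run_setup)
        return parser

    def main(args):
        # Backward compatibility: an unknown first argument means the user left
        # out the command name and expects the default 'setup' behaviour.
        if not args or args[0] not in ('setup', '-h', '--help'):
            args = ['setup'] + args
        options = build_parser().parse_args(args)
        return options.run_func(options)

    if __name__ == '__main__':
        sys.exit(main(sys.argv[1:]))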
c17a80f47b772d759aeb0878aa767a768a6fdd0c
mesonbuild/meson
28.11.2018 18:22:28
Apache License 2.0
Use correct environment for REGEN in VS backend. Try to guess which VS Command Prompt was used for the Meson call. If one is chosen, invoke it before calling Meson in the REGEN command.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/backend/vs2010backend.py", "new_path": "mesonbuild/backend/vs2010backend.py", "diff": "@@ -189,6 +189,33 @@ class Vs2010Backend(backends.Backend):\n with open(filename, 'wb') as f:\n pickle.dump(regeninfo, f)\n \n+ def get_vcvars_command(self):\n+ has_arch_values = 'VSCMD_ARG_TGT_ARCH' in os.environ and 'VSCMD_ARG_HOST_ARCH' in os.environ\n+\n+ # Use vcvarsall.bat if we found it.\n+ if 'VCINSTALLDIR' in os.environ:\n+ vs_version = os.environ['VisualStudioVersion'] \\\n+ if 'VisualStudioVersion' in os.environ else None\n+ relative_path = 'Auxiliary\\\\Build\\\\' if vs_version == '15.0' else ''\n+ script_path = os.environ['VCINSTALLDIR'] + relative_path + 'vcvarsall.bat'\n+ if os.path.exists(script_path):\n+ if has_arch_values:\n+ target_arch = os.environ['VSCMD_ARG_TGT_ARCH']\n+ host_arch = os.environ['VSCMD_ARG_HOST_ARCH']\n+ else:\n+ target_arch = os.environ.get('Platform', 'x86')\n+ host_arch = target_arch\n+ arch = host_arch + '_' + target_arch if host_arch != target_arch else target_arch\n+ return '\"%s\" %s' % (script_path, arch)\n+\n+ # Otherwise try the VS2017 Developer Command Prompt.\n+ if 'VS150COMNTOOLS' in os.environ and has_arch_values:\n+ script_path = os.environ['VS150COMNTOOLS'] + 'VsDevCmd.bat'\n+ if os.path.exists(script_path):\n+ return '\"%s\" -arch=%s -host_arch=%s' % \\\n+ (script_path, os.environ['VSCMD_ARG_TGT_ARCH'], os.environ['VSCMD_ARG_HOST_ARCH'])\n+ return ''\n+\n def get_obj_target_deps(self, obj_list):\n result = {}\n for o in obj_list:\n@@ -1096,7 +1123,7 @@ class Vs2010Backend(backends.Backend):\n elif targetplatform == 'arm':\n targetmachine.text = 'MachineARM'\n else:\n- raise MesonException('Unsupported Visual Studio target machine: ' + targetmachine)\n+ raise MesonException('Unsupported Visual Studio target machine: ' + targetplatform)\n \n meson_file_group = ET.SubElement(root, 'ItemGroup')\n ET.SubElement(meson_file_group, 'None', Include=os.path.join(proj_to_src_dir, build_filename))\n@@ -1213,7 +1240,9 @@ class Vs2010Backend(backends.Backend):\n ET.SubElement(midl, 'ProxyFileName').text = '%(Filename)_p.c'\n regen_command = self.environment.get_build_command() + ['--internal', 'regencheck']\n private_dir = self.environment.get_scratch_dir()\n+ vcvars_command = self.get_vcvars_command()\n cmd_templ = '''setlocal\n+call %s > NUL\n \"%s\" \"%s\"\n if %%errorlevel%% neq 0 goto :cmEnd\n :cmEnd\n@@ -1231,7 +1260,7 @@ if %%errorlevel%% neq 0 goto :VCEnd'''\n message = ET.SubElement(custombuild, 'Message')\n message.text = 'Checking whether solution needs to be regenerated.'\n ET.SubElement(custombuild, 'Command').text = cmd_templ % \\\n- ('\" \"'.join(regen_command), private_dir)\n+ (vcvars_command, '\" \"'.join(regen_command), private_dir)\n ET.SubElement(custombuild, 'Outputs').text = Vs2010Backend.get_regen_stampfile(self.environment.get_build_dir())\n deps = self.get_regen_filelist()\n ET.SubElement(custombuild, 'AdditionalInputs').text = ';'.join(deps)\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/environment.py", "new_path": "mesonbuild/environment.py", "diff": "@@ -188,6 +188,9 @@ def detect_windows_arch(compilers):\n platform = os.environ.get('BUILD_PLAT', 'x86')\n if platform == 'Win32':\n return 'x86'\n+ elif 'VSCMD_ARG_TGT_ARCH' in os.environ:\n+ # On MSVC 2017 'Platform' is not set in VsDevCmd.bat\n+ return os.environ['VSCMD_ARG_TGT_ARCH']\n else:\n # On MSVC 2010 and later 'Platform' is only set when the\n # target arch is not 'x86'. It's 'x64' when targeting\n" } ]
08ce1fb541374fb1ddce1d7318ceb92459942e9e
mesonbuild/meson
19.09.2019 20:49:33
Apache License 2.0
Move the list of LLVM version suffixes to a common place. Both scan-build and llvm-config need the same list of LLVM version suffixes. It is better to keep the list in a common place instead of having several copies in different files, which are likely to become out of sync when the list is updated.
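As a quick illustration (not part of the commit itself), the shared helper introduced in the diff below can be called for both tools; the comments reflect the suffix table it defines:

```python
from mesonbuild.environment import get_llvm_tool_names

# One suffix table drives both candidate lists: the base name first, then
# newest-to-oldest stable releases, then development snapshots.
llvm_config_names = get_llvm_tool_names('llvm-config')  # ['llvm-config', 'llvm-config-8', 'llvm-config80', ...]
scan_build_names = get_llvm_tool_names('scan-build')    # ['scan-build', 'scan-build-8', 'scan-build80', ...]
```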
[ { "change_type": "MODIFY", "old_path": "mesonbuild/dependencies/dev.py", "new_path": "mesonbuild/dependencies/dev.py", "diff": "@@ -22,6 +22,7 @@ import re\n \n from .. import mesonlib, mlog\n from ..mesonlib import version_compare, stringlistify, extract_as_list, MachineChoice\n+from ..environment import get_llvm_tool_names\n from .base import (\n DependencyException, DependencyMethods, ExternalDependency, PkgConfigDependency,\n strip_system_libdirs, ConfigToolDependency, CMakeDependency, HasNativeKwarg\n@@ -208,25 +209,7 @@ class LLVMDependencyConfigTool(ConfigToolDependency):\n # before `super().__init__` is called.\n HasNativeKwarg.__init__(self, kwargs)\n \n- # Ordered list of llvm-config binaries to try. Start with base, then try\n- # newest back to oldest (3.5 is arbitrary), and finally the devel version.\n- # Please note that llvm-config-6.0 is a development snapshot and it should\n- # not be moved to the beginning of the list.\n- self.tools = [\n- 'llvm-config', # base\n- 'llvm-config-8', 'llvm-config80',\n- 'llvm-config-7', 'llvm-config70',\n- 'llvm-config-6.0', 'llvm-config60',\n- 'llvm-config-5.0', 'llvm-config50',\n- 'llvm-config-4.0', 'llvm-config40',\n- 'llvm-config-3.9', 'llvm-config39',\n- 'llvm-config-3.8', 'llvm-config38',\n- 'llvm-config-3.7', 'llvm-config37',\n- 'llvm-config-3.6', 'llvm-config36',\n- 'llvm-config-3.5', 'llvm-config35',\n- 'llvm-config-9', # Debian development snapshot\n- 'llvm-config-devel', # FreeBSD development snapshot\n- ]\n+ self.tools = get_llvm_tool_names('llvm-config')\n \n # Fedora starting with Fedora 30 adds a suffix of the number\n # of bits in the isa that llvm targets, for example, on x86_64\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/environment.py", "new_path": "mesonbuild/environment.py", "diff": "@@ -163,6 +163,32 @@ def detect_ninja(version: str = '1.5', log: bool = False) -> str:\n mlog.log('Found {}-{} at {}'.format(name, found, quote_arg(n)))\n return n\n \n+def get_llvm_tool_names(tool: str) -> typing.List[str]:\n+ # Ordered list of possible suffixes of LLVM executables to try. Start with\n+ # base, then try newest back to oldest (3.5 is arbitrary), and finally the\n+ # devel version. Please note that the development snapshot in Debian does\n+ # not have a distinct name. 
Do not move it to the beginning of the list\n+ # unless it becomes a stable release.\n+ suffixes = [\n+ '', # base (no suffix)\n+ '-8', '80',\n+ '-7', '70',\n+ '-6.0', '60',\n+ '-5.0', '50',\n+ '-4.0', '40',\n+ '-3.9', '39',\n+ '-3.8', '38',\n+ '-3.7', '37',\n+ '-3.6', '36',\n+ '-3.5', '35',\n+ '-9', # Debian development snapshot\n+ '-devel', # FreeBSD development snapshot\n+ ]\n+ names = []\n+ for suffix in suffixes:\n+ names.append(tool + suffix)\n+ return names\n+\n def detect_scanbuild():\n \"\"\" Look for scan-build binary on build platform\n \n@@ -182,20 +208,7 @@ def detect_scanbuild():\n exelist = split_args(os.environ['SCANBUILD'])\n \n else:\n- tools = [\n- 'scan-build', # base\n- 'scan-build-8', 'scan-build80',\n- 'scan-build-7', 'scan-build70',\n- 'scan-build-6.0', 'scan-build60',\n- 'scan-build-5.0', 'scan-build50',\n- 'scan-build-4.0', 'scan-build40',\n- 'scan-build-3.9', 'scan-build39',\n- 'scan-build-3.8', 'scan-build38',\n- 'scan-build-3.7', 'scan-build37',\n- 'scan-build-3.6', 'scan-build36',\n- 'scan-build-3.5', 'scan-build35',\n- 'scan-build-9', 'scan-build-devel', # development snapshot\n- ]\n+ tools = get_llvm_tool_names('scan-build')\n for tool in tools:\n if shutil.which(tool) is not None:\n exelist = [shutil.which(tool)]\n" } ]
712b2f08c7126863a68dc98c021fc40a4de462fd
mesonbuild/meson
22.01.2020 20:52:30
Apache License 2.0
Move git helper out into mesonlib for reuse. Reuse the git helper for `meson wrap` and `meson subprojects` so we don't need to maintain the same git-colors-on-windows workarounds in multiple places.
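A minimal sketch (not taken from the commit) of how a caller outside wrap.py can now use the shared helper, assuming the `mesonbuild.mesonlib.git` wrapper shown in the diff below and a hypothetical repository path:

```python
import subprocess
from mesonbuild.mesonlib import git

# The wrapper redirects stdin to DEVNULL and restores the Windows console
# mode, so callers no longer need their own git-colors-on-windows workaround.
result = git(['status', '--short'], '/path/to/repo',
             check=True, universal_newlines=True,
             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(result.stdout)
```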
[ { "change_type": "MODIFY", "old_path": "mesonbuild/mesonlib.py", "new_path": "mesonbuild/mesonlib.py", "diff": "@@ -56,6 +56,20 @@ else:\n python_command = [sys.executable]\n meson_command = None\n \n+GIT = shutil.which('git')\n+def git(cmd: T.List[str], workingdir: str, **kwargs) -> subprocess.CompletedProcess:\n+ pc = subprocess.run([GIT, '-C', workingdir] + cmd,\n+ # Redirect stdin to DEVNULL otherwise git messes up the\n+ # console and ANSI colors stop working on Windows.\n+ stdin=subprocess.DEVNULL, **kwargs)\n+ # Sometimes git calls git recursively, such as `git submodule update\n+ # --recursive` which will be without the above workaround, so set the\n+ # console mode again just in case.\n+ if platform.system().lower() == 'windows':\n+ mlog._windows_ansi()\n+ return pc\n+\n+\n def set_meson_command(mainfile):\n global python_command\n global meson_command\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/msubprojects.py", "new_path": "mesonbuild/msubprojects.py", "diff": "@@ -2,7 +2,7 @@ import os, subprocess\n import argparse\n \n from . import mlog\n-from .mesonlib import Popen_safe\n+from .mesonlib import git, Popen_safe\n from .wrap.wrap import API_ROOT, PackageDefinition, Resolver, WrapException\n from .wrap import wraptool\n \n@@ -40,12 +40,8 @@ def update_file(wrap, repo_dir, options):\n ' In that case, delete', mlog.bold(repo_dir), 'and run', mlog.bold('meson --reconfigure'))\n \n def git_output(cmd, workingdir):\n- return subprocess.check_output(['git', '-C', workingdir] + cmd,\n- # Redirect stdin to DEVNULL otherwise git\n- # messes up the console and ANSI colors stop\n- # working on Windows.\n- stdin=subprocess.DEVNULL,\n- stderr=subprocess.STDOUT).decode()\n+ return git(cmd, workingdir, check=True, universal_newlines=True,\n+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout\n \n def git_show(repo_dir):\n commit_message = git_output(['show', '--quiet', '--pretty=format:%h%n%d%n%s%n[%an]'], repo_dir)\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/wrap/wrap.py", "new_path": "mesonbuild/wrap/wrap.py", "diff": "@@ -18,7 +18,6 @@ import urllib.request\n import urllib.error\n import urllib.parse\n import os\n-import platform\n import hashlib\n import shutil\n import tempfile\n@@ -29,7 +28,7 @@ import configparser\n import typing as T\n \n from . import WrapMode\n-from ..mesonlib import ProgressBar, MesonException\n+from ..mesonlib import git, GIT, ProgressBar, MesonException\n \n if T.TYPE_CHECKING:\n import http.client\n@@ -44,23 +43,10 @@ except ImportError:\n has_ssl = False\n API_ROOT = 'http://wrapdb.mesonbuild.com/v1/'\n \n-GIT = shutil.which('git')\n REQ_TIMEOUT = 600.0\n SSL_WARNING_PRINTED = False\n WHITELIST_SUBDOMAIN = 'wrapdb.mesonbuild.com'\n \n-def git(cmd: T.List[str], workingdir: str, **kwargs) -> subprocess.CompletedProcess:\n- pc = subprocess.run([GIT, '-C', workingdir] + cmd,\n- # Redirect stdin to DEVNULL otherwise git messes up the\n- # console and ANSI colors stop working on Windows.\n- stdin=subprocess.DEVNULL, **kwargs)\n- # Sometimes git calls git recursively, such as `git submodule update\n- # --recursive` which will be without the above workaround, so set the\n- # console mode again just in case.\n- if platform.system().lower() == 'windows':\n- mlog._windows_ansi()\n- return pc\n-\n def quiet_git(cmd: T.List[str], workingdir: str) -> T.Tuple[bool, str]:\n if not GIT:\n return False, 'Git program not found.'\n" } ]
3d6d908f7c41623c26289e09ed6608cae14d4948
mesonbuild/meson
11.02.2020 21:12:20
Apache License 2.0
Refactor TestResult object initialization. Just add or alter attributes of the TestResult object, rather than making many near-identical invocations of the constructor. Define helper methods for TestResult to add the results of a step and set the failure reason.
[ { "change_type": "MODIFY", "old_path": "run_project_tests.py", "new_path": "run_project_tests.py", "diff": "@@ -65,17 +65,30 @@ class BuildStep(Enum):\n \n \n class TestResult:\n- def __init__(self, msg, step, stdo, stde, mlog, cicmds, conftime=0, buildtime=0, testtime=0):\n- self.msg = msg\n- self.step = step\n- self.stdo = stdo\n- self.stde = stde\n- self.mlog = mlog\n+ def __init__(self, cicmds):\n+ self.msg = '' # empty msg indicates test success\n+ self.stdo = ''\n+ self.stde = ''\n+ self.mlog = ''\n self.cicmds = cicmds\n- self.conftime = conftime\n- self.buildtime = buildtime\n- self.testtime = testtime\n+ self.conftime = 0\n+ self.buildtime = 0\n+ self.testtime = 0\n \n+ def add_step(self, step, stdo, stde, mlog='', time=0):\n+ self.step = step\n+ self.stdo += stdo\n+ self.stde += stde\n+ self.mlog += mlog\n+ if step == BuildStep.configure:\n+ self.conftime = time\n+ elif step == BuildStep.build:\n+ self.buildtime = time\n+ elif step == BuildStep.test:\n+ self.testtime = time\n+\n+ def fail(self, msg):\n+ self.msg = msg\n \n @functools.total_ordering\n class TestDef:\n@@ -434,16 +447,20 @@ def _run_test(testdir, test_build_dir, install_dir, extra_args, compiler, backen\n except Exception:\n mesonlog = no_meson_log_msg\n cicmds = run_ci_commands(mesonlog)\n- gen_time = time.time() - gen_start\n+ testresult = TestResult(cicmds)\n+ testresult.add_step(BuildStep.configure, stdo, stde, mesonlog, time.time() - gen_start)\n if should_fail == 'meson':\n if returncode == 1:\n- return TestResult('', BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)\n+ return testresult\n elif returncode != 0:\n- return TestResult('Test exited with unexpected status {}'.format(returncode), BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)\n+ testresult.fail('Test exited with unexpected status {}.'.format(returncode))\n+ return testresult\n else:\n- return TestResult('Test that should have failed succeeded', BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)\n+ testresult.fail('Test that should have failed succeeded.')\n+ return testresult\n if returncode != 0:\n- return TestResult('Generating the build system failed.', BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)\n+ testresult.fail('Generating the build system failed.')\n+ return testresult\n builddata = build.load(test_build_dir)\n # Touch the meson.build file to force a regenerate so we can test that\n # regeneration works before a build is run.\n@@ -453,15 +470,15 @@ def _run_test(testdir, test_build_dir, install_dir, extra_args, compiler, backen\n dir_args = get_backend_args_for_dir(backend, test_build_dir)\n build_start = time.time()\n pc, o, e = Popen_safe(compile_commands + dir_args, cwd=test_build_dir)\n- build_time = time.time() - build_start\n- stdo += o\n- stde += e\n+ testresult.add_step(BuildStep.build, o, e, '', time.time() - build_start)\n if should_fail == 'build':\n if pc.returncode != 0:\n- return TestResult('', BuildStep.build, stdo, stde, mesonlog, cicmds, gen_time)\n- return TestResult('Test that should have failed to build succeeded', BuildStep.build, stdo, stde, mesonlog, cicmds, gen_time)\n+ return testresult\n+ testresult.fail('Test that should have failed to build succeeded.')\n+ return testresult\n if pc.returncode != 0:\n- return TestResult('Compiling source code failed.', BuildStep.build, stdo, stde, mesonlog, cicmds, gen_time, build_time)\n+ testresult.fail('Compiling source code failed.')\n+ return testresult\n # Touch the meson.build file to force a regenerate so we can test 
that\n # regeneration works after a build is complete.\n ensure_backend_detects_changes(backend)\n@@ -469,37 +486,44 @@ def _run_test(testdir, test_build_dir, install_dir, extra_args, compiler, backen\n test_start = time.time()\n # Test in-process\n (returncode, tstdo, tstde, test_log) = run_test_inprocess(test_build_dir)\n- test_time = time.time() - test_start\n- stdo += tstdo\n- stde += tstde\n- mesonlog += test_log\n+ testresult.add_step(BuildStep.test, tstdo, tstde, test_log, time.time() - test_start)\n if should_fail == 'test':\n if returncode != 0:\n- return TestResult('', BuildStep.test, stdo, stde, mesonlog, cicmds, gen_time)\n- return TestResult('Test that should have failed to run unit tests succeeded', BuildStep.test, stdo, stde, mesonlog, cicmds, gen_time)\n+ return testresult\n+ testresult.fail('Test that should have failed to run unit tests succeeded.')\n+ return testresult\n if returncode != 0:\n- return TestResult('Running unit tests failed.', BuildStep.test, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)\n+ testresult.fail('Running unit tests failed.')\n+ return testresult\n # Do installation, if the backend supports it\n if install_commands:\n env = os.environ.copy()\n env['DESTDIR'] = install_dir\n # Install with subprocess\n pi, o, e = Popen_safe(install_commands, cwd=test_build_dir, env=env)\n- stdo += o\n- stde += e\n+ testresult.add_step(BuildStep.install, o, e)\n if pi.returncode != 0:\n- return TestResult('Running install failed.', BuildStep.install, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)\n+ testresult.fail('Running install failed.')\n+ return testresult\n+\n # Clean with subprocess\n env = os.environ.copy()\n pi, o, e = Popen_safe(clean_commands + dir_args, cwd=test_build_dir, env=env)\n- stdo += o\n- stde += e\n+ testresult.add_step(BuildStep.clean, o, e)\n if pi.returncode != 0:\n- return TestResult('Running clean failed.', BuildStep.clean, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)\n+ testresult.fail('Running clean failed.')\n+ return testresult\n+\n+ # Validate installed files\n+ testresult.add_step(BuildStep.install, '', '')\n if not install_commands:\n- return TestResult('', BuildStep.install, '', '', mesonlog, cicmds, gen_time, build_time, test_time)\n- return TestResult(validate_install(testdir, install_dir, compiler, builddata.environment),\n- BuildStep.validate, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)\n+ return testresult\n+ install_msg = validate_install(testdir, install_dir, compiler, builddata.environment)\n+ if install_msg:\n+ testresult.fail(install_msg)\n+ return testresult\n+\n+ return testresult\n \n def gather_tests(testdir: Path) -> T.Iterator[TestDef]:\n tests = [t.name for t in testdir.glob('*') if t.is_dir()]\n" } ]
28e3ce67ae49494d57372f27b6f91580656f77a7
mesonbuild/meson
30.04.2020 13:54:46
Apache License 2.0
Convert test protocol into an enum. This gives us better type safety, and will be important as we add more test methods.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/backend/backends.py", "new_path": "mesonbuild/backend/backends.py", "diff": "@@ -14,6 +14,7 @@\n \n from collections import OrderedDict\n from functools import lru_cache\n+import enum\n import json\n import os\n import pickle\n@@ -28,12 +29,33 @@ from .. import dependencies\n from .. import mesonlib\n from .. import mlog\n from ..compilers import CompilerArgs, VisualStudioLikeCompiler\n-from ..interpreter import Interpreter\n from ..mesonlib import (\n File, MachineChoice, MesonException, OrderedSet, OptionOverrideProxy,\n classify_unity_sources, unholder\n )\n \n+if T.TYPE_CHECKING:\n+ from ..interpreter import Interpreter\n+\n+\n+class TestProtocol(enum.Enum):\n+\n+ EXITCODE = 0\n+ TAP = 1\n+\n+ @classmethod\n+ def from_str(cls, string: str) -> 'TestProtocol':\n+ if string == 'exitcode':\n+ return cls.EXITCODE\n+ elif string == 'tap':\n+ return cls.TAP\n+ raise MesonException('unknown test format {}'.format(string))\n+\n+ def __str__(self) -> str:\n+ if self is self.EXITCODE:\n+ return 'exitcode'\n+ return 'tap'\n+\n \n class CleanTrees:\n '''\n@@ -91,7 +113,7 @@ class TestSerialisation:\n needs_exe_wrapper: bool, is_parallel: bool, cmd_args: T.List[str],\n env: build.EnvironmentVariables, should_fail: bool,\n timeout: T.Optional[int], workdir: T.Optional[str],\n- extra_paths: T.List[str], protocol: str, priority: int):\n+ extra_paths: T.List[str], protocol: TestProtocol, priority: int):\n self.name = name\n self.project_name = project\n self.suite = suite\n@@ -111,7 +133,7 @@ class TestSerialisation:\n self.priority = priority\n self.needs_exe_wrapper = needs_exe_wrapper\n \n-def get_backend_from_name(backend: str, build: T.Optional[build.Build] = None, interpreter: T.Optional[Interpreter] = None) -> T.Optional['Backend']:\n+def get_backend_from_name(backend: str, build: T.Optional[build.Build] = None, interpreter: T.Optional['Interpreter'] = None) -> T.Optional['Backend']:\n if backend == 'ninja':\n from . 
import ninjabackend\n return ninjabackend.NinjaBackend(build, interpreter)\n@@ -138,7 +160,7 @@ def get_backend_from_name(backend: str, build: T.Optional[build.Build] = None, i\n # This class contains the basic functionality that is needed by all backends.\n # Feel free to move stuff in and out of it as you see fit.\n class Backend:\n- def __init__(self, build: T.Optional[build.Build], interpreter: T.Optional[Interpreter]):\n+ def __init__(self, build: T.Optional[build.Build], interpreter: T.Optional['Interpreter']):\n # Make it possible to construct a dummy backend\n # This is used for introspection without a build directory\n if build is None:\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/interpreter.py", "new_path": "mesonbuild/interpreter.py", "diff": "@@ -33,6 +33,7 @@ from .interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs\n from .interpreterbase import ObjectHolder\n from .modules import ModuleReturnValue\n from .cmake import CMakeInterpreter\n+from .backend.backends import TestProtocol\n \n from pathlib import Path, PurePath\n import os\n@@ -979,7 +980,7 @@ class Test(InterpreterObject):\n self.should_fail = should_fail\n self.timeout = timeout\n self.workdir = workdir\n- self.protocol = protocol\n+ self.protocol = TestProtocol.from_str(protocol)\n self.priority = priority\n \n def get_exe(self):\n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/mintro.py", "new_path": "mesonbuild/mintro.py", "diff": "@@ -328,7 +328,7 @@ def get_test_list(testdata) -> T.List[T.Dict[str, T.Union[str, int, T.List[str],\n to['suite'] = t.suite\n to['is_parallel'] = t.is_parallel\n to['priority'] = t.priority\n- to['protocol'] = t.protocol\n+ to['protocol'] = str(t.protocol)\n result.append(to)\n return result\n \n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/mtest.py", "new_path": "mesonbuild/mtest.py", "diff": "@@ -43,6 +43,7 @@ from . import environment\n from . import mlog\n from .dependencies import ExternalProgram\n from .mesonlib import MesonException, get_wine_shortpath, split_args\n+from .backend.backends import TestProtocol\n \n if T.TYPE_CHECKING:\n from .backend.backends import TestSerialisation\n@@ -631,7 +632,7 @@ class SingleTestRunner:\n if not self.options.verbose:\n stdout = tempfile.TemporaryFile(\"wb+\")\n stderr = tempfile.TemporaryFile(\"wb+\") if self.options.split else stdout\n- if self.test.protocol == 'tap' and stderr is stdout:\n+ if self.test.protocol is TestProtocol.TAP and stderr is stdout:\n stdout = tempfile.TemporaryFile(\"wb+\")\n \n # Let gdb handle ^C instead of us\n@@ -741,7 +742,7 @@ class SingleTestRunner:\n if timed_out:\n return TestRun(self.test, self.test_env, TestResult.TIMEOUT, [], p.returncode, starttime, duration, stdo, stde, cmd)\n else:\n- if self.test.protocol == 'exitcode':\n+ if self.test.protocol is TestProtocol.EXITCODE:\n return TestRun.make_exitcode(self.test, self.test_env, p.returncode, starttime, duration, stdo, stde, cmd)\n else:\n if self.options.verbose:\n" } ]
f21685a83330a4bbe1e59c3641a0d24f1efe8825
mesonbuild/meson
21.06.2021 17:45:08
Apache License 2.0
Delete redirected wrap files in subprojects purge. We need to store the original filename as well as whether the wrap was redirected in order to properly purge the redirected wrap.
[ { "change_type": "MODIFY", "old_path": "mesonbuild/msubprojects.py", "new_path": "mesonbuild/msubprojects.py", "diff": "@@ -386,6 +386,12 @@ class Runner:\n if not self.wrap.type:\n return True\n \n+ if self.wrap.redirected:\n+ redirect_file = Path(self.wrap.original_filename).resolve()\n+ if self.options.confirm:\n+ redirect_file.unlink()\n+ mlog.log(f'Deleting {redirect_file}')\n+\n if self.wrap.type == 'redirect':\n redirect_file = Path(self.wrap.filename).resolve()\n if self.options.confirm:\n@@ -416,7 +422,7 @@ class Runner:\n # parallelized, another thread could have deleted it already.\n try:\n if not any(packagecache.iterdir()):\n- packagecache.rmdir()\n+ windows_proof_rmtree(str(packagecache))\n except FileNotFoundError:\n pass\n \n" }, { "change_type": "MODIFY", "old_path": "mesonbuild/wrap/wrap.py", "new_path": "mesonbuild/wrap/wrap.py", "diff": "@@ -101,6 +101,8 @@ class PackageDefinition:\n self.name = self.basename[:-5] if self.has_wrap else self.basename\n self.directory = self.name\n self.provided_deps[self.name] = None\n+ self.original_filename = fname\n+ self.redirected = False\n if self.has_wrap:\n self.parse_wrap()\n self.directory = self.values.get('directory', self.name)\n@@ -109,6 +111,7 @@ class PackageDefinition:\n if self.type and self.type not in ALL_TYPES:\n raise WrapException(f'Unknown wrap type {self.type!r}')\n self.filesdir = os.path.join(os.path.dirname(self.filename), 'packagefiles')\n+ # What the original file name was before redirection\n \n def parse_wrap(self) -> None:\n try:\n@@ -137,6 +140,7 @@ class PackageDefinition:\n raise WrapException(f'wrap-redirect {fname} filename does not exist')\n self.filename = str(fname)\n self.parse_wrap()\n+ self.redirected = True\n return\n self.parse_provide_section(config)\n \n" }, { "change_type": "MODIFY", "old_path": "run_unittests.py", "new_path": "run_unittests.py", "diff": "@@ -10181,6 +10181,19 @@ class SubprojectsCommandTests(BasePlatformTests):\n self._git_create_local_repo('sub_git')\n self._wrap_create_git('sub_git')\n \n+ sub_file_subprojects_dir = self.subprojects_dir / 'sub_file' / 'subprojects'\n+ sub_file_subprojects_dir.mkdir(exist_ok=True, parents=True)\n+ real_dir = Path('sub_file') / 'subprojects' / 'real'\n+\n+ self._wrap_create_file(real_dir, tarball='dummy2.tar.gz')\n+\n+ with open(str((self.subprojects_dir / 'redirect').with_suffix('.wrap')), 'w', encoding='utf-8') as f:\n+ f.write(textwrap.dedent(\n+ f'''\n+ [wrap-redirect]\n+ filename = {real_dir}.wrap\n+ '''))\n+\n def deleting(s: str) -> T.List[str]:\n ret = []\n prefix = 'Deleting '\n@@ -10190,14 +10203,31 @@ class SubprojectsCommandTests(BasePlatformTests):\n return sorted(ret)\n \n out = self._subprojects_cmd(['purge'])\n- self.assertEqual(deleting(out), [str(self.subprojects_dir / 'sub_file'), str(self.subprojects_dir / 'sub_git')])\n+ self.assertEqual(deleting(out), sorted([\n+ str(self.subprojects_dir / 'redirect.wrap'),\n+ str(self.subprojects_dir / 'sub_file'),\n+ str(self.subprojects_dir / 'sub_git'),\n+ ]))\n out = self._subprojects_cmd(['purge', '--include-cache'])\n- self.assertEqual(deleting(out), [str(self.subprojects_dir / 'packagecache' / 'dummy.tar.gz'), str(self.subprojects_dir / 'sub_file'), str(self.subprojects_dir / 'sub_git')])\n+ self.assertEqual(deleting(out), sorted([\n+ str(self.subprojects_dir / 'sub_git'),\n+ str(self.subprojects_dir / 'redirect.wrap'),\n+ str(self.subprojects_dir / 'packagecache' / 'dummy.tar.gz'),\n+ str(self.subprojects_dir / 'packagecache' / 'dummy2.tar.gz'),\n+ str(self.subprojects_dir 
/ 'sub_file'),\n+ ]))\n out = self._subprojects_cmd(['purge', '--include-cache', '--confirm'])\n- self.assertEqual(deleting(out), [str(self.subprojects_dir / 'packagecache' / 'dummy.tar.gz'), str(self.subprojects_dir / 'sub_file'), str(self.subprojects_dir / 'sub_git')])\n+ self.assertEqual(deleting(out), sorted([\n+ str(self.subprojects_dir / 'sub_git'),\n+ str(self.subprojects_dir / 'redirect.wrap'),\n+ str(self.subprojects_dir / 'packagecache' / 'dummy.tar.gz'),\n+ str(self.subprojects_dir / 'packagecache' / 'dummy2.tar.gz'),\n+ str(self.subprojects_dir / 'sub_file'),\n+ ]))\n self.assertFalse(Path(self.subprojects_dir / 'packagecache' / 'dummy.tar.gz').exists())\n self.assertFalse(Path(self.subprojects_dir / 'sub_file').exists())\n self.assertFalse(Path(self.subprojects_dir / 'sub_git').exists())\n+ self.assertFalse(Path(self.subprojects_dir / 'redirect.wrap').exists())\n \n def _clang_at_least(compiler: 'Compiler', minver: str, apple_minver: T.Optional[str]) -> bool:\n \"\"\"\n" } ]
e9036760713718bbfd3d7db9f3dbc03576924e52
lmcinnes/umap
04.05.2021 23:51:13
BSD 3-Clause New or Revised License
Add support for saving embeddings at intermediate epochs. The intermediate embeddings are returned as a list in UMAP.embedding_list.
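A hedged usage sketch of the new interface, assuming the list form of `n_epochs` added in the diff below (the data and epoch values are purely illustrative):

```python
import numpy as np
from umap import UMAP

X = np.random.rand(500, 20)  # illustrative data

# With a list, optimization runs for max(n_epochs) epochs and the embeddings
# at each listed epoch (plus the final one) are kept in embedding_list.
reducer = UMAP(n_epochs=[50, 100, 200])
final_embedding = reducer.fit_transform(X)
snapshots = reducer.embedding_list
```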
[ { "change_type": "MODIFY", "old_path": "umap/layouts.py", "new_path": "umap/layouts.py", "diff": "@@ -252,8 +252,12 @@ def optimize_layout_euclidean(\n The indices of the heads of 1-simplices with non-zero membership.\n tail: array of shape (n_1_simplices)\n The indices of the tails of 1-simplices with non-zero membership.\n- n_epochs: int\n- The number of training epochs to use in optimization.\n+ n_epochs: int, or list of int\n+ The number of training epochs to use in optimization, or a list of\n+ epochs at which to save the embedding. In case of a list, the optimization\n+ will use the maximum number of epochs in the list, and will return a list\n+ of embedding in the order of increasing epoch, regardless of the order in\n+ the epoch list.\n n_vertices: int\n The number of vertices (0-simplices) in the dataset.\n epochs_per_samples: array of shape (n_1_simplices)\n@@ -322,6 +326,12 @@ def optimize_layout_euclidean(\n dens_phi_sum = np.zeros(1, dtype=np.float32)\n dens_re_sum = np.zeros(1, dtype=np.float32)\n \n+ epochs_list = None\n+ embedding_list = []\n+ if isinstance(n_epochs, list):\n+ epochs_list = n_epochs\n+ n_epochs = max(epochs_list)\n+\n for n in range(n_epochs):\n \n densmap_flag = (\n@@ -385,7 +395,14 @@ def optimize_layout_euclidean(\n if verbose and n % int(n_epochs / 10) == 0:\n print(\"\\tcompleted \", n, \" / \", n_epochs, \"epochs\")\n \n- return head_embedding\n+ if epochs_list is not None and n in epochs_list:\n+ embedding_list.append(head_embedding.copy())\n+\n+ # Add the last embedding to the list as well\n+ if epochs_list is not None:\n+ embedding_list.append(head_embedding.copy())\n+\n+ return head_embedding if epochs_list is None else embedding_list\n \n \n @numba.njit(fastmath=True)\n" }, { "change_type": "MODIFY", "old_path": "umap/umap_.py", "new_path": "umap/umap_.py", "diff": "@@ -978,11 +978,14 @@ def simplicial_set_embedding(\n in greater repulsive force being applied, greater optimization\n cost, but slightly more accuracy.\n \n- n_epochs: int (optional, default 0)\n+ n_epochs: int (optional, default 0), or list of int\n The number of training epochs to be used in optimizing the\n low dimensional embedding. Larger values result in more accurate\n embeddings. If 0 is specified a value will be selected based on\n the size of the input dataset (200 for large datasets, 500 for small).\n+ If a list of int is specified, then the intermediate embeddings at the\n+ different epochs specified in that list are returned in\n+ ``aux_data[\"embedding_list\"]``.\n \n init: string\n How to initialize the low dimensional embedding. 
Options are:\n@@ -1054,11 +1057,14 @@ def simplicial_set_embedding(\n if densmap:\n default_epochs += 200\n \n- if n_epochs is None:\n- n_epochs = default_epochs\n+ # Get the maximum epoch to reach\n+ n_epochs_max = max(n_epochs) if isinstance(n_epochs, list) else n_epochs\n \n- if n_epochs > 10:\n- graph.data[graph.data < (graph.data.max() / float(n_epochs))] = 0.0\n+ if n_epochs_max is None:\n+ n_epochs_max = default_epochs\n+\n+ if n_epochs_max > 10:\n+ graph.data[graph.data < (graph.data.max() / float(n_epochs_max))] = 0.0\n else:\n graph.data[graph.data < (graph.data.max() / float(default_epochs))] = 0.0\n \n@@ -1099,7 +1105,7 @@ def simplicial_set_embedding(\n else:\n embedding = init_data\n \n- epochs_per_sample = make_epochs_per_sample(graph.data, n_epochs)\n+ epochs_per_sample = make_epochs_per_sample(graph.data, n_epochs_max)\n \n head = graph.row\n tail = graph.col\n@@ -1188,6 +1194,11 @@ def simplicial_set_embedding(\n verbose=verbose,\n move_other=True,\n )\n+\n+ if isinstance(embedding, list):\n+ aux_data[\"embedding_list\"] = embedding\n+ embedding = embedding[-1].copy()\n+\n if output_dens:\n if verbose:\n print(ts() + \" Computing embedding densities\")\n@@ -1707,10 +1718,16 @@ class UMAP(BaseEstimator):\n raise ValueError(\"n_components must be an int\")\n if self.n_components < 1:\n raise ValueError(\"n_components must be greater than 0\")\n+ self.n_epochs_list = None\n+ if isinstance(self.n_epochs, list):\n+ if not all(isinstance(n, int) and n >= 0 for n in self.n_epochs):\n+ raise ValueError(\"n_epochs must be a nonnegative integer or a list of nonnegative integers\")\n+ self.n_epochs_list = self.n_epochs\n+ self.n_epochs = max(self.n_epochs_list)\n if self.n_epochs is not None and (\n- self.n_epochs < 0 or not isinstance(self.n_epochs, int)\n- ):\n- raise ValueError(\"n_epochs must be a nonnegative integer\")\n+ self.n_epochs < 0 or not isinstance(self.n_epochs, int)\n+ ):\n+ raise ValueError(\"n_epochs must be a nonnegative integer or a list of nonnegative integers\")\n if self.metric_kwds is None:\n self._metric_kwds = {}\n else:\n@@ -2577,12 +2594,21 @@ class UMAP(BaseEstimator):\n print(ts(), \"Construct embedding\")\n \n if self.transform_mode == \"embedding\":\n+ epochs = self.n_epochs_list if self.n_epochs_list is not None else self.n_epochs\n self.embedding_, aux_data = self._fit_embed_data(\n self._raw_data[index],\n- self.n_epochs,\n+ epochs,\n init,\n random_state, # JH why raw data?\n )\n+\n+ if self.n_epochs_list is not None:\n+ if not \"embedding_list\" in aux_data:\n+ raise KeyError(\"No list of embedding were found in 'aux_data'. It is likely the\"\n+ \"layout optimization function doesn't support the list of int for 'n_epochs'.\")\n+ else:\n+ self.embedding_list = aux_data[\"embedding_list\"]\n+\n # Assign any points that are fully disconnected from our manifold(s) to have embedding\n # coordinates of np.nan. These will be filtered by our plotting functions automatically.\n # They also prevent users from being deceived a distance query to one of these points.\n" } ]
5c20bf11a02c24e8caebf955706e21f278544bc7
dguenms/dawn-of-civilization
14.10.2018 18:34:22
MIT License
Adjust UHV goals to new wonders
- second French goal now requires the Louvre and the Metropolitain instead of the Statue of Liberty
- second Mughal goal now requires Shalimar Gardens instead of Harmandir Sahib
- second American goal now also requires Brooklyn Bridge and Golden Gate Bridge
[ { "change_type": "MODIFY", "old_path": "Assets/Python/Victory.py", "new_path": "Assets/Python/Victory.py", "diff": "@@ -213,11 +213,11 @@ dWonderGoals = {\n \tiMaya: (1, [iTempleOfKukulkan], True),\r\n \tiMoors: (1, [iMezquita], False),\r\n \tiKhmer: (0, [iWatPreahPisnulok], False),\r\n-\tiFrance: (2, [iNotreDame, iVersailles, iStatueOfLiberty, iEiffelTower], True),\r\n+\tiFrance: (2, [iNotreDame, iVersailles, iLouvre, iEiffelTower, iMetropolitain], True),\r\n \tiMali: (1, [iUniversityOfSankore], False),\r\n \tiItaly: (0, [iSanMarcoBasilica, iSistineChapel, iSantaMariaDelFiore], True),\r\n-\tiMughals: (1, [iTajMahal, iRedFort, iHarmandirSahib], True),\r\n-\tiAmerica: (1, [iStatueOfLiberty, iEmpireStateBuilding, iPentagon, iUnitedNations], True),\r\n+\tiMughals: (1, [iTajMahal, iRedFort, iShalimarGardens], True),\r\n+\tiAmerica: (1, [iStatueOfLiberty, iBrooklynBridge, iEmpireStateBuilding, iGoldenGateBridge, iPentagon, iUnitedNations], True),\r\n \tiBrazil: (1, [iWembley, iCristoRedentor, iItaipuDam], True),\r\n }\r\n \r\n@@ -241,7 +241,8 @@ def setup():\n \t\t\n \t\t# French goal needs to be winnable\n \t\tdata.setWonderBuilder(iNotreDame, iFrance)\n-\t\tdata.setWonderBuilder(iVersailles, iFrance)\n+\t\tdata.setWonderBuilder(iVersailles, iFrance)\r\n+\t\tdata.setWonderBuilder(iLouvre, iFrance)\n \t\t\n \t\t# help Congo\n \t\tdata.iCongoSlaveCounter += 500\n@@ -930,7 +931,7 @@ def checkTurn(iGameTurn, iPlayer):\n \t\t\telse:\r\n \t\t\t\tlose(iFrance, 1)\r\n \t\t\t\t\r\n-\t\t# third goal: build Notre Dame, Versailles, the Statue of Liberty and the Eiffel Tower by 1900 AD\r\n+\t\t# third goal: build Notre Dame, Versailles, the Louvre, the Eiffel Tower and the Metropolitain by 1900 AD\r\n \t\tif iGameTurn == getTurnForYear(1900):\r\n \t\t\texpire(iFrance, 2)\r\n \t\t\t\r\n@@ -1177,7 +1178,7 @@ def checkTurn(iGameTurn, iPlayer):\n \t\tif iGameTurn == getTurnForYear(1500):\r\n \t\t\texpire(iMughals, 0)\r\n \t\t\t\r\n-\t\t# second goal: build the Red Fort, Harmandir Sahib and the Taj Mahal by 1660 AD\r\n+\t\t# second goal: build the Red Fort, Shalimar Gardens and the Taj Mahal by 1660 AD\r\n \t\tif iGameTurn == getTurnForYear(1660):\r\n \t\t\texpire(iMughals, 1)\r\n \t\t\t\r\n@@ -1360,7 +1361,7 @@ def checkTurn(iGameTurn, iPlayer):\n \t\t\telse:\r\n \t\t\t\tlose(iAmerica, 0)\r\n \t\t\t\t\r\n-\t\t# second goal: build the Statue of Liberty, the Empire State Building, the Pentagon and the United Nations by 1950 AD\r\n+\t\t# second goal: build the Statue of Liberty, the Brooklyn Bridge, the Empire State Building, the Golden Gate Bridge, the Pentagon and the United Nations by 1950 AD\r\n \t\tif iGameTurn == getTurnForYear(1950):\r\n \t\t\texpire(iAmerica, 1)\r\n \t\t\t\r\n@@ -3730,9 +3731,11 @@ def getUHVHelp(iPlayer, iGoal):\n \t\telif iGoal == 2:\r\n \t\t\tbNotreDame = data.getWonderBuilder(iNotreDame) == iFrance\r\n \t\t\tbVersailles = data.getWonderBuilder(iVersailles) == iFrance\r\n-\t\t\tbStatueOfLiberty = data.getWonderBuilder(iStatueOfLiberty) == iFrance\r\n+\t\t\tbLouvre = data.getWonderBuilder(iLouvre) == iFrance\r\n \t\t\tbEiffelTower = data.getWonderBuilder(iEiffelTower) == iFrance\r\n-\t\t\taHelp.append(getIcon(bNotreDame) + localText.getText(\"TXT_KEY_BUILDING_NOTRE_DAME\", ()) + ' ' + getIcon(bVersailles) + localText.getText(\"TXT_KEY_BUILDING_VERSAILLES\", ()) + ' ' + getIcon(bStatueOfLiberty) + localText.getText(\"TXT_KEY_BUILDING_STATUE_OF_LIBERTY\", ()) + ' ' + getIcon(bEiffelTower) + localText.getText(\"TXT_KEY_BUILDING_EIFFEL_TOWER\", ()))\r\n+\t\t\tbMetropolitain = 
data.getWonderBuilder(iMetropolitain) == iFrance\r\n+\t\t\taHelp.append(getIcon(bNotreDame) + localText.getText(\"TXT_KEY_BUILDING_NOTRE_DAME\", ()) + ' ' + getIcon(bVersailles) + localText.getText(\"TXT_KEY_BUILDING_VERSAILLES\", ()) + ' ' + getIcon(bLouvre) + localText.getText(\"TXT_KEY_BUILDING_LOUVRE\", ()))\r\n+\t\t\taHelp.append(getIcon(bEiffelTower) + localText.getText(\"TXT_KEY_BUILDING_EIFFEL_TOWER\", ()) + ' ' + getIcon(bMetropolitain) + localText.getText(\"TXT_KEY_BUILDING_METROPOLITAIN\", ()))\r\n \r\n \telif iPlayer == iKhmer:\r\n \t\tif iGoal == 0:\r\n@@ -3881,9 +3884,9 @@ def getUHVHelp(iPlayer, iGoal):\n \t\t\taHelp.append(getIcon(iNumMosques >= 3) + localText.getText(\"TXT_KEY_VICTORY_MOSQUES_BUILT\", (iNumMosques, 3)))\r\n \t\telif iGoal == 1:\r\n \t\t\tbRedFort = data.getWonderBuilder(iRedFort) == iMughals\r\n-\t\t\tbHarmandirSahib = data.getWonderBuilder(iHarmandirSahib) == iMughals\r\n+\t\t\tbShalimarGardens = data.getWonderBuilder(iShalimarGardens) == iMughals\r\n \t\t\tbTajMahal = data.getWonderBuilder(iTajMahal) == iMughals\r\n-\t\t\taHelp.append(getIcon(bRedFort) + localText.getText(\"TXT_KEY_BUILDING_RED_FORT\", ()) + ' ' + getIcon(bHarmandirSahib) + localText.getText(\"TXT_KEY_BUILDING_HARMANDIR_SAHIB\", ()) + ' ' + getIcon(bTajMahal) + localText.getText(\"TXT_KEY_BUILDING_TAJ_MAHAL\", ()))\r\n+\t\t\taHelp.append(getIcon(bRedFort) + localText.getText(\"TXT_KEY_BUILDING_RED_FORT\", ()) + ' ' + getIcon(bShalimarGardens) + localText.getText(\"TXT_KEY_BUILDING_SHALIMAR_GARDENS\", ()) + ' ' + getIcon(bTajMahal) + localText.getText(\"TXT_KEY_BUILDING_TAJ_MAHAL\", ()))\r\n \t\telif iGoal == 2:\r\n \t\t\tiCulture = pMughals.countTotalCulture()\r\n \t\t\taHelp.append(getIcon(iCulture >= utils.getTurns(50000)) + localText.getText(\"TXT_KEY_VICTORY_TOTAL_CULTURE\", (iCulture, utils.getTurns(50000))))\r\n@@ -3996,10 +3999,13 @@ def getUHVHelp(iPlayer, iGoal):\n \t\t\taHelp.append(getIcon(bAmericas) + localText.getText(\"TXT_KEY_VICTORY_NO_NORTH_AMERICAN_COLONIES\", ()) + ' ' + getIcon(bMexico) + localText.getText(\"TXT_KEY_CIV_MEXICO_SHORT_DESC\", ()))\r\n \t\telif iGoal == 1:\r\n \t\t\tbUnitedNations = data.getWonderBuilder(iUnitedNations) == iAmerica\r\n+\t\t\tbBrooklynBridge = data.getWonderBuilder(iBrooklynBridge) == iAmerica\r\n \t\t\tbStatueOfLiberty = data.getWonderBuilder(iStatueOfLiberty) == iAmerica\r\n+\t\t\tbGoldenGateBridge = data.getWonderBuilder(iGoldenGateBridge) == iAmerica\r\n \t\t\tbPentagon = data.getWonderBuilder(iPentagon) == iAmerica\r\n \t\t\tbEmpireState = data.getWonderBuilder(iEmpireStateBuilding) == iAmerica\r\n-\t\t\taHelp.append(getIcon(bStatueOfLiberty) + localText.getText(\"TXT_KEY_BUILDING_STATUE_OF_LIBERTY\", ()) + ' ' + getIcon(bEmpireState) + localText.getText(\"TXT_KEY_BUILDING_EMPIRE_STATE_BUILDING\", ()) + ' ' + getIcon(bPentagon) + localText.getText(\"TXT_KEY_BUILDING_PENTAGON\", ()) + ' ' + getIcon(bUnitedNations) + localText.getText(\"TXT_KEY_BUILDING_UNITED_NATIONS\", ()))\r\n+\t\t\taHelp.append(getIcon(bStatueOfLiberty) + localText.getText(\"TXT_KEY_BUILDING_STATUE_OF_LIBERTY\", ()) + ' ' + getIcon(bBrooklynBridge) + localText.getText(\"TXT_KEY_BUILDING_BROOKLYN_BRIDGE\", ()) + ' ' + getIcon(bEmpireState) + localText.getText(\"TXT_KEY_BUILDING_EMPIRE_STATE_BUILDING\", ()))\r\n+\t\t\taHelp.append(getIcon(bGoldenGateBridge) + localText.getText(\"TXT_KEY_BUILDING_GOLDEN_GATE_BRIDGE\", ()) + ' ' + getIcon(bPentagon) + localText.getText(\"TXT_KEY_BUILDING_PENTAGON\", ()) + ' ' + getIcon(bUnitedNations) + 
localText.getText(\"TXT_KEY_BUILDING_UNITED_NATIONS\", ()))\r\n \t\telif iGoal == 2:\r\n \t\t\tiCounter = countResources(iAmerica, iOil)\r\n \t\t\taHelp.append(getIcon(iCounter >= 10) + localText.getText(\"TXT_KEY_VICTORY_OIL_SECURED\", (iCounter, 10)))\r\n" }, { "change_type": "MODIFY", "old_path": "Assets/XML/Text/Victory.xml", "new_path": "Assets/XML/Text/Victory.xml", "diff": "@@ -51,11 +51,11 @@\n \t</TEXT>\n \t<TEXT>\n \t\t<Tag>TXT_KEY_UHV_AME2</Tag>\n-\t\t<English>Build the Statue of Liberty, the Empire State Building, the Pentagon and the United Nations by 1950 AD</English>\n-\t\t<French>Construire la Statue de la Libert&#233;, l'Empire State Building, le Pentagone et les Nations Unies avant 1950 ap. J.-C.</French>\n-\t\t<German>Errichten Sie bis zum Jahr 1950 n. Chr. die Freiheitsstatue, das Empire State Building, das Pentagon und the Vereinten Nationen</German>\n-\t\t<Italian>Build the Statue of Liberty, the Empire State Building, the Pentagon and the United Nations by 1950 AD</Italian>\n-\t\t<Spanish>Build the Statue of Liberty, the Empire State Building, the Pentagon and the United Nations by 1950 AD</Spanish>\n+\t\t<English>Build the Statue of Liberty, the Brooklyn Bridge, the Empire State Building, the Golden Gate Bridge, the Pentagon and the United Nations by 1950 AD</English>\n+\t\t<French>Build the Statue of Liberty, the Brooklyn Bridge, the Empire State Building, the Golden Gate Bridge, the Pentagon and the United Nations by 1950 AD</French>\n+\t\t<German>Errichten Sie bis zum Jahr 1950 n. Chr. die Freiheitsstatue, die Brooklyn Bridge, das Empire State Building, die Golden Gate Bridge, das Pentagon und the Vereinten Nationen</German>\n+\t\t<Italian>Build the Statue of Liberty, the Brooklyn Bridge, the Empire State Building, the Golden Gate Bridge, the Pentagon and the United Nations by 1950 AD</Italian>\n+\t\t<Spanish>Build the Statue of Liberty, the Brooklyn Bridge, the Empire State Building, the Golden Gate Bridge, the Pentagon and the United Nations by 1950 AD</Spanish>\n \t</TEXT>\n \t<TEXT>\n \t\t<Tag>TXT_KEY_UHV_AME3_TITLE</Tag>\n@@ -963,9 +963,9 @@\n \t</TEXT>\n \t<TEXT>\n \t\t<Tag>TXT_KEY_UHV_FRA3</Tag>\n-\t\t<English>Build Notre Dame, Versailles, the Statue of Liberty and the Eiffel Tower by 1900 AD</English>\n-\t\t<French>Construire Notre-Dame, Versailles, la Statue de la Libert&#233; et la Tour Eiffel avant 1900 ap. J.-C.</French>\n-\t\t<German>Bauen Sie bis zum Jahr 1900 n. Chr. Notre Dame, Versailles, die Freiheitsstatue und den Eiffelturm</German>\n+\t\t<English>Build Notre Dame, Versailles, the Louvre, the Eiffel Tower and the Metropolitain by 1900 AD</English>\n+\t\t<French>Build Notre Dame, Versailles, the Louvre, the Eiffel Tower and the Metropolitain by 1900 AD</French>\n+\t\t<German>Bauen Sie bis zum Jahr 1900 n. Chr. Notre Dame, Versailles, den Louvre, den Eiffelturm und die Metropolitain</German>\n \t\t<Italian>Build Notre Dame, Versailles, the Statue of Liberty and the Eiffel Tower by 1900 AD</Italian>\n \t\t<Spanish>Build Notre Dame, Versailles, the Statue of Liberty and the Eiffel Tower by 1900 AD</Spanish>\n \t</TEXT>\n@@ -2099,11 +2099,11 @@\n \t</TEXT>\n \t<TEXT>\n \t\t<Tag>TXT_KEY_UHV_MUG2</Tag>\n-\t\t<English>Build the Red Fort, Harmandir Sahib and the Taj Mahal by 1660 AD</English>\n-\t\t<French>Construire le Fort rouge, le Harmandir Sahib et le Taj Mahal avant 1660 ap. J.-C.</French>\n-\t\t<German>Vollenden Sie bis zum Jahr 1660 n. Chr. 
das Rote Fort, Harmandir Sahib und den Taj Mahal.</German>\n-\t\t<Italian>Build the Red Fort, Harmandir Sahib and the Taj Mahal by 1660 AD</Italian>\n-\t\t<Spanish>Build the Red Fort, Harmandir Sahib and the Taj Mahal by 1660 AD</Spanish>\n+\t\t<English>Build the Red Fort, Shalimar Gardens and the Taj Mahal by 1660 AD</English>\n+\t\t<French>Build the Red Fort, Shalimar Gardens and the Taj Mahal by 1660 AD</French>\n+\t\t<German>Vollenden Sie bis zum Jahr 1660 n. Chr. das Rote Fort, die Shalimar-G&#228;rten und den Taj Mahal.</German>\n+\t\t<Italian>Build the Red Fort, Shalimar Gardens and the Taj Mahal by 1660 AD</Italian>\n+\t\t<Spanish>Build the Red Fort, Shalimar Gardens and the Taj Mahal by 1660 AD</Spanish>\n \t</TEXT>\n \t<TEXT>\n \t\t<Tag>TXT_KEY_UHV_MUG3_TITLE</Tag>\n" } ]
86edc251a6f1252bff2a34af34451e231ad87218
apache/libcloud
19.11.2019 20:22:32
Apache License 2.0
Update S3 storage driver so it supports a "region" constructor argument. This way the user can use this constructor argument instead of using a different driver class per region. Also update the code to return a more user-friendly error message if a "moved permanently" error is returned by the API.
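A hedged usage sketch based on the constructor signature in the diff below; the credentials are placeholders:

```python
from libcloud.storage.types import Provider
from libcloud.storage.providers import get_driver

cls = get_driver(Provider.S3)

# One driver class for all regions; the region is picked per instance.
driver = cls('access key id', 'secret key', region='us-west-2')

# Supported region names can be inspected on the class itself.
print(list(cls.list_regions()))
```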
[ { "change_type": "MODIFY", "old_path": "libcloud/storage/drivers/s3.py", "new_path": "libcloud/storage/drivers/s3.py", "diff": "@@ -71,6 +71,28 @@ S3_SA_EAST_HOST = 's3-sa-east-1.amazonaws.com'\n S3_SA_SOUTHEAST2_HOST = 's3-sa-east-2.amazonaws.com'\n S3_CA_CENTRAL_HOST = 's3-ca-central-1.amazonaws.com'\n \n+# Maps AWS region name to connection hostname\n+REGION_TO_HOST_MAP = {\n+ 'us-east-1': S3_US_STANDARD_HOST,\n+ 'us-east-2': S3_US_EAST2_HOST,\n+ 'us-west-1': S3_US_WEST_HOST,\n+ 'us-west-2': S3_US_WEST_OREGON_HOST,\n+ 'us-gov-west-1': S3_US_GOV_WEST_HOST,\n+ 'cn-north-1': S3_CN_NORTH_HOST,\n+ 'cn-northwest-1': S3_CN_NORTHWEST_HOST,\n+ 'eu-west-1': S3_EU_WEST_HOST,\n+ 'eu-west-2': S3_EU_WEST2_HOST,\n+ 'eu-central-1': S3_EU_CENTRAL_HOST,\n+ 'ap-south-1': S3_AP_SOUTH_HOST,\n+ 'ap-southeast-1': S3_AP_SOUTHEAST_HOST,\n+ 'ap-southeast-2': S3_AP_SOUTHEAST2_HOST,\n+ 'ap-northeast-1': S3_AP_NORTHEAST1_HOST,\n+ 'ap-northeast-2': S3_AP_NORTHEAST2_HOST,\n+ 'sa-east-1': S3_SA_EAST_HOST,\n+ 'sa-east-2': S3_SA_SOUTHEAST2_HOST,\n+ 'ca-central-1': S3_CA_CENTRAL_HOST\n+}\n+\n API_VERSION = '2006-03-01'\n NAMESPACE = 'http://s3.amazonaws.com/doc/%s/' % (API_VERSION)\n \n@@ -95,8 +117,12 @@ class S3Response(AWSBaseResponse):\n if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:\n raise InvalidCredsError(self.body)\n elif self.status == httplib.MOVED_PERMANENTLY:\n- raise LibcloudError('This bucket is located in a different ' +\n- 'region. Please use the correct driver.',\n+ bucket_region = self.headers.get('x-amz-bucket-region', None)\n+ used_region = self.connection.driver.region\n+ raise LibcloudError('This bucket is located in a different '\n+ 'region. Please use the correct driver.'\n+ 'Bucket region \"%s\", used region \"%s\"' %\n+ (bucket_region, used_region),\n driver=S3StorageDriver)\n raise LibcloudError('Unknown error. Status code: %d' % (self.status),\n driver=S3StorageDriver)\n@@ -1001,10 +1027,34 @@ class BaseS3StorageDriver(StorageDriver):\n \n \n class S3StorageDriver(AWSDriver, BaseS3StorageDriver):\n- name = 'Amazon S3 (us-east-1)'\n+ name = 'Amazon S3'\n connectionCls = S3SignatureV4Connection\n region_name = 'us-east-1'\n \n+ def __init__(self, key, secret=None, secure=True, host=None, port=None,\n+ region=None, token=None, **kwargs):\n+ # Here for backward compatibility for old and deprecated driver class per region\n+ # approach\n+ if hasattr(self, 'region_name') and not region:\n+ region = self.region_name # pylint: disable=no-member\n+\n+ self.region_name = region\n+\n+ if region and region not in REGION_TO_HOST_MAP.keys():\n+ raise ValueError('Invalid or unsupported region: %s' % (region))\n+\n+ self.name = 'Amazon S3 (%s)' % (region)\n+\n+ host = REGION_TO_HOST_MAP[region]\n+ super(S3StorageDriver, self).__init__(key=key, secret=secret,\n+ secure=secure, host=host,\n+ port=port,\n+ region=region, token=token,**kwargs)\n+\n+ @classmethod\n+ def list_regions(self):\n+ return REGION_TO_HOST_MAP.keys()\n+\n \n class S3USEast2Connection(S3SignatureV4Connection):\n host = S3_US_EAST2_HOST\n" } ]
21a78a17929f0633817c337208ab2a21dc0639f9
apache/libcloud
29.11.2019 23:13:26
Apache License 2.0
Update setup.py so it doesn't rely on any functions from libcloud packages. This way we avoid having setup.py depend on typing, requests and other modules libcloud depends on.
[ { "change_type": "MODIFY", "old_path": "setup.py", "new_path": "setup.py", "diff": "@@ -12,12 +12,14 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+\n import os\n import sys\n+import re\n+import fnmatch\n \n from setuptools import setup\n from distutils.core import Command\n-from os.path import join as pjoin\n \n try:\n import epydoc # NOQA\n@@ -25,11 +27,127 @@ try:\n except ImportError:\n has_epydoc = False\n \n+# NOTE: Those functions are intentionally moved in-line to prevent setup.py dependening on any\n+# Libcloud code which depends on libraries such as typing, enum, requests, etc.\n+# START: Taken From Twisted Python which licensed under MIT license\n+# https://github.com/powdahound/twisted/blob/master/twisted/python/dist.py\n+# https://github.com/powdahound/twisted/blob/master/LICENSE\n+\n+# Names that are excluded from globbing results:\n+EXCLUDE_NAMES = ['{arch}', 'CVS', '.cvsignore', '_darcs',\n+ 'RCS', 'SCCS', '.svn']\n+EXCLUDE_PATTERNS = ['*.py[cdo]', '*.s[ol]', '.#*', '*~', '*.py']\n+\n+\n+def _filter_names(names):\n+ \"\"\"\n+ Given a list of file names, return those names that should be copied.\n+ \"\"\"\n+ names = [n for n in names\n+ if n not in EXCLUDE_NAMES]\n+ # This is needed when building a distro from a working\n+ # copy (likely a checkout) rather than a pristine export:\n+ for pattern in EXCLUDE_PATTERNS:\n+ names = [n for n in names\n+ if not fnmatch.fnmatch(n, pattern) and not n.endswith('.py')]\n+ return names\n+\n+\n+def relative_to(base, relativee):\n+ \"\"\"\n+ Gets 'relativee' relative to 'basepath'.\n+\n+ i.e.,\n+\n+ >>> relative_to('/home/', '/home/radix/')\n+ 'radix'\n+ >>> relative_to('.', '/home/radix/Projects/Twisted')\n+ 'Projects/Twisted'\n+\n+ The 'relativee' must be a child of 'basepath'.\n+ \"\"\"\n+ basepath = os.path.abspath(base)\n+ relativee = os.path.abspath(relativee)\n+ if relativee.startswith(basepath):\n+ relative = relativee[len(basepath):]\n+ if relative.startswith(os.sep):\n+ relative = relative[1:]\n+ return os.path.join(base, relative)\n+ raise ValueError(\"%s is not a subpath of %s\" % (relativee, basepath))\n+\n+\n+def get_packages(dname, pkgname=None, results=None, ignore=None, parent=None):\n+ \"\"\"\n+ Get all packages which are under dname. This is necessary for\n+ Python 2.2's distutils. Pretty similar arguments to getDataFiles,\n+ including 'parent'.\n+ \"\"\"\n+ parent = parent or \"\"\n+ prefix = []\n+ if parent:\n+ prefix = [parent]\n+ bname = os.path.basename(dname)\n+ ignore = ignore or []\n+ if bname in ignore:\n+ return []\n+ if results is None:\n+ results = []\n+ if pkgname is None:\n+ pkgname = []\n+ subfiles = os.listdir(dname)\n+ abssubfiles = [os.path.join(dname, x) for x in subfiles]\n \n-import libcloud.utils # NOQA\n-from libcloud.utils.dist import get_packages, get_data_files # NOQA\n+ if '__init__.py' in subfiles:\n+ results.append(prefix + pkgname + [bname])\n+ for subdir in filter(os.path.isdir, abssubfiles):\n+ get_packages(subdir, pkgname=pkgname + [bname],\n+ results=results, ignore=ignore,\n+ parent=parent)\n+ res = ['.'.join(result) for result in results]\n+ return res\n+\n+\n+def get_data_files(dname, ignore=None, parent=None):\n+ \"\"\"\n+ Get all the data files that should be included in this distutils Project.\n+\n+ 'dname' should be the path to the package that you're distributing.\n+\n+ 'ignore' is a list of sub-packages to ignore. 
This facilitates\n+ disparate package hierarchies. That's a fancy way of saying that\n+ the 'twisted' package doesn't want to include the 'twisted.conch'\n+ package, so it will pass ['conch'] as the value.\n+\n+ 'parent' is necessary if you're distributing a subpackage like\n+ twisted.conch. 'dname' should point to 'twisted/conch' and 'parent'\n+ should point to 'twisted'. This ensures that your data_files are\n+ generated correctly, only using relative paths for the first element\n+ of the tuple ('twisted/conch/*').\n+ The default 'parent' is the current working directory.\n+ \"\"\"\n+ parent = parent or \".\"\n+ ignore = ignore or []\n+ result = []\n+ for directory, subdirectories, filenames in os.walk(dname):\n+ resultfiles = []\n+ for exname in EXCLUDE_NAMES:\n+ if exname in subdirectories:\n+ subdirectories.remove(exname)\n+ for ig in ignore:\n+ if ig in subdirectories:\n+ subdirectories.remove(ig)\n+ for filename in _filter_names(filenames):\n+ resultfiles.append(filename)\n+ if resultfiles:\n+ for filename in resultfiles:\n+ file_path = os.path.join(directory, filename)\n+ if parent:\n+ file_path = file_path.replace(parent + os.sep, '')\n+ result.append(file_path)\n+\n+ return result\n+# END: Taken from Twisted\n \n-libcloud.utils.SHOW_DEPRECATION_WARNING = False\n \n # Different versions of python have different requirements. We can't use\n # libcloud.utils.py3 here because it relies on backports dependency being\n@@ -76,11 +194,20 @@ if PY2_pre_27 or PY3_pre_34:\n \n def read_version_string():\n version = None\n- sys.path.insert(0, pjoin(os.getcwd()))\n- from libcloud import __version__\n- version = __version__\n- sys.path.pop(0)\n- return version\n+ cwd = os.path.dirname(os.path.abspath(__file__))\n+ version_file = os.path.join(cwd, 'libcloud/__init__.py')\n+\n+ with open(version_file) as fp:\n+ content = fp.read()\n+\n+ match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n+ content, re.M)\n+\n+ if match:\n+ version = match.group(1)\n+ return version\n+\n+ raise Exception('Cannot find version in libcloud/__init__.py')\n \n \n def forbid_publish():\n" }, { "change_type": "MODIFY", "old_path": "tox.ini", "new_path": "tox.ini", "diff": "@@ -30,12 +30,18 @@ whitelist_externals = cp\n scripts/*.sh\n [testenv:py2.7-dist]\n # Verify library installs without any dependencies\n-skipdist=False\n+skipdist = True\n+# NOTE: We intentionally set empty deps to ensure it works on a clean\n+# environment without any dependencies\n+deps =\n commands = python setup.py install\n \n [testenv:py3.7-dist]\n # Verify library installs without any dependencies\n-skipdist=False\n+skipdist = True\n+# NOTE: We intentionally set empty deps to ensure it works on a clean\n+# environment without any dependencies\n+deps =\n commands = python setup.py install\n \n [testenv:docs]\n" } ]
b9747bc011e9e9830ab147327d7aeaa8447ad2d7
apache/libcloud
20.02.2020 00:11:58
Apache License 2.0
Add new storage API methods for downloading part of an object (range download) and implement them for the S3 and local storage drivers.
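A short usage sketch following the method signatures added to the base driver in the diff below; `driver` and `obj` are assumed to be an existing storage driver instance and an object fetched from it, and the paths and byte offsets are illustrative:

```python
# 'driver' and 'obj' are assumed to exist, e.g. from list_container_objects().

# Save a byte range of the object to a local file.
driver.download_object_range(obj=obj,
                             destination_path='/tmp/part.bin',
                             start_bytes=0,
                             end_bytes=1024,
                             overwrite_existing=True)

# Or consume the same range as an in-memory iterator of chunks.
data = b''.join(driver.download_object_range_as_stream(obj=obj,
                                                       start_bytes=0,
                                                       end_bytes=1024))
```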
[ { "change_type": "MODIFY", "old_path": "libcloud/storage/base.py", "new_path": "libcloud/storage/base.py", "diff": "@@ -443,6 +443,68 @@ class StorageDriver(BaseDriver):\n raise NotImplementedError(\n 'download_object_as_stream not implemented for this driver')\n \n+ def download_object_range(self, obj, destination_path, start_bytes,\n+ end_bytes=None, overwrite_existing=False,\n+ delete_on_failure=True):\n+ # type: (Object, str, int, Optional[int], bool, bool) -> bool\n+ \"\"\"\n+ Download part of an object.\n+\n+ :param obj: Object instance.\n+ :type obj: :class:`libcloud.storage.base.Object`\n+\n+ :param destination_path: Full path to a file or a directory where the\n+ incoming file will be saved.\n+ :type destination_path: ``str``\n+\n+ :param start_bytes: Start byte offset for the range download.\n+ :type start_bytes: ``int``\n+\n+ :param end_bytes: End byte offset for the range download. If not\n+ provided, it will assume end of the file.\n+ :type end_bytes: ``int``\n+\n+ :param overwrite_existing: True to overwrite an existing file,\n+ defaults to False.\n+ :type overwrite_existing: ``bool``\n+\n+ :param delete_on_failure: True to delete a partially downloaded file if\n+ the download was not successful (hash\n+ mismatch / file size).\n+ :type delete_on_failure: ``bool``\n+\n+ :return: True if an object has been successfully downloaded, False\n+ otherwise.\n+ :rtype: ``bool``\n+\n+ \"\"\"\n+ raise NotImplementedError(\n+ 'download_object_range not implemented for this driver')\n+\n+ def download_object_range_as_stream(self, obj, start_bytes, end_bytes=None,\n+ chunk_size=None):\n+ # type: (Object, int, Optional[int], Optional[int]) -> Iterator[bytes]\n+ \"\"\"\n+ Return a iterator which yields range / part of the object data.\n+\n+ :param obj: Object instance\n+ :type obj: :class:`libcloud.storage.base.Object`\n+\n+ :param start_bytes: Start byte offset for the range download.\n+ :type start_bytes: ``int``\n+\n+ :param end_bytes: End byte offset for the range download. 
If not\n+ provided, it will assume end of the file.\n+ :type end_bytes: ``int``\n+\n+ :param chunk_size: Optional chunk size (in bytes).\n+ :type chunk_size: ``int``\n+\n+ :rtype: ``iterator`` of ``bytes``\n+ \"\"\"\n+ raise NotImplementedError(\n+ 'download_object_range_as_stream not implemented for this driver')\n+\n def upload_object(self, file_path, container, object_name, extra=None,\n verify_hash=True, headers=None):\n # type: (str, Container, str, Optional[dict], bool, Optional[Dict[str, str]]) -> Object # noqa: E501\n@@ -602,7 +664,7 @@ class StorageDriver(BaseDriver):\n \n def _save_object(self, response, obj, destination_path,\n overwrite_existing=False, delete_on_failure=True,\n- chunk_size=None):\n+ chunk_size=None, partial_download=False):\n \"\"\"\n Save object to the provided path.\n \n@@ -627,6 +689,10 @@ class StorageDriver(BaseDriver):\n (defaults to ``libcloud.storage.base.CHUNK_SIZE``, 8kb)\n :type chunk_size: ``int``\n \n+ :param partial_download: True if this is a range (partial) save,\n+ False otherwise.\n+ :type partial_download: ``bool``\n+\n :return: ``True`` on success, ``False`` otherwise.\n :rtype: ``bool``\n \"\"\"\n@@ -658,8 +724,10 @@ class StorageDriver(BaseDriver):\n file_handle.write(b(chunk))\n bytes_transferred += len(chunk)\n \n- if int(obj.size) != int(bytes_transferred):\n+ if not partial_download and int(obj.size) != int(bytes_transferred):\n # Transfer failed, support retry?\n+ # NOTE: We only perform this check if this is a regular and not a\n+ # partial / range download\n if delete_on_failure:\n try:\n os.unlink(file_path)\n" }, { "change_type": "MODIFY", "old_path": "libcloud/storage/drivers/local.py", "new_path": "libcloud/storage/drivers/local.py", "diff": "@@ -31,6 +31,7 @@ except ImportError:\n 'using pip: pip install lockfile')\n \n from libcloud.utils.files import read_in_chunks\n+from libcloud.utils.files import exhaust_iterator\n from libcloud.utils.py3 import relpath\n from libcloud.utils.py3 import u\n from libcloud.common.base import Connection\n@@ -416,6 +417,52 @@ class LocalStorageDriver(StorageDriver):\n for data in read_in_chunks(obj_file, chunk_size=chunk_size):\n yield data\n \n+ def download_object_range(self, obj, destination_path, start_bytes,\n+ end_bytes=None, overwrite_existing=False,\n+ delete_on_failure=True):\n+ base_name = os.path.basename(destination_path)\n+\n+ if not base_name and not os.path.exists(destination_path):\n+ raise LibcloudError(\n+ value='Path %s does not exist' % (destination_path),\n+ driver=self)\n+\n+ if not base_name:\n+ file_path = os.path.join(destination_path, obj.name)\n+ else:\n+ file_path = destination_path\n+\n+ if os.path.exists(file_path) and not overwrite_existing:\n+ raise LibcloudError(\n+ value='File %s already exists, but ' % (file_path) +\n+ 'overwrite_existing=False',\n+ driver=self)\n+\n+ iterator = self.download_object_range_as_stream(\n+ obj=obj,\n+ start_bytes=start_bytes,\n+ end_bytes=end_bytes)\n+\n+ with open(file_path, 'wb') as fp:\n+ fp.write(exhaust_iterator(iterator))\n+\n+ return True\n+\n+ def download_object_range_as_stream(self, obj, start_bytes, end_bytes=None,\n+ chunk_size=None):\n+ path = self.get_object_cdn_url(obj)\n+ with open(path, 'rb') as obj_file:\n+ file_size = len(obj_file.read())\n+\n+ if not end_bytes:\n+ read_bytes = file_size\n+ else:\n+ read_bytes = (file_size - end_bytes - start_bytes) - 1\n+\n+ obj_file.seek(start_bytes)\n+ data = obj_file.read(read_bytes)\n+ yield data\n+\n def upload_object(self, file_path, container, object_name, extra=None,\n 
verify_hash=True, headers=None):\n \"\"\"\n" }, { "change_type": "MODIFY", "old_path": "libcloud/storage/drivers/s3.py", "new_path": "libcloud/storage/drivers/s3.py", "diff": "@@ -112,7 +112,7 @@ RESPONSES_PER_REQUEST = 100\n class S3Response(AWSBaseResponse):\n namespace = None\n valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT,\n- httplib.BAD_REQUEST]\n+ httplib.BAD_REQUEST, httplib.PARTIAL_CONTENT]\n \n def success(self):\n i = int(self.status)\n@@ -469,6 +469,54 @@ class BaseS3StorageDriver(StorageDriver):\n 'chunk_size': chunk_size},\n success_status_code=httplib.OK)\n \n+ def download_object_range(self, obj, destination_path, start_bytes,\n+ end_bytes=None, overwrite_existing=False,\n+ delete_on_failure=True):\n+ obj_path = self._get_object_path(obj.container, obj.name)\n+\n+ range_str = 'bytes=%s-' % (start_bytes)\n+\n+ if end_bytes:\n+ range_str += str(end_bytes)\n+\n+ headers = {'Range': range_str}\n+\n+ response = self.connection.request(obj_path, method='GET',\n+ headers=headers, raw=True)\n+\n+ return self._get_object(obj=obj, callback=self._save_object,\n+ response=response,\n+ callback_kwargs={\n+ 'obj': obj,\n+ 'response': response.response,\n+ 'destination_path': destination_path,\n+ 'overwrite_existing': overwrite_existing,\n+ 'delete_on_failure': delete_on_failure,\n+ 'partial_download': True},\n+ success_status_code=httplib.PARTIAL_CONTENT)\n+\n+ def download_object_range_as_stream(self, obj, start_bytes, end_bytes=None,\n+ chunk_size=None):\n+ obj_path = self._get_object_path(obj.container, obj.name)\n+\n+ range_str = 'bytes=%s-' % (start_bytes)\n+\n+ if end_bytes:\n+ range_str += str(end_bytes)\n+\n+ headers = {'Range': range_str}\n+\n+ response = self.connection.request(obj_path, method='GET',\n+ headers=headers,\n+ stream=True, raw=True)\n+\n+ return self._get_object(\n+ obj=obj, callback=read_in_chunks,\n+ response=response,\n+ callback_kwargs={'iterator': response.iter_content(CHUNK_SIZE),\n+ 'chunk_size': chunk_size},\n+ success_status_code=httplib.PARTIAL_CONTENT)\n+\n def upload_object(self, file_path, container, object_name, extra=None,\n verify_hash=True, headers=None, ex_storage_class=None):\n \"\"\"\n" } ]
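A hedged usage sketch of the two range-download methods added above; the provider choice, credentials, container and object names are placeholders, only the method names and their signatures come from the diff.

    from libcloud.storage.types import Provider
    from libcloud.storage.providers import get_driver

    driver = get_driver(Provider.S3)('access key id', 'secret key')  # placeholder credentials
    obj = driver.get_object(container_name='backups', object_name='data.bin')

    # Save part of the object, starting at byte 0 up to end_bytes, to a local file.
    driver.download_object_range(obj, destination_path='/tmp/data.part',
                                 start_bytes=0, end_bytes=1024,
                                 overwrite_existing=True)

    # Or iterate over the same byte range without writing it to disk first.
    for chunk in driver.download_object_range_as_stream(obj, start_bytes=0, end_bytes=1024):
        pass  # process each chunk of bytes here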
76cde3007a60ced15ffa7c1eee9f6fafd5baa3c6
apache/libcloud
28.02.2020 22:01:44
Apache License 2.0
Implement Range downloads for the Azure Blobs driver and move the common method which can be re-used by the S3-based and Azure drivers into the base driver class.
[ { "change_type": "MODIFY", "old_path": "libcloud/common/azure.py", "new_path": "libcloud/common/azure.py", "diff": "@@ -48,9 +48,11 @@ class AzureResponse(XmlResponse):\n httplib.NOT_FOUND,\n httplib.CONFLICT,\n httplib.BAD_REQUEST,\n- httplib.TEMPORARY_REDIRECT\n # added TEMPORARY_REDIRECT as this can sometimes be\n # sent by azure instead of a success or fail response\n+ httplib.TEMPORARY_REDIRECT,\n+ # Used by Azure Blobs range downloads\n+ httplib.PARTIAL_CONTENT\n ]\n \n def success(self):\n" }, { "change_type": "MODIFY", "old_path": "libcloud/storage/base.py", "new_path": "libcloud/storage/base.py", "diff": "@@ -919,3 +919,26 @@ class StorageDriver(BaseDriver):\n raise ValueError('start_bytes must be smaller than end_bytes')\n \n return True\n+\n+ def _get_standard_range_str(self, start_bytes, end_bytes=None):\n+ # type: (int, Optional[int]) -> str\n+ \"\"\"\n+ Return range string which is used as a Range header value for range\n+ requests for drivers which follow standard Range header notation\n+\n+ This returns range string in the following format:\n+ bytes=<start_bytes>-<end bytes>.\n+\n+ For example:\n+\n+ bytes=1-10\n+ bytes=0-2\n+ bytes=5-\n+ bytes=100-5000\n+ \"\"\"\n+ range_str = 'bytes=%s-' % (start_bytes)\n+\n+ if end_bytes is not None:\n+ range_str += str(end_bytes)\n+\n+ return range_str\n" }, { "change_type": "MODIFY", "old_path": "libcloud/storage/drivers/azure_blobs.py", "new_path": "libcloud/storage/drivers/azure_blobs.py", "diff": "@@ -684,6 +684,50 @@ class AzureBlobsStorageDriver(StorageDriver):\n 'chunk_size': chunk_size},\n success_status_code=httplib.OK)\n \n+ def download_object_range(self, obj, destination_path, start_bytes,\n+ end_bytes=None, overwrite_existing=False,\n+ delete_on_failure=True):\n+ self._validate_start_and_end_bytes(start_bytes=start_bytes,\n+ end_bytes=end_bytes)\n+\n+ obj_path = self._get_object_path(obj.container, obj.name)\n+ headers = {'Range': self._get_standard_range_str(start_bytes,\n+ end_bytes)}\n+ response = self.connection.request(obj_path, headers=headers,\n+ raw=True, data=None)\n+\n+ return self._get_object(obj=obj, callback=self._save_object,\n+ response=response,\n+ callback_kwargs={\n+ 'obj': obj,\n+ 'response': response.response,\n+ 'destination_path': destination_path,\n+ 'overwrite_existing': overwrite_existing,\n+ 'delete_on_failure': delete_on_failure,\n+ 'partial_download': True},\n+ success_status_code=httplib.PARTIAL_CONTENT)\n+\n+ def download_object_range_as_stream(self, obj, start_bytes, end_bytes=None,\n+ chunk_size=None):\n+ self._validate_start_and_end_bytes(start_bytes=start_bytes,\n+ end_bytes=end_bytes)\n+\n+ obj_path = self._get_object_path(obj.container, obj.name)\n+\n+ headers = {'Range': self._get_standard_range_str(start_bytes,\n+ end_bytes)}\n+ response = self.connection.request(obj_path, method='GET',\n+ headers=headers,\n+ stream=True, raw=True)\n+ iterator = response.iter_content(AZURE_DOWNLOAD_CHUNK_SIZE)\n+\n+ return self._get_object(\n+ obj=obj, callback=read_in_chunks,\n+ response=response,\n+ callback_kwargs={'iterator': iterator,\n+ 'chunk_size': chunk_size},\n+ success_status_code=httplib.PARTIAL_CONTENT)\n+\n def _upload_in_chunks(self, stream, object_path, lease, meta_data,\n content_type, object_name, file_path, verify_hash,\n headers):\n" }, { "change_type": "MODIFY", "old_path": "libcloud/storage/drivers/s3.py", "new_path": "libcloud/storage/drivers/s3.py", "diff": "@@ -13,8 +13,6 @@\n # See the License for the specific language governing permissions and\n # limitations under the 
License.\n \n-from typing import Optional\n-\n import base64\n import hmac\n import time\n@@ -479,7 +477,8 @@ class BaseS3StorageDriver(StorageDriver):\n \n obj_path = self._get_object_path(obj.container, obj.name)\n \n- headers = {'Range': self._get_range_str(start_bytes, end_bytes)}\n+ headers = {'Range': self._get_standard_range_str(start_bytes,\n+ end_bytes)}\n response = self.connection.request(obj_path, method='GET',\n headers=headers, raw=True)\n \n@@ -501,7 +500,8 @@ class BaseS3StorageDriver(StorageDriver):\n \n obj_path = self._get_object_path(obj.container, obj.name)\n \n- headers = {'Range': self._get_range_str(start_bytes, end_bytes)}\n+ headers = {'Range': self._get_standard_range_str(start_bytes,\n+ end_bytes)}\n response = self.connection.request(obj_path, method='GET',\n headers=headers,\n stream=True, raw=True)\n@@ -846,19 +846,6 @@ class BaseS3StorageDriver(StorageDriver):\n delimiter=None):\n self._abort_multipart(container, upload.key, upload.id)\n \n- def _get_range_str(self, start_bytes, end_bytes=None):\n- # type: (int, Optional[int]) -> str\n- \"\"\"\n- Return range string which is used as a Range header value for range\n- requests.\n- \"\"\"\n- range_str = 'bytes=%s-' % (start_bytes)\n-\n- if end_bytes:\n- range_str += str(end_bytes)\n-\n- return range_str\n-\n def _clean_object_name(self, name):\n name = urlquote(name)\n return name\n" } ]
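A small illustration of the shared range-string helper moved into the base driver class; it mirrors the examples from the docstring above and assumes 'driver' is any StorageDriver instance.

    driver._get_standard_range_str(5)          # -> 'bytes=5-'
    driver._get_standard_range_str(0, 2)       # -> 'bytes=0-2'
    driver._get_standard_range_str(100, 5000)  # -> 'bytes=100-5000'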
715717703719d00dae7ee11916ecafd614bbcc6c
apache/libcloud
01.04.2020 13:08:01
Apache License 2.0
Allow the user to pass a "timeout" argument to the ScriptDeployment and ScriptFileDeployment classes. With this argument, the user can specify an optional command run timeout for those deployment steps.
[ { "change_type": "MODIFY", "old_path": "libcloud/compute/deployment.py", "new_path": "libcloud/compute/deployment.py", "diff": "@@ -138,8 +138,14 @@ class ScriptDeployment(Deployment):\n you are running a plan shell script.\n \"\"\"\n \n- def __init__(self, script, args=None, name=None, delete=False):\n- # type: (str, Optional[List[str]], Optional[str], bool) -> None\n+ def __init__(self,\n+ script, # type: str\n+ args=None, # type: Optional[List[str]]\n+ name=None, # type: Optional[str]\n+ delete=False, # type bool\n+ timeout=None # type: Optional[float]\n+ ):\n+ # type: (...) -> None\n \"\"\"\n :type script: ``str``\n :keyword script: Contents of the script to run.\n@@ -154,6 +160,9 @@ class ScriptDeployment(Deployment):\n \n :type delete: ``bool``\n :keyword delete: Whether to delete the script on completion.\n+\n+ :param timeout: Optional run timeout for this command.\n+ :type timeout: ``float``\n \"\"\"\n script = self._get_string_value(argument_name='script',\n argument_value=script)\n@@ -164,6 +173,7 @@ class ScriptDeployment(Deployment):\n self.stderr = None # type: Optional[str]\n self.exit_status = None # type: Optional[int]\n self.delete = delete\n+ self.timeout = timeout\n self.name = name # type: Optional[str]\n \n if self.name is None:\n@@ -202,7 +212,8 @@ class ScriptDeployment(Deployment):\n else:\n cmd = name\n \n- self.stdout, self.stderr, self.exit_status = client.run(cmd)\n+ self.stdout, self.stderr, self.exit_status = \\\n+ client.run(cmd, timeout=self.timeout)\n \n if self.delete:\n client.delete(self.name)\n@@ -234,8 +245,14 @@ class ScriptFileDeployment(ScriptDeployment):\n the script content.\n \"\"\"\n \n- def __init__(self, script_file, args=None, name=None, delete=False):\n- # type: (str, Optional[List[str]], Optional[str], bool) -> None\n+ def __init__(self,\n+ script_file, # type: str\n+ args=None, # type: Optional[List[str]]\n+ name=None, # type: Optional[str]\n+ delete=False, # type bool\n+ timeout=None # type: Optional[float]\n+ ):\n+ # type: (...) 
-> None\n \"\"\"\n :type script_file: ``str``\n :keyword script_file: Path to a file containing the script to run.\n@@ -251,6 +268,9 @@ class ScriptFileDeployment(ScriptDeployment):\n \n :type delete: ``bool``\n :keyword delete: Whether to delete the script on completion.\n+\n+ :param timeout: Optional run timeout for this command.\n+ :type timeout: ``float``\n \"\"\"\n with open(script_file, 'rb') as fp:\n content = fp.read() # type: Union[bytes, str]\n@@ -262,7 +282,8 @@ class ScriptFileDeployment(ScriptDeployment):\n super(ScriptFileDeployment, self).__init__(script=content,\n args=args,\n name=name,\n- delete=delete)\n+ delete=delete,\n+ timeout=timeout)\n \n \n class MultiStepDeployment(Deployment):\n" }, { "change_type": "MODIFY", "old_path": "libcloud/compute/ssh.py", "new_path": "libcloud/compute/ssh.py", "diff": "@@ -178,8 +178,8 @@ class BaseSSHClient(object):\n raise NotImplementedError(\n 'delete not implemented for this ssh client')\n \n- def run(self, cmd):\n- # type: (str) -> Tuple[str, str, int]\n+ def run(self, cmd, timeout=None):\n+ # type: (str, Optional[float]) -> Tuple[str, str, int]\n \"\"\"\n Run a command on a remote node.\n \n@@ -616,7 +616,7 @@ class ShellOutSSHClient(BaseSSHClient):\n \"\"\"\n return True\n \n- def run(self, cmd):\n+ def run(self, cmd, timeout=None):\n return self._run_remote_shell_command([cmd])\n \n def put(self, path, contents=None, chmod=None, mode='w'):\n" }, { "change_type": "MODIFY", "old_path": "libcloud/test/compute/test_deployment.py", "new_path": "libcloud/test/compute/test_deployment.py", "diff": "@@ -60,15 +60,19 @@ class MockDeployment(Deployment):\n \n class MockClient(BaseSSHClient):\n \n- def __init__(self, *args, **kwargs):\n+ def __init__(self, throw_on_timeout=False, *args, **kwargs):\n self.stdout = ''\n self.stderr = ''\n self.exit_status = 0\n+ self.throw_on_timeout = throw_on_timeout\n \n def put(self, path, contents, chmod=755, mode='w'):\n return contents\n \n- def run(self, name):\n+ def run(self, cmd, timeout=None):\n+ if self.throw_on_timeout and timeout is not None:\n+ raise ValueError(\"timeout\")\n+\n return self.stdout, self.stderr, self.exit_status\n \n def delete(self, name):\n@@ -120,14 +124,25 @@ class DeploymentTests(unittest.TestCase):\n sd2 = ScriptDeployment(script='foobar', delete=False)\n sd3 = ScriptDeployment(\n script='foobar', delete=False, name='foobarname')\n+ sd4 = ScriptDeployment(\n+ script='foobar', delete=False, name='foobarname', timeout=10)\n \n self.assertTrue(sd1.name.find('deployment') != '1')\n self.assertEqual(sd3.name, 'foobarname')\n+ self.assertEqual(sd3.timeout, None)\n+ self.assertEqual(sd4.timeout, 10)\n \n self.assertEqual(self.node, sd1.run(node=self.node,\n client=MockClient(hostname='localhost')))\n self.assertEqual(self.node, sd2.run(node=self.node,\n client=MockClient(hostname='localhost')))\n+ self.assertEqual(self.node, sd3.run(node=self.node,\n+ client=MockClient(hostname='localhost')))\n+\n+ assertRaisesRegex(self, ValueError, 'timeout', sd4.run,\n+ node=self.node,\n+ client=MockClient(hostname='localhost',\n+ throw_on_timeout=True))\n \n def test_script_file_deployment(self):\n file_path = os.path.abspath(__file__)\n@@ -139,6 +154,10 @@ class DeploymentTests(unittest.TestCase):\n \n sfd1 = ScriptFileDeployment(script_file=file_path)\n self.assertEqual(sfd1.script, content)\n+ self.assertEqual(sfd1.timeout, None)\n+\n+ sfd2 = ScriptFileDeployment(script_file=file_path, timeout=20)\n+ self.assertEqual(sfd2.timeout, 20)\n \n def 
test_script_deployment_relative_path(self):\n client = Mock()\n@@ -148,7 +167,7 @@ class DeploymentTests(unittest.TestCase):\n sd = ScriptDeployment(script='echo \"foo\"', name='relative.sh')\n sd.run(self.node, client)\n \n- client.run.assert_called_once_with(FILE_PATH)\n+ client.run.assert_called_once_with(FILE_PATH, timeout=None)\n \n def test_script_deployment_absolute_path(self):\n client = Mock()\n@@ -160,7 +179,7 @@ class DeploymentTests(unittest.TestCase):\n sd = ScriptDeployment(script='echo \"foo\"', name=file_path)\n sd.run(self.node, client)\n \n- client.run.assert_called_once_with(file_path)\n+ client.run.assert_called_once_with(file_path, timeout=None)\n \n def test_script_deployment_with_arguments(self):\n client = Mock()\n@@ -175,7 +194,7 @@ class DeploymentTests(unittest.TestCase):\n sd.run(self.node, client)\n \n expected = '%s arg1 arg2 --option1=test' % (file_path)\n- client.run.assert_called_once_with(expected)\n+ client.run.assert_called_once_with(expected, timeout=None)\n \n client.reset_mock()\n \n@@ -185,7 +204,7 @@ class DeploymentTests(unittest.TestCase):\n sd.run(self.node, client)\n \n expected = file_path\n- client.run.assert_called_once_with(expected)\n+ client.run.assert_called_once_with(expected, timeout=None)\n \n def test_script_file_deployment_with_arguments(self):\n file_path = os.path.abspath(__file__)\n@@ -200,7 +219,7 @@ class DeploymentTests(unittest.TestCase):\n sfd.run(self.node, client)\n \n expected = '%s arg1 arg2 --option1=test option2' % (file_path)\n- client.run.assert_called_once_with(expected)\n+ client.run.assert_called_once_with(expected, timeout=None)\n \n def test_script_deployment_and_sshkey_deployment_argument_types(self):\n class FileObject(object):\n@@ -485,6 +504,7 @@ class DeploymentTests(unittest.TestCase):\n # the arguments\n global call_count\n call_count = 0\n+\n def create_node(name, image, size, ex_custom_arg_1, ex_custom_arg_2,\n ex_foo=None, auth=None, **kwargs):\n global call_count\n" } ]
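A minimal sketch of the new 'timeout' keyword; the script contents and file path are placeholders, while the constructors and the keyword itself come from the diff above.

    from libcloud.compute.deployment import (
        ScriptDeployment, ScriptFileDeployment, MultiStepDeployment)

    # Fail the remote command if it runs for more than 10 seconds.
    step1 = ScriptDeployment(script='echo "bootstrap"', timeout=10)

    # The same keyword is accepted for scripts read from a file.
    step2 = ScriptFileDeployment(script_file='/tmp/bootstrap.sh', timeout=60)

    deployment = MultiStepDeployment([step1, step2])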
7516dd4cbaa76c8ea93e63bc0c006a12b4fa5ff1
apache/libcloud
08.12.2021 13:45:36
Apache License 2.0
Update the AWS error response XML parsing code so we also handle the situation where the response doesn't have a namespace. It looks like in some cases AWS returns an error response without a namespace, which the previous version of the code didn't handle correctly.
[ { "change_type": "MODIFY", "old_path": "libcloud/common/aws.py", "new_path": "libcloud/common/aws.py", "diff": "@@ -37,6 +37,8 @@ from libcloud.common.base import JsonResponse\n from libcloud.common.types import InvalidCredsError, MalformedResponseError\n from libcloud.utils.py3 import b, httplib, urlquote\n from libcloud.utils.xml import findtext, findall\n+from libcloud.utils.xml import findall_ignore_namespace\n+from libcloud.utils.xml import findtext_ignore_namespace\n \n __all__ = [\n \"AWSBaseResponse\",\n@@ -79,8 +81,12 @@ class AWSBaseResponse(XmlResponse):\n :return: ``tuple`` with two elements: (code, message)\n :rtype: ``tuple``\n \"\"\"\n- code = findtext(element=element, xpath=\"Code\", namespace=self.namespace)\n- message = findtext(element=element, xpath=\"Message\", namespace=self.namespace)\n+ code = findtext_ignore_namespace(\n+ element=element, xpath=\"Code\", namespace=self.namespace\n+ )\n+ message = findtext_ignore_namespace(\n+ element=element, xpath=\"Message\", namespace=self.namespace\n+ )\n \n return code, message\n \n@@ -120,7 +126,9 @@ class AWSGenericResponse(AWSBaseResponse):\n )\n \n if self.xpath:\n- errs = findall(element=body, xpath=self.xpath, namespace=self.namespace)\n+ errs = findall_ignore_namespace(\n+ element=body, xpath=self.xpath, namespace=self.namespace\n+ )\n else:\n errs = [body]\n \n" }, { "change_type": "MODIFY", "old_path": "libcloud/utils/xml.py", "new_path": "libcloud/utils/xml.py", "diff": "@@ -13,7 +13,14 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-__all__ = [\"fixxpath\", \"findtext\", \"findattr\", \"findall\"]\n+__all__ = [\n+ \"fixxpath\",\n+ \"findtext\",\n+ \"findattr\",\n+ \"findall\",\n+ \"findall_ignore_namespace\",\n+ \"findtext_ignore_namespace\",\n+]\n \n \n def fixxpath(xpath, namespace=None):\n@@ -36,9 +43,44 @@ def findtext(element, xpath, namespace=None, no_text_value=\"\"):\n return value\n \n \n+def findtext_ignore_namespace(element, xpath, namespace=None, no_text_value=\"\"):\n+ \"\"\"\n+ Special version of findtext() which first tries to find the provided value using the provided\n+ namespace and in case no results are found we fallback to the xpath lookup without namespace.\n+\n+ This is needed because some providers return some responses with namespace and some without.\n+ \"\"\"\n+\n+ result = findtext(\n+ element=element, xpath=xpath, namespace=namespace, no_text_value=no_text_value\n+ )\n+\n+ if not result and namespace:\n+ result = findtext(\n+ element=element, xpath=xpath, namespace=None, no_text_value=no_text_value\n+ )\n+\n+ return result\n+\n+\n def findattr(element, xpath, namespace=None):\n return element.findtext(fixxpath(xpath=xpath, namespace=namespace))\n \n \n def findall(element, xpath, namespace=None):\n return element.findall(fixxpath(xpath=xpath, namespace=namespace))\n+\n+\n+def findall_ignore_namespace(element, xpath, namespace=None):\n+ \"\"\"\n+ Special version of findall() which first tries to find the provided value using the provided\n+ namespace and in case no results are found we fallback to the xpath lookup without namespace.\n+\n+ This is needed because some providers return some responses with namespace and some without.\n+ \"\"\"\n+ result = findall(element=element, xpath=xpath, namespace=namespace)\n+\n+ if not result and namespace:\n+ result = findall(element=element, xpath=xpath, namespace=None)\n+\n+ return result\n" } ]
b76ed0db81b3123ede5dc5e5f1bddf36336f3722
apache/libcloud
05.03.2022 17:52:34
Apache License 2.0
Add tests which verify that the OpenStack driver can be instantiated with all the supported auth versions. NOTE: Those tests will fail right now due to regressions introduced recently which break auth for some versions.
[ { "change_type": "MODIFY", "old_path": "libcloud/test/compute/test_openstack.py", "new_path": "libcloud/test/compute/test_openstack.py", "diff": "@@ -39,6 +39,7 @@ from libcloud.utils.py3 import u\n from libcloud.common.base import LibcloudConnection\n from libcloud.common.exceptions import BaseHTTPError\n from libcloud.common.openstack_identity import OpenStackAuthenticationCache\n+from libcloud.common.openstack_identity import AUTH_VERSIONS_WITH_EXPIRES\n from libcloud.common.types import (\n InvalidCredsError,\n MalformedResponseError,\n@@ -3955,6 +3956,54 @@ class OpenStack_2_0_MockHttp(OpenStack_1_1_MockHttp):\n return (httplib.UNAUTHORIZED, \"\", {}, httplib.responses[httplib.UNAUTHORIZED])\n \n \n+class OpenStack_AllAuthVersions_MockHttp(MockHttp):\n+ def __init__(self, *args, **kwargs):\n+ super(OpenStack_AllAuthVersions_MockHttp, self).__init__(*args, **kwargs)\n+\n+ # Lazy import to avoid cyclic depedency issue\n+ from libcloud.test.common.test_openstack_identity import OpenStackIdentity_2_0_MockHttp\n+ from libcloud.test.common.test_openstack_identity import OpenStackIdentity_3_0_MockHttp\n+\n+ self.mock_http = OpenStackMockHttp(*args, **kwargs)\n+ self.mock_http_1_1 = OpenStack_1_1_MockHttp(*args, **kwargs)\n+ self.mock_http_2_0 = OpenStack_2_0_MockHttp(*args, **kwargs)\n+ self.mock_http_2_0_identity = OpenStackIdentity_2_0_MockHttp(*args, **kwargs)\n+ self.mock_http_3_0_identity = OpenStackIdentity_3_0_MockHttp(*args, **kwargs)\n+\n+ def _v1_0_slug_servers_detail(self, method, url, body, headers):\n+ return self.mock_http_1_1._v1_1_slug_servers_detail(method=method, url=url, body=body, headers=headers)\n+ return res\n+\n+ def _v1_1_auth(self, method, url, body, headers):\n+ return self.mock_http._v1_1_auth(method=method, url=url, body=body, headers=headers)\n+\n+ def _v2_0_tokens(self, method, url, body, headers):\n+ return self.mock_http_2_0._v2_0_tokens(method=method, url=url, body=body, headers=headers)\n+\n+ def _v2_1337_servers_detail(self, method, url, body, headers):\n+ return self.mock_http_2_0._v2_1337_servers_detail(method=method, url=url, body=body, headers=headers)\n+\n+ def _v2_0_tenants(self, method, url, body, headers):\n+ return self.mock_http_2_0_identity._v2_0_tenants(method=method, url=url, body=body, headers=headers)\n+\n+ def _v2_9c4693dce56b493b9b83197d900f7fba_servers_detail(self, method, url, body, headers):\n+ return self.mock_http_1_1._v1_1_slug_servers_detail(method=method, url=url, body=body, headers=headers)\n+\n+ def _v3_OS_FEDERATION_identity_providers_user_name_protocols_tenant_name_auth(\n+ self, method, url, body, headers\n+ ):\n+ return self.mock_http_3_0_identity._v3_OS_FEDERATION_identity_providers_test_user_id_protocols_test_tenant_auth(method=method, url=url, body=body, headers=headers)\n+\n+ def _v3_auth_tokens(self, method, url, body, headers):\n+ return self.mock_http_2_0._v3_auth_tokens(method=method, url=url, body=body, headers=headers)\n+\n+ def _v3_0_auth_tokens(self, method, url, body, headers):\n+ return self.mock_http_3_0_identity._v3_0_auth_tokens(method=method, url=url, body=body, headers=headers)\n+\n+ def _v3_auth_projects(self, method, url, body, headers):\n+ return self.mock_http_3_0_identity._v3_auth_projects(method=method, url=url, body=body, headers=headers)\n+\n+\n class OpenStack_1_1_Auth_2_0_Tests(OpenStack_1_1_Tests):\n driver_args = OPENSTACK_PARAMS + (\"1.1\",)\n driver_kwargs = {\"ex_force_auth_version\": \"2.0\"}\n@@ -3989,6 +4038,52 @@ class OpenStack_1_1_Auth_2_0_Tests(OpenStack_1_1_Tests):\n )\n \n \n+class 
OpenStack_AuthVersions_Tests(unittest.TestCase):\n+\n+ def setUp(self):\n+ # monkeypatch get_endpoint because the base openstack driver doesn't actually\n+ # work with old devstack but this class/tests are still used by the rackspace\n+ # driver\n+ def get_endpoint(*args, **kwargs):\n+ return \"https://servers.api.rackspacecloud.com/v1.0/slug\"\n+\n+ OpenStack_1_1_NodeDriver.connectionCls.get_endpoint = get_endpoint\n+\n+ def test_ex_force_auth_version_all_possible_values(self):\n+ \"\"\"\n+ Test case which verifies that the driver can be correctly instantiated using all the\n+ supported API versions.\n+ \"\"\"\n+ OpenStack_1_1_NodeDriver.connectionCls.conn_class = OpenStack_AllAuthVersions_MockHttp\n+ OpenStackMockHttp.type = None\n+ OpenStack_1_1_MockHttp.type = None\n+ OpenStack_2_0_MockHttp.type = None\n+\n+ cls = get_driver(Provider.OPENSTACK)\n+\n+ for auth_version in AUTH_VERSIONS_WITH_EXPIRES:\n+ driver_kwargs = {}\n+\n+ if auth_version == \"1.1\":\n+ # 1.1 is old and deprecated so we skip it\n+ pass\n+\n+ user_id = OPENSTACK_PARAMS[0]\n+ key = OPENSTACK_PARAMS[1]\n+\n+ if auth_version.startswith(\"3.x\"):\n+ driver_kwargs[\"ex_domina_name\"] = \"domain-name\"\n+ driver_kwargs[\"ex_force_service_region\"] = \"regionOne\"\n+ driver_kwargs[\"ex_tenant_name\"] = \"tenant-name\"\n+\n+ if auth_version == \"3.x_oidc_access_token\":\n+ key = \"test_key\"\n+\n+ driver = cls(user_id, key, ex_force_auth_url=\"http://x.y.z.y:5000\", ex_force_auth_version=auth_version, **driver_kwargs)\n+ nodes = driver.list_nodes()\n+ self.assertTrue(len(nodes) >= 1)\n+\n+\n class OpenStackMockAuthCache(OpenStackAuthenticationCache):\n def __init__(self):\n self.reset()\n" } ]
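A hedged sketch of instantiating the OpenStack driver with an explicit auth version, loosely following the loop in the new test; the endpoint URL, credentials and tenant values are placeholders, and '3.x_password' is one of the supported version strings.

    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver

    cls = get_driver(Provider.OPENSTACK)
    driver = cls('user', 'secret',
                 ex_force_auth_url='http://192.168.1.100:5000',
                 ex_force_auth_version='3.x_password',
                 ex_tenant_name='tenant-name',
                 ex_force_service_region='regionOne')
    nodes = driver.list_nodes()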
ceee67b50dcb9b7463c9e6bd0a4fe4af8216d4d1
usc-isi-i2/kgtk
02.12.2020 17:56:36
MIT License
Support aliasing of input files and querying from stdin. Because an input file might now be referenced by its alias, we removed various 'os.path.realpath' normalizations and use 'normalize_file_path' more selectively.
[ { "change_type": "MODIFY", "old_path": "kgtk/kypher/sqlstore.py", "new_path": "kgtk/kypher/sqlstore.py", "diff": "@@ -29,6 +29,9 @@ pp = pprint.PrettyPrinter(indent=4)\n # - absolute file names are an issue when distributing the store\n # - support some minimal sanity checking such as empty files, etc.\n # - handle column name dealiasing and normalization\n+# - explanation runs outside the sqlite connection and thus does not see\n+# user functions such as kgtk_stringify and friends which causes errors;\n+# see if we can fix this somehow\n # - support declaring and dropping of (temporary) graphs that are only used\n # once or a few times\n # - allow in-memory graphs\n@@ -45,7 +48,7 @@ pp = pprint.PrettyPrinter(indent=4)\n # - improve table definitions to define core columns as required to be not null\n # - full LRU cache maintainance, but maybe abandon the whole LRU concept and\n # call it a store and not a cache\n-# - complete literal accessor functions\n+# + complete literal accessor functions\n # + handle VACUUM and/or AUTO_VACUUM when graph tables get deleted\n # - actually no, that requires a lot of extra space, so require to do that manually\n \n@@ -423,17 +426,37 @@ class SqliteStore(SqlStore):\n \n \n ### File information and access:\n+\n+ # Each fileinfo record is identified by a name key which defaults to the full\n+ # dereferenced realpath of the file from which the graph data was loaded.\n+ # If an alias was provided that name will be stored as the key instead.\n+\n+ def normalize_file_path(self, file):\n+ if os.path.basename(file) in ('-', 'stdin'):\n+ return '/dev/stdin'\n+ else:\n+ return os.path.realpath(file)\n+\n+ def is_standard_input(self, file):\n+ return self.normalize_file_path(file) == '/dev/stdin'\n \n- def get_file_info(self, file):\n- \"\"\"Return a dict info structure for the file info for 'file' (there can only be one),\n- or None if this file does not exist in the file table. All column keys will be set\n- although some values may be None.\n+ def get_file_info(self, file, alias=None, exact=False):\n+ \"\"\"Return a dict info structure for the file info for 'file' (or 'alias') or None\n+ if this file does not exist in the file table. All column keys will be set in\n+ the result although some values may be None. 
If 'exact', use 'file' as is and\n+ do not try to normalize it to an absolute path.\n \"\"\"\n- return self.get_record_info(self.FILE_TABLE, os.path.realpath(file))\n+ info = self.get_record_info(self.FILE_TABLE, file)\n+ if info is None and alias is not None:\n+ info = self.get_record_info(self.FILE_TABLE, alias)\n+ if info is None and not exact:\n+ file = self.normalize_file_path(file)\n+ info = self.get_record_info(self.FILE_TABLE, file)\n+ return info\n \n def set_file_info(self, file, size=None, modtime=None, graph=None):\n info = sdict()\n- info.file = os.path.realpath(file)\n+ info.file = file\n info.size = size\n info.modtime = modtime\n info.graph = graph\n@@ -443,7 +466,24 @@ class SqliteStore(SqlStore):\n \"\"\"Delete the file info record for 'file'.\n IMPORTANT: this does not delete any graph data associated with 'file'.\n \"\"\"\n- self.drop_record_info(self.FILE_TABLE, os.path.realpath(file))\n+ self.drop_record_info(self.FILE_TABLE, file)\n+\n+ def set_file_alias(self, file, alias):\n+ \"\"\"Set the file column of the file info identified by 'file' (or 'alias') to 'alias'.\n+ Raises an error if no relevant file info could be found, or if 'alias' is already\n+ used in a different file info (in which case it wouldn't be a unique key anymore).\n+ \"\"\"\n+ finfo = self.get_file_info(file, alias=alias)\n+ if finfo is None:\n+ raise KGTKException('cannot set alias for non-existent file: %s' % file)\n+ ainfo = self.get_file_info(alias, exact=True)\n+ if ainfo is not None and ainfo != finfo:\n+ # this can happen if we imported 'file' without an alias, then another file\n+ # with 'alias', and then we try to associate 'alias' to 'file':\n+ raise KGTKException('alias %s is already in use for different file' % alias)\n+ # we don't have an update yet, instead we delete first and then create the new record:\n+ self.drop_file_info(finfo.file)\n+ self.set_file_info(alias, size=finfo.size, modtime=finfo.modtime, graph=finfo.graph)\n \n def get_file_graph(self, file):\n \"\"\"Return the graph table name created from the data of 'file'.\n@@ -535,29 +575,44 @@ class SqliteStore(SqlStore):\n return table\n graphid += 1\n \n- def has_graph(self, file):\n- \"\"\"Return True if the KGTK graph represented by 'file' has already been imported\n- and is up-to-date. If this returns false, an obsolete graph table for 'file'\n- might still exist and will have to be removed before new data gets imported.\n+ def has_graph(self, file, alias=None):\n+ \"\"\"Return True if the KGTK graph represented/named by 'file' (or its 'alias' if not None)\n+ has already been imported and is up-to-date. 
If this returns false, an obsolete graph\n+ table for 'file' might exist and will have to be removed before new data gets imported.\n+ This returns True iff a matching file info was found (named by 'file' or 'alias'), and\n+ 'file' is an existing regular file whose properties match exactly what was previously loaded,\n+ or 'file' is not an existing regular file in which case its properties cannot be checked.\n+ This latter case allows us to delete large files used for import without losing the ability\n+ to query them, or to query files by using their alias only instead of a real filename.\n \"\"\"\n- file = os.path.realpath(file)\n- info = self.get_file_info(file)\n+ info = self.get_file_info(file, alias=alias)\n if info is not None:\n- if info.size != os.path.getsize(file):\n- return False\n- if info.modtime != os.path.getmtime(file):\n+ if self.is_standard_input(file):\n+ # we never reuse plain stdin, it needs to be aliased to a new name for that:\n return False\n+ if os.path.exists(file):\n+ if info.size != os.path.getsize(file):\n+ return False\n+ if info.modtime != os.path.getmtime(file):\n+ return False\n # don't check md5sum for now:\n return True\n return False\n \n- def add_graph(self, file):\n- if self.has_graph(file):\n+ def add_graph(self, file, alias=None):\n+ \"\"\"Import a graph from 'file' (and optionally named by 'alias') unless a matching\n+ graph has already been imported earlier according to 'has_graph' (which see).\n+ \"\"\"\n+ if self.has_graph(file, alias=alias):\n+ if alias is not None:\n+ # this allows us to do multiple renamings:\n+ self.set_file_alias(file, alias)\n return\n- file_info = self.get_file_info(file)\n+ file_info = self.get_file_info(file, alias=alias)\n if file_info is not None:\n # we already have an earlier version of the file in store, delete its graph data:\n self.drop_graph(file_info.graph)\n+ file = self.normalize_file_path(file)\n table = self.new_graph_table()\n oldsize = self.get_db_size()\n try:\n@@ -569,8 +624,13 @@ class SqliteStore(SqlStore):\n graphsize = self.get_db_size() - oldsize\n # this isn't really needed, but we store it for now - maybe use JSON-encoding instead:\n header = str(self.get_table_header(table))\n- self.set_file_info(file, size=os.path.getsize(file), modtime=os.path.getmtime(file), graph=table)\n+ if self.is_standard_input(file):\n+ self.set_file_info(file, size=0, modtime=time.time(), graph=table)\n+ else:\n+ self.set_file_info(file, size=os.path.getsize(file), modtime=os.path.getmtime(file), graph=table)\n self.set_graph_info(table, header=header, size=graphsize, acctime=time.time())\n+ if alias is not None:\n+ self.set_file_alias(file, alias)\n \n def drop_graph(self, table_name):\n \"\"\"Delete the graph 'table_name' and all its associated info records.\n@@ -593,6 +653,8 @@ class SqliteStore(SqlStore):\n handles conversion of different kinds of line endings, but 2x slower than direct import.\n \"\"\"\n self.log(1, 'IMPORT graph via csv.reader into table %s from %s ...' 
% (table, file))\n+ if self.is_standard_input(file):\n+ file = sys.stdin\n with open_to_read(file) as inp:\n csvreader = csv.reader(inp, dialect=None, delimiter='\\t', quoting=csv.QUOTE_NONE)\n header = next(csvreader)\n@@ -609,7 +671,10 @@ class SqliteStore(SqlStore):\n \"\"\"\n if os.name != 'posix':\n raise KGTKException(\"not yet implemented for this OS: '%s'\" % os.name)\n- if not isinstance(file, str) or not os.path.exists(file):\n+ # generalizing this to work for stdin would be possible, but it would significantly complicate\n+ # matters, since we also have to check for multi-char line endings at which point we can't\n+ # simply abort to 'import_graph_data_via_csv' but would have to buffer and resupply the read data:\n+ if not isinstance(file, str) or not os.path.exists(file) or self.is_standard_input(file):\n raise KGTKException('only implemented for existing, named files')\n # make sure we have the Unix commands we need:\n catcmd = get_cat_command(file, _piped=True)\n" } ]
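A small sketch of the aliasing behavior described above; 'store' is assumed to be an already-constructed SqliteStore, and the file names are placeholders.

    store.add_graph('/data/edges.tsv', alias='edges')  # file info is keyed by the alias
    store.add_graph('-', alias='piped')                # '-' / 'stdin' normalize to /dev/stdin
    graph_table = store.get_file_graph('edges')        # the graph can now be looked up by alias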
ecaeb48e1fd7625c2a3f2854fb8cae18963f366d
usc-isi-i2/kgtk
07.04.2022 15:58:06
MIT License
Handle the special translation needed for the 'likelihood' and 'concat' functions. Also slightly refactored and cleaned up the translation of function expressions.
[ { "change_type": "MODIFY", "old_path": "kgtk/kypher/query.py", "new_path": "kgtk/kypher/query.py", "diff": "@@ -25,7 +25,7 @@ pp = pprint.PrettyPrinter(indent=4)\n # - support node property access without having to introduce the property variable in the\n # match clause first (e.g., y.salary in the multi-graph join example)\n # + support parameters in lists\n-# - support concat function (|| operator in sqlite)\n+# + support concat function (|| operator in sqlite)\n # - maybe support positional parameters $0, $1,...\n # - intelligent interpretation of ^ and $ when regex-matching to string literals?\n # - one can use kgtk_unstringify first to get to the text content\n@@ -435,6 +435,36 @@ class KgtkQuery(object):\n return graph, column, sql\n raise Exception(\"Unhandled property lookup expression: \" + str(expr))\n \n+ def function_call_to_sql(self, expr, state):\n+ function = expr.function\n+ normfun = function.upper()\n+ if normfun == 'CAST':\n+ # special-case SQLite CAST which isn't directly supported by Cypher:\n+ if len(expr.args) == 2 and isinstance(expr.args[1], parser.Variable):\n+ arg = self.expression_to_sql(expr.args[0], state)\n+ typ = expr.args[1].name\n+ return f'{function}({arg} AS {typ})'\n+ else:\n+ raise Exception(\"Illegal CAST expression\")\n+ elif normfun == 'LIKELIHOOD':\n+ # special-case SQLite LIKELIHOOD which needs a compile-time constant for its probability argument:\n+ if len(expr.args) == 2 and isinstance(expr.args[1], parser.Literal) and isinstance(expr.args[1].value, (int, float)):\n+ arg = self.expression_to_sql(expr.args[0], state)\n+ prob = expr.args[1].value\n+ return f'{function}({arg}, {prob})'\n+ else:\n+ raise Exception(\"Illegal LIKELIHOOD expression\")\n+ elif is_text_match_operator(function):\n+ return translate_text_match_op_to_sql(self, expr, state)\n+ args = [self.expression_to_sql(arg, state) for arg in expr.args]\n+ distinct = expr.distinct and 'DISTINCT ' or ''\n+ self.store.load_user_function(function, error=False)\n+ if normfun == 'CONCAT':\n+ # special-case Cypher's CONCAT function which is handled by SQLite's ||-operator:\n+ return f'({\" || \".join(args)})'\n+ else:\n+ return f'{function}({distinct}{\", \".join(args)})'\n+\n def expression_to_sql(self, expr, state):\n \"\"\"Translate a Kypher expression 'expr' into its SQL equivalent.\n \"\"\"\n@@ -487,21 +517,7 @@ class KgtkQuery(object):\n raise Exception(\"Unsupported operator: 'CASE'\")\n \n elif expr_type == parser.Call:\n- function = expr.function\n- if function.upper() == 'CAST':\n- # special-case SQLite CAST which isn't directly supported by Cypher:\n- if len(expr.args) == 2 and isinstance(expr.args[1], parser.Variable):\n- arg = self.expression_to_sql(expr.args[0], state)\n- typ = expr.args[1].name\n- return 'CAST(%s AS %s)' % (arg, typ)\n- else:\n- raise Exception(\"Illegal CAST expression\")\n- elif is_text_match_operator(function):\n- return translate_text_match_op_to_sql(self, expr, state)\n- args = [self.expression_to_sql(arg, state) for arg in expr.args]\n- distinct = expr.distinct and 'DISTINCT ' or ''\n- self.store.load_user_function(function, error=False)\n- return function + '(' + distinct + ', '.join(args) + ')'\n+ return self.function_call_to_sql(expr, state)\n \n elif expr_type == parser.Expression2:\n graph, column, sql = self.property_to_sql(expr, state)\n" } ]
c96f073d4a6141477a486faa80c760c958e2e119
usc-isi-i2/kgtk
06.06.2022 10:46:12
MIT License
Handle translation of new computed virtual graph patterns - uses new virtual graph function machinery for translation - ensures we don't try to create indexes on virtual tables
[ { "change_type": "MODIFY", "old_path": "kgtk/kypher/query.py", "new_path": "kgtk/kypher/query.py", "diff": "@@ -319,6 +319,17 @@ class KgtkQuery(object):\n node1 = clause[0]\n rel = clause[1]\n node2 = clause[2]\n+\n+ if rel.labels is not None and SqlFunction.is_virtual_graph(rel.labels[0]):\n+ # special-case translation of virtual graph pattern clauses:\n+ vgraph = rel.labels[0]\n+ vgraphfn = SqlFunction.get_function(vgraph, store=self.store)\n+ vgraphfn.translate_call_to_sql(self, clause, state)\n+ # in case the translator already called 'load':\n+ state.register_vtable(vgraphfn.get_name(), vgraphfn)\n+ vgraphfn.load()\n+ state.register_vtable(vgraphfn.get_name(), vgraphfn)\n+ return\n \n node1col = self.get_node1_column(graph)\n if node1.labels is not None:\n@@ -827,12 +838,20 @@ class KgtkQuery(object):\n restrictions = state.get_match_clause_restrictions(match_clause)\n if len(joins) > 0:\n for (g1, c1), (g2, c2) in joins:\n- indexes.add((self.graph_alias_to_graph(g1), c1))\n- indexes.add((self.graph_alias_to_graph(g2), c2))\n+ g1 = self.graph_alias_to_graph(g1)\n+ g2 = self.graph_alias_to_graph(g2)\n+ # do not create any indexes on virtual tables:\n+ if state.lookup_vtable(g1) is None:\n+ indexes.add((g1, c1))\n+ if state.lookup_vtable(g2) is None:\n+ indexes.add((g2, c2))\n if len(restrictions) > 0:\n # even if we have joins, we might need additional indexes on restricted columns:\n for (g, c), val in restrictions:\n- indexes.add((self.graph_alias_to_graph(g), c))\n+ # do not create any indexes on virtual tables:\n+ if state.lookup_vtable(g) is None:\n+ g = self.graph_alias_to_graph(g)\n+ indexes.add((g, c))\n return indexes\n \n def get_explicit_graph_index_specs(self):\n@@ -984,6 +1003,7 @@ class TranslationState(object):\n self.literal_map = {} # maps Kypher literals onto parameter placeholders\n self.variable_map = {} # maps Kypher variables onto representative (graph, col) SQL columns\n self.alias_map = {} # maps tables to their aliases and vice versa\n+ self.vtable_map = {} # maps referenced virtual table names to their SqlFunction object\n self.match_clause = None # match clause we are currently processing\n self.match_clause_info = {} # maps match clauses onto joins, restrictions, etc. encountered\n self.sql = None # final SQL translation of 'query'\n@@ -1052,6 +1072,21 @@ class TranslationState(object):\n raise Exception('Internal error: alias map exhausted')\n else:\n raise Exception(f'No aliases defined for {table}')\n+\n+ def get_vtable_map(self):\n+ return self.vtable_map\n+\n+ def register_vtable(self, vtable_name, sql_func):\n+ \"\"\"Register that the virtual table 'vtable_name' has been referenced and\n+ is implemented by SqlFunction 'sql_func'.\n+ \"\"\"\n+ self.vtable_map[vtable_name] = sql_func\n+\n+ def lookup_vtable(self, vtable_name):\n+ \"\"\"Lookup the SqlFunction implementing the virtual table 'vtable_name'.\n+ Return None if 'vtable_name' is not a registered virtual table.\n+ \"\"\"\n+ return self.vtable_map.get(vtable_name)\n \n def get_match_clause(self):\n \"\"\"Return the current match clause.\"\"\"\n@@ -1196,6 +1231,7 @@ class TranslationState(object):\n ### Text match support\n \n # This is a bit messy and idiosyncratic, so we are keeping it outside the regular translator.\n+# TO DO: see if we can refactor and package this better with the new SqlFunction API\n \n TEXTMATCH_OPERATORS = {'TEXTMATCH': 'match', 'TEXTLIKE': 'like', 'TEXTGLOB': 'glob',\n 'MATCHSCORE': 'score', 'BM25': 'score'}\n" } ]
6afb6c9edeeeb48ef04fa62f00cca10837b2d349
usc-isi-i2/kgtk
13.09.2022 15:46:52
MIT License
Support a dont-optimize switch on all or individual match clauses. This will use a CROSS JOIN operator where appropriate, which in SQLite will disable the query optimizer for those joins and process the joins in the order listed.
[ { "change_type": "MODIFY", "old_path": "kgtk/kypher/query.py", "new_path": "kgtk/kypher/query.py", "diff": "@@ -162,7 +162,8 @@ class KgtkQuery(object):\n def __init__(self, files, store, options=None, query=None,\n match='()', where=None, optionals=None, with_=None,\n ret='*', order=None, skip=None, limit=None, multi=None,\n- parameters={}, index='auto', force=False, loglevel=0):\n+ parameters={}, index='auto', force=False, dont_optimize=False,\n+ loglevel=0):\n self.options = options or {}\n self.store = store\n self.loglevel = loglevel\n@@ -209,6 +210,7 @@ class KgtkQuery(object):\n if multi is not None and multi < 1:\n raise Exception(f'Illegal multi-edge value: {multi}')\n self.multi_edge = multi\n+ self.dont_optimize = dont_optimize\n \n # process/import files after we parsed the query, so we get syntax errors right away:\n self.files = []\n@@ -304,6 +306,21 @@ class KgtkQuery(object):\n node1 = clause[0]\n return node1._graph_alias\n \n+ def get_pattern_clause_match_clause(self, clause):\n+ \"\"\"Return the match clause this pattern 'clause' belongs to,\n+ raise an error if it cannot be found.\n+ \"\"\"\n+ node1 = clause[0]\n+ if hasattr(node1, '_match_clause'):\n+ return node1._match_clause\n+ for mclause in self.get_match_clauses():\n+ for pclause in mclause.get_pattern_clauses():\n+ # currently we rely on object identity to make the connection:\n+ if pclause is clause:\n+ node1._match_clause = mclause\n+ return mclause\n+ raise KGTKException('failed to link pattern clause to match clause')\n+ \n # in case we have aliases which could be different in every graph, stubs for now:\n def get_node1_column(self, graph):\n return 'node1'\n@@ -643,6 +660,9 @@ class KgtkQuery(object):\n \"\"\"\n i = 1\n for match_clause in self.get_match_clauses():\n+ # we initialize this here for now; conceivably we could also invent\n+ # some per-clause syntax for it, for example, --match! 
and --opt!:\n+ match_clause.dont_optimize = self.dont_optimize\n for clause in match_clause.get_pattern_clauses():\n graph = self.get_pattern_clause_graph(clause)\n # create per-clause graph table alias for self-joins (keep the DB qualifier\n@@ -656,11 +676,17 @@ class KgtkQuery(object):\n \"\"\"Return the set of graph table names with aliases referenced by this 'match_clause'.\n \"\"\"\n graphs = set()\n+ graphs_list = []\n for clause in match_clause.get_pattern_clauses():\n graph_table = self.get_pattern_clause_graph(clause)\n graph_alias = self.get_pattern_clause_graph_alias(clause)\n- graphs.add((graph_table, graph_alias))\n- return graphs\n+ graph = (graph_table, graph_alias)\n+ if graph not in graphs:\n+ graphs.add(graph)\n+ graphs_list.append(graph)\n+ # extra logic so we preserve the original graph order in the standard case\n+ # to not upset the optimization of any existing queries in the wild:\n+ return graphs_list if match_clause.dont_optimize else graphs\n \n def get_all_match_clause_graphs(self):\n \"\"\"Return the set of graph table names with aliases referenced by this query.\n@@ -673,10 +699,19 @@ class KgtkQuery(object):\n graphs.add((graph_table, graph_alias))\n return graphs\n \n- def graph_names_to_sql(self, graphs):\n+ def graph_names_to_sql_join(self, graphs, dont_optimize=False, append=False):\n \"\"\"Translate a list of (graph, alias) pairs into an SQL table list with aliases.\n+ Choose the appropriate INNER or CROSS join operator based on 'dont_optimize'\n+ (note that this is an SQLite-specific idiom to disable the query optimizer).\n+ If 'append', append to an existing join starting with the respective join operator.\n \"\"\"\n- return ', '.join([g + ' AS ' + a for g, a in sorted(listify(graphs))])\n+ if not dont_optimize:\n+ # preserve the original graph order in the standard case to not\n+ # upset the optimization of any existing queries in the wild:\n+ graphs = sorted(listify(graphs))\n+ appendop = (dont_optimize and 'CROSS JOIN ' or 'INNER JOIN ') if append else ''\n+ joinop = (dont_optimize and ' CROSS JOIN ' or ', ')\n+ return appendop + joinop.join([g + ' AS ' + a for g, a in graphs])\n \n def match_clause_to_sql(self, match_clause, state):\n \"\"\"Translate a strict or optional 'match_clause' into a set of source tables,\n@@ -686,7 +721,11 @@ class KgtkQuery(object):\n a bit wild and wooly and will likely need further refinement down the road.\n \"\"\"\n state.set_match_clause(match_clause)\n- clause_sources = sorted(list(self.get_match_clause_graphs(match_clause)))\n+ clause_sources = list(self.get_match_clause_graphs(match_clause))\n+ if not match_clause.dont_optimize:\n+ # preserve the original graph order in the standard case to not\n+ # upset the optimization of any existing queries in the wild:\n+ clause_sources = sorted(clause_sources)\n primary_source = clause_sources[0]\n sources = clause_sources.copy()\n \n@@ -720,6 +759,12 @@ class KgtkQuery(object):\n internal_condition.append(where)\n internal_condition = '\\n AND '.join(internal_condition)\n external_condition = '\\n AND '.join(external_condition)\n+\n+ if match_clause.dont_optimize:\n+ # order joined tables in the order they appear in clause_sources:\n+ joined = [(graph, clause_sources.index(graph)) for graph in joined]\n+ joined.sort(key=lambda x: x[1])\n+ joined = [x[0] for x in joined]\n \n return sources, joined, internal_condition, external_condition\n \n@@ -774,9 +819,9 @@ class KgtkQuery(object):\n assert not ext_condition, 'INTERNAL ERROR: unexpected match clause'\n \n where = []\n- 
query.write('\\nFROM %s' % self.graph_names_to_sql(sources + aux_tables))\n+ query.write(f'\\nFROM {self.graph_names_to_sql_join(sources + aux_tables)}')\n if joined:\n- query.write('\\nINNER JOIN %s' % self.graph_names_to_sql(joined))\n+ query.write(f'\\n{self.graph_names_to_sql_join(joined, dont_optimize=self.match_clause.dont_optimize, append=True)}')\n if int_condition:\n if joined:\n query.write('\\nON %s' % int_condition)\n@@ -790,12 +835,13 @@ class KgtkQuery(object):\n aux_tables = list(state.get_match_clause_aux_tables(opt_clause))\n if len(sources) > 1 and not self.force:\n raise Exception('optional clause generates a cross-product which can be very expensive, use --force to override')\n+ # FIXME: nested optionals are broken, for one, the aliases becomes shielded and inaccessible in the ext_condition:\n nested = len(joined) > 0\n- query.write('\\nLEFT JOIN %s%s' % (nested and '(' or '', self.graph_names_to_sql(sources + aux_tables)))\n+ query.write(f'\\nLEFT JOIN {\"(\" if nested else \"\"}{self.graph_names_to_sql_join(sources + aux_tables)}')\n if nested:\n- query.write('\\n INNER JOIN %s' % self.graph_names_to_sql(joined))\n+ query.write(f'\\n {self.graph_names_to_sql_join(joined, dont_optimize=opt_clause.dont_optimize, append=True)}')\n query.write('\\n ON %s)' % int_condition.replace('\\n', '\\n '))\n- query.write('\\nON %s' % ext_condition)\n+ query.write(f'\\nON {ext_condition}')\n else:\n query.write('\\nON %s' % '\\n AND '.join(listify(ext_condition) + listify(int_condition)))\n \n" } ]
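A rough sketch of passing the new flag when building a query; the file name, match pattern and store are hypothetical, only the 'dont_optimize' keyword (and the constructor it extends) comes from the diff.

    query = KgtkQuery(files=['graph.tsv'], store=store,
                      match='(x)-[r]->(y)',
                      ret='x, y',
                      dont_optimize=True)  # joins are emitted as CROSS JOIN, in the order listed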
350f45431d4dbc93ac774e091150c8593a8b458e
gammapy/gammapy
01.08.2018 17:10:21
BSD 3-Clause New or Revised License
Change to use argument name "geom" consistently. Previously we had a mix of "geom" and "ref_geom" in Gammapy.
[ { "change_type": "MODIFY", "old_path": "gammapy/astro/darkmatter/tests/test_utils.py", "new_path": "gammapy/astro/darkmatter/tests/test_utils.py", "diff": "@@ -14,7 +14,7 @@ def geom():\n \n @pytest.fixture(scope=\"session\")\n def jfact():\n- jfactory = JFactory(ref_geom=geom(), profile=profiles.NFWProfile(), distance=8 * u.kpc)\n+ jfactory = JFactory(geom=geom(), profile=profiles.NFWProfile(), distance=8 * u.kpc)\n return jfactory.compute_jfactor()\n \n \n" }, { "change_type": "MODIFY", "old_path": "gammapy/astro/darkmatter/utils.py", "new_path": "gammapy/astro/darkmatter/utils.py", "diff": "@@ -20,7 +20,7 @@ class JFactory(object):\n \n Parameters\n ----------\n- ref_geom : `~gammapy.maps.WcsGeom`\n+ geom : `~gammapy.maps.WcsGeom`\n Reference geometry\n profile : `~gammapy.astro.darkmatter.profiles.DMProfile`\n Dark matter profile\n@@ -28,8 +28,8 @@ class JFactory(object):\n Distance to convert angular scale of the map\n \"\"\"\n \n- def __init__(self, ref_geom, profile, distance):\n- self.ref_geom = ref_geom\n+ def __init__(self, geom, profile, distance):\n+ self.geom = geom\n self.profile = profile\n self.distance = distance\n \n@@ -42,7 +42,7 @@ class JFactory(object):\n \n TODO: Needs to be implemented more efficiently\n \"\"\"\n- separation = self.ref_geom.separation(self.ref_geom.center_skydir)\n+ separation = self.geom.separation(self.geom.center_skydir)\n rmin = separation.rad * self.distance\n rmax = self.distance\n val = [self.profile.integral(_, rmax) for _ in rmin.flatten()]\n@@ -59,7 +59,7 @@ class JFactory(object):\n \n \"\"\"\n diff_jfact = self.compute_differential_jfactor()\n- jfact = diff_jfact * self.ref_geom.to_image().solid_angle()\n+ jfact = diff_jfact * self.geom.to_image().solid_angle()\n return jfact\n \n \n" }, { "change_type": "MODIFY", "old_path": "gammapy/cube/background.py", "new_path": "gammapy/cube/background.py", "diff": "@@ -11,7 +11,7 @@ __all__ = [\n ]\n \n \n-def make_map_background_irf(pointing, livetime, bkg, ref_geom, offset_max, n_integration_bins=1):\n+def make_map_background_irf(pointing, livetime, bkg, geom, offset_max, n_integration_bins=1):\n \"\"\"Compute background map from background IRFs.\n \n TODO: Call a method on bkg that returns integral over energy bin directly\n@@ -25,7 +25,7 @@ def make_map_background_irf(pointing, livetime, bkg, ref_geom, offset_max, n_int\n Observation livetime\n bkg : `~gammapy.irf.Background3D`\n Background rate model\n- ref_geom : `~gammapy.maps.WcsGeom`\n+ geom : `~gammapy.maps.WcsGeom`\n Reference geometry\n offset_max : `~astropy.coordinates.Angle`\n Maximum field of view offset\n@@ -41,9 +41,9 @@ def make_map_background_irf(pointing, livetime, bkg, ref_geom, offset_max, n_int\n # TODO: properly transform FOV to sky coordinates\n # For now we assume the background is radially symmetric\n \n- energy_axis = ref_geom.axes[0]\n+ energy_axis = geom.axes[0]\n # Compute offsets of all pixels\n- map_coord = ref_geom.get_coord()\n+ map_coord = geom.get_coord()\n # Retrieve energies from map coordinates\n energy_reco = map_coord[energy_axis.name] * energy_axis.unit\n # TODO: go from SkyCoord to FOV coordinates. 
Here assume symmetric geometry for fov_lon, fov_lat\n@@ -59,7 +59,7 @@ def make_map_background_irf(pointing, livetime, bkg, ref_geom, offset_max, n_int\n n_integration_bins=n_integration_bins,\n )\n \n- d_omega = ref_geom.solid_angle()\n+ d_omega = geom.solid_angle()\n data = (data_int * d_omega * livetime).to('').value\n \n # Put exposure outside offset max to zero\n@@ -67,7 +67,7 @@ def make_map_background_irf(pointing, livetime, bkg, ref_geom, offset_max, n_int\n offset = np.sqrt(fov_lon ** 2 + fov_lat ** 2)\n data[:, offset[0, :, :] >= offset_max] = 0\n \n- return WcsNDMap(ref_geom, data=data)\n+ return WcsNDMap(geom, data=data)\n \n \n def make_map_background_fov(acceptance_map, counts_map, exclusion_mask):\n" }, { "change_type": "MODIFY", "old_path": "gammapy/cube/counts.py", "new_path": "gammapy/cube/counts.py", "diff": "@@ -44,7 +44,7 @@ def fill_map_counts(count_map, event_list):\n count_map.fill_by_coord(coord_dict)\n \n \n-def make_map_counts(events, ref_geom, pointing, offset_max):\n+def make_map_counts(events, geom, pointing, offset_max):\n \"\"\"Build a WcsNDMap (space - energy) with events from an EventList.\n \n The energy of the events is used for the non-spatial axis.\n@@ -53,7 +53,7 @@ def make_map_counts(events, ref_geom, pointing, offset_max):\n ----------\n events : `~gammapy.data.EventList`\n Event list\n- ref_geom : `~gammapy.maps.WcsGeom`\n+ geom : `~gammapy.maps.WcsGeom`\n Reference WcsGeom object used to define geometry (space - energy)\n pointing : `~astropy.coordinates.SkyCoord`\n Pointing direction\n@@ -65,11 +65,11 @@ def make_map_counts(events, ref_geom, pointing, offset_max):\n cntmap : `~gammapy.maps.WcsNDMap`\n Count cube (3D) in true energy bins\n \"\"\"\n- counts_map = WcsNDMap(ref_geom)\n+ counts_map = WcsNDMap(geom)\n fill_map_counts(counts_map, events)\n \n # Compute and apply FOV offset mask\n- offset = ref_geom.separation(pointing)\n+ offset = geom.separation(pointing)\n offset_mask = offset >= offset_max\n counts_map.data[:, offset_mask] = 0\n \n" }, { "change_type": "MODIFY", "old_path": "gammapy/cube/exposure.py", "new_path": "gammapy/cube/exposure.py", "diff": "@@ -8,7 +8,7 @@ __all__ = [\n ]\n \n \n-def make_map_exposure_true_energy(pointing, livetime, aeff, ref_geom, offset_max):\n+def make_map_exposure_true_energy(pointing, livetime, aeff, geom, offset_max):\n \"\"\"Compute exposure WcsNDMap in true energy (i.e. 
not convolved by Edisp).\n \n Parameters\n@@ -19,7 +19,7 @@ def make_map_exposure_true_energy(pointing, livetime, aeff, ref_geom, offset_max\n Livetime\n aeff : `~gammapy.irf.EffectiveAreaTable2D`\n Effective area table\n- ref_geom : `~gammapy.maps.WcsGeom`\n+ geom : `~gammapy.maps.WcsGeom`\n Reference WcsGeom object used to define geometry (space - energy)\n offset_max : `~astropy.coordinates.Angle`\n Maximum field of view offset.\n@@ -29,12 +29,12 @@ def make_map_exposure_true_energy(pointing, livetime, aeff, ref_geom, offset_max\n expmap : `~gammapy.maps.WcsNDMap`\n Exposure cube (3D) in true energy bins\n \"\"\"\n- offset = ref_geom.separation(pointing)\n+ offset = geom.separation(pointing)\n \n # Retrieve energies from WcsNDMap\n # Note this would require a log_center from the geometry\n # Or even better edges, but WcsNDmap does not really allows it.\n- energy = ref_geom.axes[0].center * ref_geom.axes[0].unit\n+ energy = geom.axes[0].center * geom.axes[0].unit\n \n exposure = aeff.data.evaluate(offset=offset, energy=energy)\n exposure *= livetime\n@@ -50,4 +50,4 @@ def make_map_exposure_true_energy(pointing, livetime, aeff, ref_geom, offset_max\n \n data = exposure.to('m2 s')\n \n- return WcsNDMap(ref_geom, data)\n+ return WcsNDMap(geom, data)\n" }, { "change_type": "MODIFY", "old_path": "gammapy/cube/make.py", "new_path": "gammapy/cube/make.py", "diff": "@@ -21,7 +21,7 @@ class MapMaker(object):\n \n Parameters\n ----------\n- ref_geom : `~gammapy.maps.WcsGeom`\n+ geom : `~gammapy.maps.WcsGeom`\n Reference image geometry\n offset_max : `~astropy.coordinates.Angle`\n Maximum offset angle\n@@ -31,19 +31,19 @@ class MapMaker(object):\n unless you want only fully contained observations to be added to the map\n \"\"\"\n \n- def __init__(self, ref_geom, offset_max, cutout_mode=\"trim\"):\n+ def __init__(self, geom, offset_max, cutout_mode=\"trim\"):\n self.offset_max = Angle(offset_max)\n- self.ref_geom = ref_geom\n+ self.geom = geom\n \n # We instantiate the end products of the MakeMaps class\n- self.counts_map = WcsNDMap(self.ref_geom)\n+ self.counts_map = WcsNDMap(self.geom)\n \n- self.exposure_map = WcsNDMap(self.ref_geom, unit=\"m2 s\")\n+ self.exposure_map = WcsNDMap(self.geom, unit=\"m2 s\")\n \n- self.background_map = WcsNDMap(self.ref_geom)\n+ self.background_map = WcsNDMap(self.geom)\n \n # We will need this general exclusion mask for the analysis\n- self.exclusion_map = WcsNDMap(self.ref_geom)\n+ self.exclusion_map = WcsNDMap(self.geom)\n self.exclusion_map.data += 1\n \n self.cutout_mode = cutout_mode\n" }, { "change_type": "MODIFY", "old_path": "gammapy/cube/psf_map.py", "new_path": "gammapy/cube/psf_map.py", "diff": "@@ -13,7 +13,7 @@ __all__ = [\n ]\n \n \n-def make_psf_map(psf, pointing, ref_geom, max_offset):\n+def make_psf_map(psf, pointing, geom, max_offset):\n \"\"\"Make a psf map for a single observation\n \n Expected axes : rad and true energy in this specific order\n@@ -25,7 +25,7 @@ def make_psf_map(psf, pointing, ref_geom, max_offset):\n the PSF IRF\n pointing : `~astropy.coordinates.SkyCoord`\n the pointing direction\n- ref_geom : `~gammapy.maps.MapGeom`\n+ geom : `~gammapy.maps.MapGeom`\n the map geom to be used. 
It provides the target geometry.\n rad and true energy axes should be given in this specific order.\n max_offset : `~astropy.coordinates.Angle`\n@@ -36,14 +36,14 @@ def make_psf_map(psf, pointing, ref_geom, max_offset):\n psfmap : `~gammapy.cube.PSFMap`\n the resulting PSF map\n \"\"\"\n- energy_axis = ref_geom.get_axis_by_name('energy_true')\n+ energy_axis = geom.get_axis_by_name('energy_true')\n energy = energy_axis.center * energy_axis.unit\n \n- rad_axis = ref_geom.get_axis_by_name('theta')\n+ rad_axis = geom.get_axis_by_name('theta')\n rad = Angle(rad_axis.center, unit=rad_axis.unit)\n \n # Compute separations with pointing position\n- separations = pointing.separation(ref_geom.to_image().get_coord().skycoord)\n+ separations = pointing.separation(geom.to_image().get_coord().skycoord)\n valid = np.where(separations < max_offset)\n \n # Compute PSF values\n@@ -53,7 +53,7 @@ def make_psf_map(psf, pointing, ref_geom, max_offset):\n psf_values = np.transpose(psf_values, axes=(2, 0, 1))\n \n # Create Map and fill relevant entries\n- psfmap = Map.from_geom(ref_geom, unit='sr-1')\n+ psfmap = Map.from_geom(geom, unit='sr-1')\n psfmap.data[:, :, valid[0], valid[1]] += psf_values.to(psfmap.unit).value\n \n return PSFMap(psfmap)\n" }, { "change_type": "MODIFY", "old_path": "gammapy/cube/tests/test_exposure.py", "new_path": "gammapy/cube/tests/test_exposure.py", "diff": "@@ -30,7 +30,7 @@ def test_make_map_exposure_true_energy(aeff, counts_cube):\n pointing=SkyCoord(83.633, 21.514, unit='deg'),\n livetime='1581.17 s',\n aeff=aeff,\n- ref_geom=counts_cube.geom,\n+ geom=counts_cube.geom,\n offset_max=Angle('2.2 deg'),\n )\n \n" }, { "change_type": "MODIFY", "old_path": "gammapy/cube/tests/test_fit.py", "new_path": "gammapy/cube/tests/test_fit.py", "diff": "@@ -38,7 +38,7 @@ def exposure(geom):\n pointing=SkyCoord(1, 0.5, unit='deg', frame='galactic'),\n livetime='1 hour',\n aeff=aeff,\n- ref_geom=geom,\n+ geom=geom,\n offset_max=offset_max,\n )\n return exposure_map\n" } ]
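The record above is a purely mechanical keyword rename (`ref_geom` to `geom`) across the gammapy.astro and gammapy.cube map-making helpers. As a hedged illustration only — this helper is not part of the commit, which edited the call sites directly — the sketch below shows how such a rename can be applied consistently to source text; the example string mirrors the JFactory test fixture from the diff.

```python
import re

def rename_keyword(source, old="ref_geom", new="geom"):
    # \b word boundaries make sure only the whole identifier is replaced,
    # never a substring of a longer name
    return re.sub(rf"\b{old}\b", new, source)

print(rename_keyword(
    "jfactory = JFactory(ref_geom=geom(), profile=profiles.NFWProfile(), distance=8 * u.kpc)"
))
# -> jfactory = JFactory(geom=geom(), profile=profiles.NFWProfile(), distance=8 * u.kpc)
```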
537996597fb34fb01cf0a14c3618f7fe1258ff3c
gammapy/gammapy
22.01.2020 12:06:54
BSD 3-Clause New or Revised License
Add auto-generated unique names - add auto-generated unique names for models and datasets on init and copy - enforce name uniqueness on datasets and skymodels
[ { "change_type": "MODIFY", "old_path": "gammapy/cube/fit.py", "new_path": "gammapy/cube/fit.py", "diff": "@@ -11,7 +11,7 @@ from gammapy.cube.edisp_map import EDispMap\n from gammapy.cube.psf_kernel import PSFKernel\n from gammapy.cube.psf_map import PSFMap\n from gammapy.data import GTI\n-from gammapy.irf import EffectiveAreaTable, EDispKernel\n+from gammapy.irf import EDispKernel, EffectiveAreaTable\n from gammapy.maps import Map, MapAxis\n from gammapy.modeling import Dataset, Parameters\n from gammapy.modeling.models import BackgroundModel, SkyModel, SkyModels\n@@ -19,7 +19,7 @@ from gammapy.modeling.parameter import _get_parameters_str\n from gammapy.spectrum import SpectrumDataset, SpectrumDatasetOnOff\n from gammapy.stats import cash, cash_sum_cython, wstat\n from gammapy.utils.random import get_random_state\n-from gammapy.utils.scripts import make_path\n+from gammapy.utils.scripts import make_name, make_path\n from .exposure import _map_spectrum_weight\n \n __all__ = [\"MapDataset\", \"MapDatasetOnOff\"]\n@@ -81,7 +81,7 @@ class MapDataset(Dataset):\n psf=None,\n edisp=None,\n background_model=None,\n- name=\"\",\n+ name=None,\n evaluation_mode=\"local\",\n mask_safe=None,\n gti=None,\n@@ -100,10 +100,14 @@ class MapDataset(Dataset):\n self.edisp = edisp\n self.background_model = background_model\n self.models = models\n- self.name = name\n self.mask_safe = mask_safe\n self.gti = gti\n \n+ if name is None:\n+ self.name = make_name()\n+ else:\n+ self.name = name\n+\n # check whether a reference geom is defined\n _ = self._geom\n \n@@ -257,8 +261,10 @@ class MapDataset(Dataset):\n elif self.mask_fit is not None:\n return self.mask_fit.geom\n else:\n- raise ValueError(\"Either 'counts', 'background_model', 'mask_fit'\"\n- \" or 'mask_safe' must be defined.\")\n+ raise ValueError(\n+ \"Either 'counts', 'background_model', 'mask_fit'\"\n+ \" or 'mask_safe' must be defined.\"\n+ )\n \n @property\n def data_shape(self):\n@@ -814,7 +820,7 @@ class MapDataset(Dataset):\n \n The model is not exported to the ~gammapy.spectrum.SpectrumDataset.\n It must be set after the dataset extraction.\n- \n+\n Parameters\n ----------\n on_region : `~regions.SkyRegion`\n@@ -860,7 +866,9 @@ class MapDataset(Dataset):\n raise ValueError(\"No PSFMap set. 
Containement correction impossible\")\n else:\n psf = self.psf.get_energy_dependent_table_psf(on_region.center)\n- containment = psf.containment(kwargs[\"aeff\"].energy.center, on_region.radius)\n+ containment = psf.containment(\n+ kwargs[\"aeff\"].energy.center, on_region.radius\n+ )\n kwargs[\"aeff\"].data.data *= containment.squeeze()\n \n if self.edisp is not None:\n@@ -868,9 +876,7 @@ class MapDataset(Dataset):\n edisp = self.edisp\n else:\n axis = self._geom.get_axis_by_name(\"energy\")\n- edisp = self.edisp.get_edisp_kernel(\n- on_region.center, e_reco=axis.edges\n- )\n+ edisp = self.edisp.get_edisp_kernel(on_region.center, e_reco=axis.edges)\n kwargs[\"edisp\"] = edisp\n \n return SpectrumDataset(**kwargs)\n@@ -1301,9 +1307,7 @@ class MapDatasetOnOff(MapDataset):\n kwargs[\"acceptance\"] = Map.from_hdulist(hdulist, hdu=\"acceptance\")\n \n if \"ACCEPTANCE_OFF\" in hdulist:\n- kwargs[\"acceptance_off\"] = Map.from_hdulist(\n- hdulist, hdu=\"acceptance_off\"\n- )\n+ kwargs[\"acceptance_off\"] = Map.from_hdulist(hdulist, hdu=\"acceptance_off\")\n \n if \"EXPOSURE\" in hdulist:\n kwargs[\"exposure\"] = Map.from_hdulist(hdulist, hdu=\"exposure\")\n@@ -1368,11 +1372,11 @@ class MapDatasetOnOff(MapDataset):\n if self.acceptance is not None:\n kwargs[\"acceptance\"] = self.acceptance.get_spectrum(on_region, np.mean)\n background = self.background.get_spectrum(on_region, np.sum)\n- kwargs[\"acceptance_off\"] = kwargs[\"acceptance\"] * kwargs[\"counts_off\"] / background\n+ kwargs[\"acceptance_off\"] = (\n+ kwargs[\"acceptance\"] * kwargs[\"counts_off\"] / background\n+ )\n \n- return SpectrumDatasetOnOff.from_spectrum_dataset(\n- dataset=dataset, **kwargs\n- )\n+ return SpectrumDatasetOnOff.from_spectrum_dataset(dataset=dataset, **kwargs)\n \n def cutout(self, position, width, mode=\"trim\"):\n \"\"\"Cutout map dataset.\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/datasets.py", "new_path": "gammapy/modeling/datasets.py", "diff": "@@ -2,8 +2,9 @@\n import abc\n import collections.abc\n import copy\n+from warnings import warn\n import numpy as np\n-from gammapy.utils.scripts import make_path, read_yaml, write_yaml\n+from gammapy.utils.scripts import make_name, make_path, read_yaml, write_yaml\n from gammapy.utils.table import table_from_row_data\n from ..maps import WcsNDMap\n from .parameter import Parameters\n@@ -63,9 +64,14 @@ class Dataset(abc.ABC):\n def stat_array(self):\n \"\"\"Statistic array, one value per data point.\"\"\"\n \n- def copy(self):\n+ def copy(self, name=None):\n \"\"\"A deep copy.\"\"\"\n- return copy.deepcopy(self)\n+ new = copy.deepcopy(self)\n+ if name is None:\n+ new.name = make_name()\n+ else:\n+ new.name = name\n+ return new\n \n @staticmethod\n def _compute_residuals(data, model, method=\"diff\"):\n@@ -95,12 +101,21 @@ class Datasets(collections.abc.Sequence):\n \n def __init__(self, datasets):\n if isinstance(datasets, Datasets):\n- self._datasets = list(datasets)\n+ datasets = list(datasets)\n elif isinstance(datasets, list):\n- self._datasets = datasets\n+ pass\n else:\n raise TypeError(f\"Invalid type: {datasets!r}\")\n \n+ unique_names = []\n+ for dataset in datasets:\n+ while dataset.name in unique_names:\n+ dataset.name = make_name() # replace duplicate\n+ warn(\"Dateset names must be unique, auto-replaced duplicates\")\n+ unique_names.append(dataset.name)\n+\n+ self._datasets = datasets\n+\n @property\n def parameters(self):\n \"\"\"Unique parameters (`~gammapy.modeling.Parameters`).\n" }, { "change_type": "MODIFY", "old_path": 
"gammapy/modeling/model.py", "new_path": "gammapy/modeling/model.py", "diff": "@@ -1,6 +1,7 @@\n # Licensed under a 3-clause BSD style license - see LICENSE.rst\n import copy\n import astropy.units as u\n+from gammapy.utils.scripts import make_name\n from .parameter import Parameter, Parameters\n \n __all__ = [\"Model\"]\n@@ -54,9 +55,14 @@ class Model:\n \"\"\"Parameters (`~gammapy.modeling.Parameters`)\"\"\"\n return self._parameters\n \n- def copy(self):\n+ def copy(self, name=None):\n \"\"\"A deep copy.\"\"\"\n- return copy.deepcopy(self)\n+ new = copy.deepcopy(self)\n+ if name is None:\n+ new.name = make_name()\n+ else:\n+ new.name = name\n+ return new\n \n def __str__(self):\n return f\"{self.__class__.__name__}\\n\\n{self.parameters.to_table()}\"\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/cube.py", "new_path": "gammapy/modeling/models/cube.py", "diff": "@@ -3,12 +3,13 @@\n import collections.abc\n import copy\n from pathlib import Path\n+from warnings import warn\n import numpy as np\n import astropy.units as u\n import yaml\n from gammapy.maps import Map\n from gammapy.modeling import Model, Parameter, Parameters\n-from gammapy.utils.scripts import make_path\n+from gammapy.utils.scripts import make_name, make_path\n \n \n class SkyModelBase(Model):\n@@ -52,6 +53,13 @@ class SkyModels(collections.abc.Sequence):\n else:\n raise TypeError(f\"Invalid type: {skymodels!r}\")\n \n+ unique_names = []\n+ for model in models:\n+ while model.name in unique_names:\n+ model.name = make_name() # replace duplicate\n+ warn(\"SkyModel names must be unique, auto-replaced duplicates\")\n+ unique_names.append(model.name)\n+\n self._skymodels = models\n \n @property\n@@ -141,14 +149,18 @@ class SkyModel(SkyModelBase):\n \n tag = \"SkyModel\"\n \n- def __init__(self, spectral_model, spatial_model=None, name=\"source\"):\n- self.name = name\n+ def __init__(self, spectral_model, spatial_model=None, name=None):\n self.spatial_model = spatial_model\n self.spectral_model = spectral_model\n super().__init__()\n # TODO: this hack is needed for compound models to work\n self.__dict__.pop(\"_parameters\")\n \n+ if name is None:\n+ self.name = make_name()\n+ else:\n+ self.name = name\n+\n @property\n def parameters(self):\n parameters = []\n@@ -253,7 +265,7 @@ class SkyModel(SkyModelBase):\n \n kwargs.setdefault(\"spatial_model\", spatial_model)\n kwargs.setdefault(\"spectral_model\", self.spectral_model.copy())\n- kwargs.setdefault(\"name\", self.name + \"-copy\")\n+ kwargs.setdefault(\"name\", make_name())\n return self.__class__(**kwargs)\n \n def to_dict(self):\n@@ -327,10 +339,14 @@ class SkyDiffuseCube(SkyModelBase):\n reference=reference.quantity,\n meta=None,\n interp_kwargs=None,\n- name=\"diffuse\",\n+ name=None,\n filename=None,\n ):\n- self.name = name\n+\n+ if name is None:\n+ self.name = make_name()\n+ else:\n+ self.name = name\n axis = map.geom.get_axis_by_name(\"energy\")\n \n if axis.node_type != \"center\":\n@@ -463,7 +479,7 @@ class BackgroundModel(Model):\n norm=norm.quantity,\n tilt=tilt.quantity,\n reference=reference.quantity,\n- name=\"background\",\n+ name=None,\n filename=None,\n ):\n axis = map.geom.get_axis_by_name(\"energy\")\n@@ -471,8 +487,13 @@ class BackgroundModel(Model):\n raise ValueError('Need an integrated map, energy axis node_type=\"edges\"')\n \n self.map = map\n- self.name = name\n+\n+ if name is None:\n+ self.name = make_name()\n+ else:\n+ self.name = name\n self.filename = filename\n+\n super().__init__(norm=norm, tilt=tilt, 
reference=reference)\n \n @property\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/tests/data/make.py", "new_path": "gammapy/modeling/tests/data/make.py", "diff": "@@ -24,7 +24,7 @@ DATA_PATH = Path(\"./\")\n \n def make_example_2():\n spatial = GaussianSpatialModel(lon_0=\"0 deg\", lat_0=\"0 deg\", sigma=\"1 deg\")\n- model = SkyModel(spatial, PowerLawSpectralModel())\n+ model = SkyModel(PowerLawSpectralModel(), spatial)\n models = SkyModels([model])\n models.write(DATA_PATH / \"example2.yaml\")\n \n" }, { "change_type": "MODIFY", "old_path": "gammapy/spectrum/dataset.py", "new_path": "gammapy/spectrum/dataset.py", "diff": "@@ -5,13 +5,13 @@ from astropy import units as u\n from astropy.io import fits\n from astropy.table import Table\n from gammapy.data import GTI\n-from gammapy.irf import EffectiveAreaTable, EDispKernel, IRFStacker\n+from gammapy.irf import EDispKernel, EffectiveAreaTable, IRFStacker\n from gammapy.modeling import Dataset, Parameters\n from gammapy.modeling.models import SkyModel, SkyModels\n from gammapy.stats import cash, significance_on_off, significance, wstat\n from gammapy.utils.fits import energy_axis_to_ebounds\n from gammapy.utils.random import get_random_state\n-from gammapy.utils.scripts import make_path\n+from gammapy.utils.scripts import make_name, make_path\n from .core import CountsSpectrum, SpectrumEvaluator\n \n __all__ = [\n@@ -69,7 +69,7 @@ class SpectrumDataset(Dataset):\n background=None,\n mask_safe=None,\n mask_fit=None,\n- name=\"\",\n+ name=None,\n gti=None,\n ):\n \n@@ -88,9 +88,13 @@ class SpectrumDataset(Dataset):\n self.background = background\n self.models = models\n self.mask_safe = mask_safe\n- self.name = name\n self.gti = gti\n \n+ if name is None:\n+ self.name = make_name()\n+ else:\n+ self.name = name\n+\n def __str__(self):\n str_ = self.__class__.__name__\n str_ += \"\\n\\n\"\n" }, { "change_type": "MODIFY", "old_path": "gammapy/spectrum/flux_point.py", "new_path": "gammapy/spectrum/flux_point.py", "diff": "@@ -12,7 +12,7 @@ from gammapy.modeling.models import (\n SkyModels,\n )\n from gammapy.utils.interpolation import interpolate_profile\n-from gammapy.utils.scripts import make_path\n+from gammapy.utils.scripts import make_name, make_path\n from gammapy.utils.table import table_from_row_data, table_standardise_units_copy\n from .dataset import SpectrumDatasetOnOff\n \n@@ -1181,11 +1181,16 @@ class FluxPointsDataset(Dataset):\n stat_type = \"chi2\"\n tag = \"FluxPointsDataset\"\n \n- def __init__(self, models, data, mask_fit=None, mask_safe=None, name=\"\"):\n+ def __init__(self, models, data, mask_fit=None, mask_safe=None, name=None):\n self.data = data\n self.mask_fit = mask_fit\n- self.name = name\n self.models = models\n+\n+ if name is None:\n+ self.name = make_name()\n+ else:\n+ self.name = name\n+\n if data.sed_type != \"dnde\":\n raise ValueError(\"Currently only flux points of type 'dnde' are supported.\")\n \n" }, { "change_type": "MODIFY", "old_path": "gammapy/utils/scripts.py", "new_path": "gammapy/utils/scripts.py", "diff": "@@ -2,6 +2,7 @@\n \"\"\"Utils to create scripts and command-line tools\"\"\"\n import os.path\n from pathlib import Path\n+from uuid import uuid4\n import yaml\n \n __all__ = [\"read_yaml\", \"write_yaml\", \"make_path\", \"recursive_merge_dicts\"]\n@@ -53,6 +54,10 @@ def write_yaml(dictionary, filename, logger=None, sort_keys=True):\n path.write_text(text)\n \n \n+def make_name():\n+ return uuid4().hex[:8]\n+\n+\n def make_path(path):\n \"\"\"Expand environment variables on 
`~pathlib.Path` construction.\n \n" } ]
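The diff above introduces make_name() (a short uuid4 hex string), uses it as the default name in constructors, and de-duplicates names inside collections. The snippet below is a minimal, self-contained sketch of that scheme, assuming only what is visible in the diff; Dataset and Datasets here are simplified stand-ins for the gammapy classes.

```python
from uuid import uuid4
from warnings import warn

def make_name():
    # same recipe as gammapy.utils.scripts.make_name in the diff
    return uuid4().hex[:8]

class Dataset:
    def __init__(self, name=None):
        # auto-generate a name when none is given
        self.name = make_name() if name is None else name

class Datasets:
    def __init__(self, datasets):
        unique_names = []
        for dataset in datasets:
            while dataset.name in unique_names:
                dataset.name = make_name()  # replace duplicate
                warn("Dataset names must be unique, auto-replaced duplicates")
            unique_names.append(dataset.name)
        self._datasets = list(datasets)

datasets = Datasets([Dataset("obs-1"), Dataset("obs-1"), Dataset()])
print([d.name for d in datasets._datasets])  # second 'obs-1' gets a random 8-char name
```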
a1b7520cfca410f773da868fcddffe971fedd6af
gammapy/gammapy
13.02.2020 14:03:19
BSD 3-Clause New or Revised License
Add processing options for SkyModel and SkyDiffuseCube Add options to control whether the PSF and energy dispersion (edisp) are applied to SkyModel and SkyDiffuseCube
[ { "change_type": "MODIFY", "old_path": "gammapy/cube/fit.py", "new_path": "gammapy/cube/fit.py", "diff": "@@ -250,7 +250,9 @@ class MapDataset(Dataset):\n evaluator = self._evaluators.get(model.name)\n \n if evaluator is None:\n- evaluator = MapEvaluator(model=model, evaluation_mode=self.evaluation_mode)\n+ evaluator = MapEvaluator(\n+ model=model, evaluation_mode=self.evaluation_mode\n+ )\n self._evaluators[model.name] = evaluator\n \n # if the model component drifts out of its support the evaluator has\n@@ -928,9 +930,7 @@ class MapDataset(Dataset):\n kwargs[\"exposure\"] = self.exposure.cutout(**cutout_kwargs)\n \n if self.background_model is not None:\n- kwargs[\"models\"] = self.background_model.cutout(\n- **cutout_kwargs, name=name\n- )\n+ kwargs[\"models\"] = self.background_model.cutout(**cutout_kwargs, name=name)\n \n if self.edisp is not None:\n kwargs[\"edisp\"] = self.edisp.cutout(**cutout_kwargs)\n@@ -1646,9 +1646,9 @@ class MapEvaluator:\n \"\"\"\n flux = self.compute_flux()\n npred = self.apply_exposure(flux)\n- if self.psf is not None:\n+ if self.psf is not None and self.model.processing[\"psf\"] == True:\n npred = self.apply_psf(npred)\n- if self.edisp is not None:\n+ if self.edisp is not None and self.model.processing[\"edisp\"] == True:\n npred = self.apply_edisp(npred)\n \n return npred\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/cube.py", "new_path": "gammapy/modeling/models/cube.py", "diff": "@@ -14,6 +14,8 @@ from .core import Model, Models\n class SkyModelBase(Model):\n \"\"\"Sky model base class\"\"\"\n \n+ processing = {\"psf\": 1, \"edisp\": 1}\n+\n def __add__(self, other):\n if isinstance(other, (Models, list)):\n return Models([self, *other])\n@@ -32,6 +34,13 @@ class SkyModelBase(Model):\n coords = geom.get_coord(frame=self.frame)\n return self(coords.lon, coords.lat, coords[\"energy\"])\n \n+ def _update_processing(self, processing):\n+ if processing in [None, \"None\"]:\n+ self.processing = {\"psf\": 0, \"edisp\": 0}\n+ else:\n+ for key in processing:\n+ self.processing[key] = processing[key]\n+\n \n class SkyModel(SkyModelBase):\n \"\"\"Sky model component.\n@@ -55,7 +64,12 @@ class SkyModel(SkyModelBase):\n tag = \"SkyModel\"\n \n def __init__(\n- self, spectral_model, spatial_model=None, temporal_model=None, name=None\n+ self,\n+ spectral_model,\n+ spatial_model=None,\n+ temporal_model=None,\n+ name=None,\n+ processing={},\n ):\n self.spatial_model = spatial_model\n self.spectral_model = spectral_model\n@@ -65,6 +79,7 @@ class SkyModel(SkyModelBase):\n self.__dict__.pop(\"_parameters\")\n \n self._name = make_name(name)\n+ self._update_processing(processing)\n \n @property\n def name(self):\n@@ -212,6 +227,9 @@ class SkyModel(SkyModelBase):\n if self.temporal_model is not None:\n data[\"temporal\"] = self.temporal_model.to_dict()\n \n+ if self.processing != {\"psf\": 1, \"edisp\": 1}:\n+ data[\"processing\"] = self.processing\n+\n return data\n \n @classmethod\n@@ -245,6 +263,7 @@ class SkyModel(SkyModelBase):\n spatial_model=spatial_model,\n spectral_model=spectral_model,\n temporal_model=temporal_model,\n+ processing=data.get(\"processing\", {}),\n )\n \n def __str__(self):\n@@ -312,6 +331,7 @@ class SkyDiffuseCube(SkyModelBase):\n interp_kwargs=None,\n name=None,\n filename=None,\n+ processing={},\n ):\n \n self._name = make_name(name)\n@@ -334,6 +354,7 @@ class SkyDiffuseCube(SkyModelBase):\n # remove this again\n self._cached_value = None\n self._cached_coordinates = (None, None, None)\n+ 
self._update_processing(processing)\n \n super().__init__(norm=norm, tilt=tilt, reference=reference)\n \n@@ -417,6 +438,8 @@ class SkyDiffuseCube(SkyModelBase):\n def from_dict(cls, data):\n model = cls.read(data[\"filename\"])\n model._update_from_dict(data)\n+ processing = data.get(\"processing\", {})\n+ model._update_processing(processing)\n return model\n \n def to_dict(self):\n@@ -427,6 +450,9 @@ class SkyDiffuseCube(SkyModelBase):\n \n # Move parameters at the end\n data[\"parameters\"] = data.pop(\"parameters\")\n+ if self.processing != {\"psf\": 1, \"edisp\": 1}:\n+ data[\"processing\"] = self.processing\n+\n return data\n \n def __str__(self):\n" }, { "change_type": "MODIFY", "old_path": "gammapy/spectrum/core.py", "new_path": "gammapy/spectrum/core.py", "diff": "@@ -103,9 +103,15 @@ class CountsSpectrum:\n region = None\n wcs = None\n if hdu3 in hdulist:\n- region, wcs =cls.read_region_table(hdulist[hdu3])\n+ region, wcs = cls.read_region_table(hdulist[hdu3])\n \n- return cls(data=counts, energy_lo=ebounds[:-1], energy_hi=ebounds[1:], region=region, wcs=wcs)\n+ return cls(\n+ data=counts,\n+ energy_lo=ebounds[:-1],\n+ energy_hi=ebounds[1:],\n+ region=region,\n+ wcs=wcs,\n+ )\n \n @classmethod\n def read(cls, filename, hdu1=\"COUNTS\", hdu2=\"EBOUNDS\", hdu3=\"REGION\"):\n@@ -124,8 +130,6 @@ class CountsSpectrum:\n names = [\"CHANNEL\", \"COUNTS\"]\n meta = {\"name\": \"COUNTS\"}\n \n-\n-\n return Table([channel, counts], names=names, meta=meta)\n \n def _to_region_table(self):\n@@ -156,7 +160,7 @@ class CountsSpectrum:\n ebounds = energy_axis_to_ebounds(energy)\n \n region_table = self._to_region_table()\n- region_hdu = fits.BinTableHDU(region_table, name='REGION')\n+ region_hdu = fits.BinTableHDU(region_table, name=\"REGION\")\n return fits.HDUList([fits.PrimaryHDU(), hdu, ebounds, region_hdu])\n \n def write(self, filename, use_sherpa=False, **kwargs):\n@@ -439,7 +443,7 @@ class SpectrumEvaluator:\n def apply_edisp(self, true_counts):\n from . import CountsSpectrum\n \n- if self.edisp is not None:\n+ if self.edisp is not None and self.model.processing[\"edisp\"] == True:\n cts = self.edisp.apply(true_counts)\n e_reco = self.edisp.e_reco.edges\n else:\n" } ]
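The commit above attaches a processing dict ({"psf": 1, "edisp": 1} by default) to sky models, and the evaluators skip a step when its flag is 0. Below is a self-contained sketch of that control flow; the toy evaluator and its flux numbers are invented stand-ins, and only the flag handling mirrors the diff (the "None disables everything" branch of the original is kept).

```python
class SkyModelBase:
    # default: apply both the PSF and the energy dispersion
    processing = {"psf": 1, "edisp": 1}

    def _update_processing(self, processing):
        if processing is None:
            # in the diff, passing None (or "None") disables both steps
            self.processing = {"psf": 0, "edisp": 0}
        else:
            # per-instance copy so updates do not leak to the class default
            self.processing = {**SkyModelBase.processing, **processing}

class SkyModel(SkyModelBase):
    def __init__(self, processing={}):
        self._update_processing(processing)

def toy_npred(model, flux):
    # stand-in for MapEvaluator.compute_npred: pretend the PSF halves the
    # flux and edisp adds 1, so the effect of the flags is visible
    if model.processing["psf"]:
        flux = flux * 0.5
    if model.processing["edisp"]:
        flux = flux + 1
    return flux

print(toy_npred(SkyModel(), 10.0))                       # 6.0  -> both steps applied
print(toy_npred(SkyModel(processing={"psf": 0}), 10.0))  # 11.0 -> PSF step skipped
```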
36d511791b9b9dd64c09844a09865e73dac650ba
gammapy/gammapy
08.07.2020 12:03:36
BSD 3-Clause New or Revised License
Add shorter tags for models Redefine most model tags as a list that includes shorter aliases, to be used with model.create(tag) and in YAML serialization. By default the tag returned by model.to_dict() is the 0th tag, which is also the class name.
[ { "change_type": "MODIFY", "old_path": "gammapy/catalog/fermi.py", "new_path": "gammapy/catalog/fermi.py", "diff": "@@ -184,7 +184,7 @@ class SourceCatalogObjectFermiBase(SourceCatalogObject, abc.ABC):\n lat_err = semi_major / scale_1sigma\n lon_err = semi_minor / scale_1sigma / np.cos(d[\"DEJ2000\"])\n \n- if model.tag != \"TemplateSpatialModel\":\n+ if \"TemplateSpatialModel\" not in model.tag:\n model.parameters[\"lon_0\"].error = lon_err\n model.parameters[\"lat_0\"].error = lat_err\n model.phi_0 = phi_0\n" }, { "change_type": "MODIFY", "old_path": "gammapy/catalog/tests/test_fermi.py", "new_path": "gammapy/catalog/tests/test_fermi.py", "diff": "@@ -165,7 +165,7 @@ class TestFermi4FGLObject:\n \n def test_spatial_model(self):\n model = self.cat[\"4FGL J0000.3-7355\"].spatial_model()\n- assert model.tag == \"PointSpatialModel\"\n+ assert \"PointSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 0.0983)\n@@ -178,7 +178,7 @@ class TestFermi4FGLObject:\n assert_allclose(model.position.dec.value, pos_err.center.dec.value)\n \n model = self.cat[\"4FGL J1409.1-6121e\"].spatial_model()\n- assert model.tag == \"DiskSpatialModel\"\n+ assert \"DiskSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 212.294006)\n@@ -186,7 +186,7 @@ class TestFermi4FGLObject:\n assert_allclose(p[\"r_0\"].value, 0.7331369519233704)\n \n model = self.cat[\"4FGL J0617.2+2234e\"].spatial_model()\n- assert model.tag == \"GaussianSpatialModel\"\n+ assert \"GaussianSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 94.309998)\n@@ -194,7 +194,7 @@ class TestFermi4FGLObject:\n assert_allclose(p[\"sigma\"].value, 0.27)\n \n model = self.cat[\"4FGL J1443.0-6227e\"].spatial_model()\n- assert model.tag == \"TemplateSpatialModel\"\n+ assert \"TemplateSpatialModel\" in model.tag\n assert model.frame == \"fk5\"\n assert model.normalize is True\n \n@@ -305,14 +305,14 @@ class TestFermi3FGLObject:\n \n def test_spatial_model(self):\n model = self.cat[0].spatial_model()\n- assert model.tag == \"PointSpatialModel\"\n+ assert \"PointSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 0.0377)\n assert_allclose(p[\"lat_0\"].value, 65.751701)\n \n model = self.cat[122].spatial_model()\n- assert model.tag == \"GaussianSpatialModel\"\n+ assert \"GaussianSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 14.75)\n@@ -320,7 +320,7 @@ class TestFermi3FGLObject:\n assert_allclose(p[\"sigma\"].value, 1.35)\n \n model = self.cat[955].spatial_model()\n- assert model.tag == \"DiskSpatialModel\"\n+ assert \"DiskSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 128.287201)\n@@ -328,7 +328,7 @@ class TestFermi3FGLObject:\n assert_allclose(p[\"r_0\"].value, 0.91)\n \n model = self.cat[602].spatial_model()\n- assert model.tag == \"TemplateSpatialModel\"\n+ assert \"TemplateSpatialModel\" in model.tag\n assert model.frame == \"fk5\"\n assert model.normalize is True\n \n@@ -442,7 +442,7 @@ class TestFermi2FHLObject:\n \n def test_spatial_model(self):\n model = self.cat[221].spatial_model()\n- assert model.tag == \"PointSpatialModel\"\n+ assert \"PointSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n 
assert_allclose(p[\"lon_0\"].value, 221.281998, rtol=1e-5)\n@@ -459,7 +459,7 @@ class TestFermi2FHLObject:\n assert_allclose(model.position.dec.value, pos_err.center.dec.value)\n \n model = self.cat[97].spatial_model()\n- assert model.tag == \"GaussianSpatialModel\"\n+ assert \"GaussianSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 94.309998, rtol=1e-5)\n@@ -467,7 +467,7 @@ class TestFermi2FHLObject:\n assert_allclose(p[\"sigma\"].value, 0.27)\n \n model = self.cat[134].spatial_model()\n- assert model.tag == \"DiskSpatialModel\"\n+ assert \"DiskSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 125.660004, rtol=1e-5)\n@@ -475,7 +475,7 @@ class TestFermi2FHLObject:\n assert_allclose(p[\"r_0\"].value, 0.37)\n \n model = self.cat[256].spatial_model()\n- assert model.tag == \"TemplateSpatialModel\"\n+ assert \"TemplateSpatialModel\" in model.tag\n assert model.frame == \"fk5\"\n assert model.normalize is True\n # TODO: have to check the extended template used for RX J1713,\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/core.py", "new_path": "gammapy/modeling/models/core.py", "diff": "@@ -106,7 +106,8 @@ class Model:\n \n def to_dict(self):\n \"\"\"Create dict for YAML serialisation\"\"\"\n- return {\"type\": self.tag, \"parameters\": self.parameters.to_dict()}\n+ tag = self.tag[0] if isinstance(self.tag, list) else self.tag\n+ return {\"type\": tag, \"parameters\": self.parameters.to_dict()}\n \n @classmethod\n def from_dict(cls, data):\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/spatial.py", "new_path": "gammapy/modeling/models/spatial.py", "diff": "@@ -210,7 +210,7 @@ class PointSpatialModel(SpatialModel):\n Center position coordinate frame\n \"\"\"\n \n- tag = \"PointSpatialModel\"\n+ tag = [\"PointSpatialModel\", \"PS\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n \n@@ -281,7 +281,7 @@ class GaussianSpatialModel(SpatialModel):\n Center position coordinate frame\n \"\"\"\n \n- tag = \"GaussianSpatialModel\"\n+ tag = [\"GaussianSpatialModel\", \"GaussianSpatial\"]\n \n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n@@ -350,7 +350,7 @@ class DiskSpatialModel(SpatialModel):\n Center position coordinate frame\n \"\"\"\n \n- tag = \"DiskSpatialModel\"\n+ tag = [\"DiskSpatialModel\", \"disk\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n r_0 = Parameter(\"r_0\", \"1 deg\", min=0)\n@@ -436,7 +436,7 @@ class ShellSpatialModel(SpatialModel):\n Center position coordinate frame\n \"\"\"\n \n- tag = \"ShellSpatialModel\"\n+ tag = [\"ShellSpatialModel\", \"shell\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n radius = Parameter(\"radius\", \"1 deg\")\n@@ -489,7 +489,7 @@ class ConstantSpatialModel(SpatialModel):\n Value\n \"\"\"\n \n- tag = \"ConstantSpatialModel\"\n+ tag = [\"ConstantSpatialModel\", \"ConstantSpatial\"]\n value = Parameter(\"value\", \"1 sr-1\", frozen=True)\n \n frame = \"icrs\"\n@@ -546,7 +546,7 @@ class TemplateSpatialModel(SpatialModel):\n Default arguments are {'interp': 'linear', 'fill_value': 0}.\n \"\"\"\n \n- tag = \"TemplateSpatialModel\"\n+ tag = [\"TemplateSpatialModel\", \"TemplateSpatial\"]\n norm = Parameter(\"norm\", 1)\n \n def __init__(\n" }, { 
"change_type": "MODIFY", "old_path": "gammapy/modeling/models/spectral.py", "new_path": "gammapy/modeling/models/spectral.py", "diff": "@@ -389,7 +389,7 @@ class ConstantSpectralModel(SpectralModel):\n :math:`k`\n \"\"\"\n \n- tag = \"ConstantSpectralModel\"\n+ tag = [\"ConstantSpectralModel\", \"ConstantSpectral\"]\n const = Parameter(\"const\", \"1e-12 cm-2 s-1 TeV-1\")\n \n @staticmethod\n@@ -404,7 +404,7 @@ class CompoundSpectralModel(SpectralModel):\n For more information see :ref:`compound-spectral-model`.\n \"\"\"\n \n- tag = \"CompoundSpectralModel\"\n+ tag = [\"CompoundSpectralModel\", \"CompoundSpectral\"]\n \n def __init__(self, model1, model2, operator):\n self.model1 = model1\n@@ -452,7 +452,7 @@ class PowerLawSpectralModel(SpectralModel):\n :math:`E_0`\n \"\"\"\n \n- tag = \"PowerLawSpectralModel\"\n+ tag = [\"PowerLawSpectralModel\", \"PL\"]\n index = Parameter(\"index\", 2.0)\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n reference = Parameter(\"reference\", \"1 TeV\", frozen=True)\n@@ -561,7 +561,7 @@ class PowerLaw2SpectralModel(SpectralModel):\n Upper energy limit :math:`E_{0, max}`.\n \"\"\"\n \n- tag = \"PowerLaw2SpectralModel\"\n+ tag = [\"PowerLaw2SpectralModel\", \"PL2\"]\n \n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1\")\n index = Parameter(\"index\", 2)\n@@ -675,7 +675,7 @@ class SmoothBrokenPowerLawSpectralModel(SpectralModel):\n :math:`\\beta`\n \"\"\"\n \n- tag = \"SmoothBrokenPowerLawSpectralModel\"\n+ tag = [\"SmoothBrokenPowerLawSpectralModel\", \"SBPL\"]\n index1 = Parameter(\"index1\", 2.0)\n index2 = Parameter(\"index2\", 2.0)\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n@@ -711,7 +711,7 @@ class ExpCutoffPowerLawSpectralModel(SpectralModel):\n :math:`\\alpha`\n \"\"\"\n \n- tag = \"ExpCutoffPowerLawSpectralModel\"\n+ tag = [\"ExpCutoffPowerLawSpectralModel\", \"ECPL\"]\n \n index = Parameter(\"index\", 1.5)\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n@@ -764,7 +764,7 @@ class ExpCutoffPowerLaw3FGLSpectralModel(SpectralModel):\n :math:`E_{C}`\n \"\"\"\n \n- tag = \"ExpCutoffPowerLaw3FGLSpectralModel\"\n+ tag = [\"ExpCutoffPowerLaw3FGLSpectralModel\", \"ECPL3FGL\"]\n index = Parameter(\"index\", 1.5)\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n reference = Parameter(\"reference\", \"1 TeV\", frozen=True)\n@@ -803,7 +803,7 @@ class SuperExpCutoffPowerLaw3FGLSpectralModel(SpectralModel):\n :math:`E_{C}`\n \"\"\"\n \n- tag = \"SuperExpCutoffPowerLaw3FGLSpectralModel\"\n+ tag = [\"SuperExpCutoffPowerLaw3FGLSpectralModel\", \"SECPL3FGL\"]\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n reference = Parameter(\"reference\", \"1 TeV\", frozen=True)\n ecut = Parameter(\"ecut\", \"10 TeV\")\n@@ -838,7 +838,7 @@ class SuperExpCutoffPowerLaw4FGLSpectralModel(SpectralModel):\n internally assumes unit of :math:`[E_0]` power :math:`-\\Gamma_2`\n \"\"\"\n \n- tag = \"SuperExpCutoffPowerLaw4FGLSpectralModel\"\n+ tag = [\"SuperExpCutoffPowerLaw4FGLSpectralModel\", \"SECPL4FGL\"]\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n reference = Parameter(\"reference\", \"1 TeV\", frozen=True)\n expfactor = Parameter(\"expfactor\", \"1e-2\")\n@@ -874,7 +874,7 @@ class LogParabolaSpectralModel(SpectralModel):\n :math:`\\beta`\n \"\"\"\n \n- tag = \"LogParabolaSpectralModel\"\n+ tag = [\"LogParabolaSpectralModel\", \"LP\", \"logpar\"]\n amplitude = Parameter(\"amplitude\", \"1e-12 cm-2 s-1 TeV-1\")\n reference = Parameter(\"reference\", \"10 TeV\", 
frozen=True)\n alpha = Parameter(\"alpha\", 2)\n@@ -931,7 +931,7 @@ class TemplateSpectralModel(SpectralModel):\n Meta information, meta['filename'] will be used for serialization\n \"\"\"\n \n- tag = \"TemplateSpectralModel\"\n+ tag = [\"TemplateSpectralModel\", \"TemplateSpectral\"]\n norm = Parameter(\"norm\", 1, unit=\"\")\n tilt = Parameter(\"tilt\", 0, unit=\"\", frozen=True)\n reference = Parameter(\"reference\", \"1 TeV\", frozen=True)\n@@ -1017,7 +1017,7 @@ class TemplateSpectralModel(SpectralModel):\n \n def to_dict(self):\n return {\n- \"type\": self.tag,\n+ \"type\": self.tag[0],\n \"parameters\": self.parameters.to_dict(),\n \"energy\": {\n \"data\": self.energy.data.tolist(),\n@@ -1048,7 +1048,7 @@ class ScaleSpectralModel(SpectralModel):\n Multiplicative norm factor for the model value.\n \"\"\"\n \n- tag = \"ScaleSpectralModel\"\n+ tag = [\"ScaleSpectralModel\", \"ScaleSpectral\"]\n norm = Parameter(\"norm\", 1, unit=\"\")\n \n def __init__(self, model, norm=norm.quantity):\n@@ -1351,7 +1351,7 @@ class NaimaSpectralModel(SpectralModel):\n for now this is used only for synchrotron self-compton model\n \"\"\"\n \n- tag = \"NaimaSpectralModel\"\n+ tag = [\"NaimaSpectralModel\", \"NaimaSpectral\"]\n \n def __init__(\n self, radiative_model, distance=1.0 * u.kpc, seed=None, nested_models=None\n@@ -1495,7 +1495,7 @@ class GaussianSpectralModel(SpectralModel):\n :math:`\\sigma`\n \"\"\"\n \n- tag = \"GaussianSpectralModel\"\n+ tag = [\"GaussianSpectralModel\", \"GaussianSpectral\"]\n norm = Parameter(\"norm\", 1e-12 * u.Unit(\"cm-2 s-1\"))\n mean = Parameter(\"mean\", 1 * u.TeV)\n sigma = Parameter(\"sigma\", 2 * u.TeV)\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/tests/test_core.py", "new_path": "gammapy/modeling/models/tests/test_core.py", "diff": "@@ -141,7 +141,7 @@ def test_model_create():\n spectral_model = Model.create(\n \"PowerLaw2SpectralModel\", amplitude=\"1e-10 cm-2 s-1\", index=3\n )\n- assert spectral_model.tag == \"PowerLaw2SpectralModel\"\n+ assert \"PowerLaw2SpectralModel\" in spectral_model.tag\n assert_allclose(spectral_model.index.value, 3)\n \n \n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/tests/test_io.py", "new_path": "gammapy/modeling/models/tests/test_io.py", "diff": "@@ -31,8 +31,8 @@ def test_dict_to_skymodels():\n assert model0.name == \"background_irf\"\n \n model0 = models[1]\n- assert model0.spectral_model.tag == \"ExpCutoffPowerLawSpectralModel\"\n- assert model0.spatial_model.tag == \"PointSpatialModel\"\n+ assert \"ExpCutoffPowerLawSpectralModel\" in model0.spectral_model.tag\n+ assert \"PointSpatialModel\" in model0.spatial_model.tag\n \n pars0 = model0.parameters\n assert pars0[\"index\"].value == 2.1\n@@ -59,9 +59,11 @@ def test_dict_to_skymodels():\n assert np.isnan(pars0[\"lambda_\"].max)\n \n model1 = models[2]\n- assert model1.spectral_model.tag == \"PowerLawSpectralModel\"\n- assert model1.spatial_model.tag == \"DiskSpatialModel\"\n- assert model1.temporal_model.tag == \"LightCurveTemplateTemporalModel\"\n+ assert \"PL\" in model1.spectral_model.tag\n+ assert \"PowerLawSpectralModel\" in model1.spectral_model.tag\n+ assert \"DiskSpatialModel\" in model1.spatial_model.tag\n+ assert \"disk\" in model1.spatial_model.tag\n+ assert \"LightCurveTemplateTemporalModel\" in model1.temporal_model.tag\n \n pars1 = model1.parameters\n assert pars1[\"index\"].value == 2.2\n@@ -82,8 +84,8 @@ def test_dict_to_skymodels():\n )\n assert model2.spectral_model.values.unit == \"1 / (cm2 MeV s sr)\"\n \n- 
assert model2.spectral_model.tag == \"TemplateSpectralModel\"\n- assert model2.spatial_model.tag == \"TemplateSpatialModel\"\n+ assert \"TemplateSpectralModel\" in model2.spectral_model.tag\n+ assert \"TemplateSpatialModel\" in model2.spatial_model.tag\n \n assert model2.spatial_model.parameters[\"norm\"].value == 1.0\n assert not model2.spatial_model.normalize\n@@ -129,7 +131,7 @@ def test_absorption_io(tmp_path):\n assert new_model.redshift.value == 0.5\n assert new_model.alpha_norm.name == \"alpha_norm\"\n assert new_model.alpha_norm.value == 1\n- assert new_model.spectral_model.tag == \"PowerLawSpectralModel\"\n+ assert \"PowerLawSpectralModel\" in new_model.spectral_model.tag\n assert_allclose(new_model.absorption.energy, dominguez.energy)\n assert_allclose(new_model.absorption.param, dominguez.param)\n assert len(new_model.parameters) == 5\n@@ -202,12 +204,16 @@ def make_all_models():\n \n @pytest.mark.parametrize(\"model_class\", MODEL_REGISTRY)\n def test_all_model_classes(model_class):\n- assert model_class.tag == model_class.__name__\n+ if isinstance(model_class.tag, list):\n+ assert model_class.tag[0] == model_class.__name__\n+ else:\n+ assert model_class.tag == model_class.__name__\n \n \n @pytest.mark.parametrize(\"model\", make_all_models())\n def test_all_model_instances(model):\n- assert model.tag == model.__class__.__name__\n+ tag = model.tag[0] if isinstance(model.tag, list) else model.tag\n+ assert tag == model.__class__.__name__\n \n \n @requires_data()\n" }, { "change_type": "MODIFY", "old_path": "gammapy/utils/registry.py", "new_path": "gammapy/utils/registry.py", "diff": "@@ -1,5 +1,4 @@\n # Licensed under a 3-clause BSD style license - see LICENSE.rst\n-\n __all__ = [\"Registry\"]\n \n \n@@ -8,15 +7,15 @@ class Registry(list):\n \n def get_cls(self, tag):\n for cls in self:\n- if hasattr(cls, \"tag\") and cls.tag == tag:\n+ if hasattr(cls, \"tag\") and tag in cls.tag:\n return cls\n raise KeyError(f\"No model found with tag: {tag!r}\")\n \n def __str__(self):\n info = \"Registry\\n\"\n info += \"--------\\n\\n\"\n-\n- len_max = max([len(_.tag) for _ in self])\n+ tags = [_.tag[0] if isinstance(_.tag, list) else _.tag for _ in self]\n+ len_max = max([len(tag) for tag in tags])\n \n for item in self:\n info += f\"\\t{item.tag:{len_max}s}: {item.__name__}\\n\"\n" } ]
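The commit above turns model tags into lists whose first entry is the class name: registry lookup accepts any alias, while serialisation always writes tag[0]. The sketch below is a stripped-down reproduction of that mechanism with a single stub model class standing in for the gammapy registry contents.

```python
class Registry(list):
    """Registry of model classes, looked up by any of their tag aliases."""

    def get_cls(self, tag):
        for cls in self:
            # note: if cls.tag is still a plain string, `in` is a substring test
            if hasattr(cls, "tag") and tag in cls.tag:
                return cls
        raise KeyError(f"No model found with tag: {tag!r}")

class PowerLawSpectralModel:
    # first entry is the class name and is the one used in YAML output
    tag = ["PowerLawSpectralModel", "PL"]

    def to_dict(self):
        tag = self.tag[0] if isinstance(self.tag, list) else self.tag
        return {"type": tag, "parameters": []}

SPECTRAL_MODEL_REGISTRY = Registry([PowerLawSpectralModel])
assert SPECTRAL_MODEL_REGISTRY.get_cls("PL") is PowerLawSpectralModel
print(PowerLawSpectralModel().to_dict())  # {'type': 'PowerLawSpectralModel', 'parameters': []}
```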
cbd77d0db9c0b2ff720d1fb2d0cd2ad19ee1a369
gammapy/gammapy
12.10.2020 14:28:56
BSD 3-Clause New or Revised License
Add option to remove default output in models yaml file Add option full_output (True by default) to model.to_yaml() and .to_dict(). Switching to False removes the entries for min, max, frozen and error if they are the same as the class defaults or nan.
[ { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/core.py", "new_path": "gammapy/modeling/models/core.py", "diff": "@@ -105,10 +105,20 @@ class Model:\n \"\"\"A deep copy.\"\"\"\n return copy.deepcopy(self)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Create dict for YAML serialisation\"\"\"\n tag = self.tag[0] if isinstance(self.tag, list) else self.tag\n- return {\"type\": tag, \"parameters\": self.parameters.to_dict()}\n+ params = self.parameters.to_dict()\n+\n+ if full_output is False:\n+ base = self.__class__\n+ names = self.parameters.names\n+ for k, name in enumerate(names):\n+ init = base.__dict__[name].to_dict()\n+ for item in [\"min\", \"max\", \"frozen\", \"error\"]:\n+ if params[k][item] == init[item] or np.isnan(init[item]):\n+ del params[k][item]\n+ return {\"type\": tag, \"parameters\": params}\n \n @classmethod\n def from_dict(cls, data):\n@@ -281,7 +291,7 @@ class Models(collections.abc.MutableSequence):\n shared_register = _set_link(shared_register, model)\n return models\n \n- def write(self, path, overwrite=False, write_covariance=True):\n+ def write(self, path, overwrite=False, full_output=True, write_covariance=True):\n \"\"\"Write to YAML file.\n \n Parameters\n@@ -315,14 +325,14 @@ class Models(collections.abc.MutableSequence):\n \n path.write_text(self.to_yaml())\n \n- def to_yaml(self):\n+ def to_yaml(self, full_output=True):\n \"\"\"Convert to YAML string.\"\"\"\n- data = self.to_dict()\n+ data = self.to_dict(full_output)\n return yaml.dump(\n data, sort_keys=False, indent=4, width=80, default_flow_style=False\n )\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Convert to dict.\"\"\"\n # update linked parameters labels\n params_list = []\n@@ -338,7 +348,7 @@ class Models(collections.abc.MutableSequence):\n \n models_data = []\n for model in self._models:\n- model_data = model.to_dict()\n+ model_data = model.to_dict(full_output)\n models_data.append(model_data)\n if self._covar_file is not None:\n return {\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/cube.py", "new_path": "gammapy/modeling/models/cube.py", "diff": "@@ -314,18 +314,18 @@ class SkyModel(SkyModelBase):\n \n return self.__class__(**kwargs)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Create dict for YAML serilisation\"\"\"\n data = {}\n data[\"name\"] = self.name\n data[\"type\"] = self.tag\n- data[\"spectral\"] = self.spectral_model.to_dict()\n+ data[\"spectral\"] = self.spectral_model.to_dict(full_output)\n \n if self.spatial_model is not None:\n- data[\"spatial\"] = self.spatial_model.to_dict()\n+ data[\"spatial\"] = self.spatial_model.to_dict(full_output)\n \n if self.temporal_model is not None:\n- data[\"temporal\"] = self.temporal_model.to_dict()\n+ data[\"temporal\"] = self.temporal_model.to_dict(full_output)\n \n if self.apply_irf != self._apply_irf_default:\n data[\"apply_irf\"] = self.apply_irf\n@@ -488,11 +488,11 @@ class BackgroundModel(Model):\n back_values = self.map.data * value\n return self.map.copy(data=back_values)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n data = {}\n data[\"name\"] = self.name\n data[\"type\"] = self.tag\n- data[\"spectral\"] = self.spectral_model.to_dict()\n+ data[\"spectral\"] = self.spectral_model.to_dict(full_output)\n \n if self.filename is not None:\n data[\"filename\"] = self.filename\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/spatial.py", "new_path": 
"gammapy/modeling/models/spatial.py", "diff": "@@ -38,6 +38,7 @@ def compute_sigma_eff(lon_0, lat_0, lon, lat, phi, major_axis, e):\n \n class SpatialModel(Model):\n \"\"\"Spatial model base class.\"\"\"\n+\n _type = \"spatial\"\n \n def __init__(self, **kwargs):\n@@ -142,9 +143,9 @@ class SpatialModel(Model):\n data = values * geom.solid_angle()\n return Map.from_geom(geom=geom, data=data.value, unit=data.unit)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Create dict for YAML serilisation\"\"\"\n- data = super().to_dict()\n+ data = super().to_dict(full_output)\n data[\"frame\"] = self.frame\n data[\"parameters\"] = data.pop(\"parameters\")\n return data\n@@ -605,10 +606,10 @@ class ConstantSpatialModel(SpatialModel):\n evaluation_radius = None\n position = None\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Create dict for YAML serilisation\"\"\"\n # redefined to ignore frame attribute from parent class\n- data = super().to_dict()\n+ data = super().to_dict(full_output)\n data.pop(\"frame\")\n data[\"parameters\"] = data.pop(\"parameters\")\n return data\n@@ -643,10 +644,10 @@ class ConstantFluxSpatialModel(SpatialModel):\n evaluation_radius = None\n position = None\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Create dict for YAML serilisation\"\"\"\n # redefined to ignore frame attribute from parent class\n- data = super().to_dict()\n+ data = super().to_dict(full_output)\n data.pop(\"frame\")\n return data\n \n@@ -785,9 +786,9 @@ class TemplateSpatialModel(SpatialModel):\n m = Map.read(filename)\n return cls(m, normalize=normalize, filename=filename)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n \"\"\"Create dict for YAML serilisation\"\"\"\n- data = super().to_dict()\n+ data = super().to_dict(full_output)\n data[\"filename\"] = self.filename\n data[\"normalize\"] = self.normalize\n data[\"unit\"] = str(self.map.unit)\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/spectral.py", "new_path": "gammapy/modeling/models/spectral.py", "diff": "@@ -57,6 +57,7 @@ def integrate_spectrum(func, emin, emax, ndecade=100, intervals=False):\n \n class SpectralModel(Model):\n \"\"\"Spectral model base class.\"\"\"\n+\n _type = \"spectral\"\n \n def __call__(self, energy):\n@@ -471,11 +472,11 @@ class CompoundSpectralModel(SpectralModel):\n val2 = self.model2(energy)\n return self.operator(val1, val2)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n return {\n \"type\": self.tag[0],\n- \"model1\": self.model1.to_dict(),\n- \"model2\": self.model2.to_dict(),\n+ \"model1\": self.model1.to_dict(full_output),\n+ \"model2\": self.model2.to_dict(full_output),\n \"operator\": self.operator.__name__,\n }\n \n@@ -1249,7 +1250,7 @@ class TemplateSpectralModel(SpectralModel):\n \"\"\"Evaluate the model (static function).\"\"\"\n return self._evaluate((energy,), clip=True)\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n return {\n \"type\": self.tag[0],\n \"energy\": {\n@@ -1334,7 +1335,7 @@ class Absorption:\n points=(self.param, self.energy), values=self.data, **interp_kwargs\n )\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n if self.filename is None:\n return {\n \"type\": self.tag,\n@@ -1539,11 +1540,11 @@ class AbsorbedSpectralModel(SpectralModel):\n absorption = np.power(absorption, alpha_norm)\n return dnde * absorption\n \n- def to_dict(self):\n+ def to_dict(self, full_output=True):\n return {\n \"type\": self.tag,\n- 
\"base_model\": self.spectral_model.to_dict(),\n- \"absorption\": self.absorption.to_dict(),\n+ \"base_model\": self.spectral_model.to_dict(full_output),\n+ \"absorption\": self.absorption.to_dict(full_output),\n \"absorption_parameter\": {\"name\": \"redshift\", \"value\": self.redshift.value,},\n \"parameters\": Parameters([self.redshift, self.alpha_norm]).to_dict(),\n }\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/temporal.py", "new_path": "gammapy/modeling/models/temporal.py", "diff": "@@ -17,6 +17,7 @@ from .core import Model\n class TemporalModel(Model):\n \"\"\"Temporal model base class.\n evaluates on astropy.time.Time objects\"\"\"\n+\n _type = \"temporal\"\n \n def __call__(self, time):\n@@ -72,18 +73,18 @@ class TemporalModel(Model):\n axis\n \"\"\"\n \n-\n import matplotlib.pyplot as plt\n \n ax = plt.gca() if ax is None else ax\n t_min, t_max = time_range\n n_value = 100\n- delta = (t_max - t_min)\n+ delta = t_max - t_min\n times = t_min + delta * np.linspace(0, 1, n_value)\n val = self(times)\n ax.plot(times.mjd, val)\n return ax\n \n+\n class ConstantTemporalModel(TemporalModel):\n \"\"\"Constant temporal model.\"\"\"\n \n@@ -191,8 +192,6 @@ class ExpDecayTemporalModel(TemporalModel):\n return -t0 * value / self.time_sum(t_min, t_max)\n \n \n-\n-\n class GaussianTemporalModel(TemporalModel):\n r\"\"\"A Gaussian temporal profile\n \n@@ -434,6 +433,6 @@ class LightCurveTemplateTemporalModel(TemporalModel):\n def from_dict(cls, data):\n return cls.read(data[\"filename\"])\n \n- def to_dict(self, overwrite=False):\n+ def to_dict(self, full_output=True, overwrite=False):\n \"\"\"Create dict for YAML serilisation\"\"\"\n return {\"type\": self.tag[0], \"filename\": self.filename}\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/tests/test_core.py", "new_path": "gammapy/modeling/models/tests/test_core.py", "diff": "@@ -9,12 +9,14 @@ from gammapy.utils.testing import requires_data\n \n class MyModel(Model):\n \"\"\"Simple model example\"\"\"\n+\n x = Parameter(\"x\", 1, \"cm\")\n y = Parameter(\"y\", 2)\n \n \n class CoModel(Model):\n \"\"\"Compound model example\"\"\"\n+\n norm = Parameter(\"norm\", 42, \"cm\")\n \n def __init__(self, m1, m2, norm=norm.quantity):\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/tests/test_cube.py", "new_path": "gammapy/modeling/models/tests/test_cube.py", "diff": "@@ -59,10 +59,7 @@ def diffuse_model():\n )\n m.data += 42\n spatial_model = TemplateSpatialModel(m, normalize=False)\n- return SkyModel(\n- PowerLawNormSpectralModel(),\n- spatial_model\n- )\n+ return SkyModel(PowerLawNormSpectralModel(), spatial_model)\n \n \n @pytest.fixture(scope=\"session\")\n@@ -560,4 +557,4 @@ def test_fermi_isotropic():\n \n assert_allclose(flux.value, 1.463e-13, rtol=1e-3)\n assert flux.unit == \"MeV-1 cm-2 s-1 sr-1\"\n- assert isinstance(model.spectral_model, CompoundSpectralModel)\n\\ No newline at end of file\n+ assert isinstance(model.spectral_model, CompoundSpectralModel)\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/tests/test_io.py", "new_path": "gammapy/modeling/models/tests/test_io.py", "diff": "@@ -8,6 +8,7 @@ from astropy.utils.data import get_pkg_data_filename\n from gammapy.maps import Map, MapAxis\n from gammapy.modeling.models import (\n MODEL_REGISTRY,\n+ PowerLawSpectralModel,\n AbsorbedSpectralModel,\n Absorption,\n BackgroundModel,\n@@ -233,5 +234,15 @@ def test_missing_parameters():\n assert len(models[\"source1\"].spatial_model.parameters) == 6\n \n 
\n+def test_simplified_output():\n+ model = PowerLawSpectralModel()\n+ full = model.to_dict()\n+ simplified = model.to_dict(full_output=False)\n+ for k, name in enumerate(model.parameters.names):\n+ for item in [\"min\", \"max\", \"frozen\", \"error\"]:\n+ assert item in full[\"parameters\"][k]\n+ assert item not in simplified[\"parameters\"][k]\n+\n+\n def test_registries_print():\n print(MODEL_REGISTRY)\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/tests/test_spectral.py", "new_path": "gammapy/modeling/models/tests/test_spectral.py", "diff": "@@ -776,26 +776,26 @@ def test_integral_error_PowerLaw():\n emax = energy[1:]\n \n powerlaw = PowerLawSpectralModel()\n- powerlaw.parameters['index'].error = 0.4\n- powerlaw.parameters['amplitude'].error = 1e-13\n+ powerlaw.parameters[\"index\"].error = 0.4\n+ powerlaw.parameters[\"amplitude\"].error = 1e-13\n \n- flux, flux_error = powerlaw.integral_error(emin,emax)\n+ flux, flux_error = powerlaw.integral_error(emin, emax)\n \n- assert_allclose(flux.value[0]/1e-13, 5.0, rtol=0.1)\n- assert_allclose(flux_error.value[0]/1e-14, 8.546615432273905, rtol=0.01)\n+ assert_allclose(flux.value[0] / 1e-13, 5.0, rtol=0.1)\n+ assert_allclose(flux_error.value[0] / 1e-14, 8.546615432273905, rtol=0.01)\n \n \n def test_integral_error_ExpCutOffPowerLaw():\n energy = np.linspace(1 * u.TeV, 10 * u.TeV, 10)\n emin = energy[:-1]\n emax = energy[1:]\n- \n+\n exppowerlaw = ExpCutoffPowerLawSpectralModel()\n- exppowerlaw.parameters['index'].error = 0.4\n- exppowerlaw.parameters['amplitude'].error = 1e-13\n- exppowerlaw.parameters['lambda_'].error = 0.03\n- \n+ exppowerlaw.parameters[\"index\"].error = 0.4\n+ exppowerlaw.parameters[\"amplitude\"].error = 1e-13\n+ exppowerlaw.parameters[\"lambda_\"].error = 0.03\n+\n flux, flux_error = exppowerlaw.integral_error(emin, emax)\n- \n- assert_allclose(flux.value[0]/1e-13, 5.05855622, rtol=0.01)\n- assert_allclose(flux_error.value[0]/1e-14, 8.90907063, rtol=0.01)\n+\n+ assert_allclose(flux.value[0] / 1e-13, 5.05855622, rtol=0.01)\n+ assert_allclose(flux_error.value[0] / 1e-14, 8.90907063, rtol=0.01)\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/tests/test_temporal.py", "new_path": "gammapy/modeling/models/tests/test_temporal.py", "diff": "@@ -217,4 +217,3 @@ def test_plot_constant_model():\n constant_model = ConstantTemporalModel(const=1)\n with mpl_plot_check():\n constant_model.plot(time_range)\n-\n" } ]
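The commit above adds a full_output switch to serialisation: with full_output=False, per-parameter entries that just repeat the class defaults (or are nan) are stripped from the output dict. The function below is a self-contained sketch of that pruning step; the Parameter class and model_to_dict wrapper are simplified stand-ins, and only the min/max/frozen/error filtering mirrors the diff.

```python
import numpy as np

class Parameter:
    def __init__(self, name, value, min=np.nan, max=np.nan, frozen=False, error=0):
        self.name, self.value = name, value
        self.min, self.max, self.frozen, self.error = min, max, frozen, error

    def to_dict(self):
        return {"name": self.name, "value": self.value, "min": self.min,
                "max": self.max, "frozen": self.frozen, "error": self.error}

def model_to_dict(parameters, defaults, full_output=True):
    params = [par.to_dict() for par in parameters]
    if not full_output:
        for par, init in zip(params, (d.to_dict() for d in defaults)):
            for item in ["min", "max", "frozen", "error"]:
                # drop the entry when it equals the class default or the default is nan
                if par[item] == init[item] or np.isnan(init[item]):
                    del par[item]
    return {"type": "PowerLawSpectralModel", "parameters": params}

defaults = [Parameter("index", 2.0)]           # class-level default parameter
fitted = [Parameter("index", 2.3, error=0.1)]  # value and error changed by a fit
print(model_to_dict(fitted, defaults, full_output=False))
# -> {'type': 'PowerLawSpectralModel', 'parameters': [{'name': 'index', 'value': 2.3, 'error': 0.1}]}
```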
a624b892b3d05a48a9c9c582222f1c02699293a5
gammapy/gammapy
09.07.2020 08:21:14
BSD 3-Clause New or Revised License
Add PiecewiseBrokenPowerLawSpectralModel Add PiecewiseBrokenPowerLawSpectralModel, a generalised broken power law defined by a set of energy and intensity values (nodes) that are piecewise connected by power laws.
[ { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/__init__.py", "new_path": "gammapy/modeling/models/__init__.py", "diff": "@@ -30,6 +30,7 @@ SPECTRAL_MODEL_REGISTRY = Registry(\n PowerLaw2SpectralModel,\n BrokenPowerLawSpectralModel,\n SmoothBrokenPowerLawSpectralModel,\n+ PiecewiseBrokenPowerLawSpectralModel,\n ExpCutoffPowerLawSpectralModel,\n ExpCutoffPowerLaw3FGLSpectralModel,\n SuperExpCutoffPowerLaw3FGLSpectralModel,\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/spectral.py", "new_path": "gammapy/modeling/models/spectral.py", "diff": "@@ -857,6 +857,97 @@ class SmoothBrokenPowerLawSpectralModel(SpectralModel):\n return pwl * brk\n \n \n+class PiecewiseBrokenPowerLawSpectralModel(SpectralModel):\n+ \"\"\"Piecewise broken power-law at fixed energy nodes.\n+\n+ Parameters\n+ ----------\n+ energy : `~astropy.units.Quantity`\n+ Array of energies at which the model values are given (nodes).\n+ values : array\n+ Array with the initial values of the model at energies ``energy``.\n+ A normalisation parameters is created for each value.\n+ \"\"\"\n+\n+ tag = \"PiecewiseBrokenPowerLawSpectralModel\"\n+\n+ def __init__(self, energy, values, parameters=None):\n+ self._energy = energy\n+ self.init_values = values\n+ if len(values) != len(energy):\n+ raise ValueError(\"dimension mismatch\")\n+ if len(values) < 2:\n+ raise ValueError(\"Input arrays must contians at least 2 elements\")\n+ if parameters is None:\n+ parameters = Parameters(\n+ [Parameter(f\"norm{k}\", 1.0) for k in range(len(values))]\n+ )\n+ for parameter in parameters:\n+ setattr(self, parameter.name, parameter)\n+ self.default_parameters = parameters\n+\n+ @classmethod\n+ def from_parameters(cls, parameters, energy, values):\n+ init = cls(energy, values, parameters=parameters)\n+ return init\n+\n+ @property\n+ def values(self):\n+ return np.array([p.value for p in self.parameters]) * self.init_values\n+\n+ @property\n+ def energy(self):\n+ return self._energy\n+\n+ def __call__(self, energy):\n+ return self.evaluate(energy)\n+\n+ def evaluate(self, energy):\n+ logedata = np.log10(np.atleast_1d(energy.value))\n+ loge = np.log10(self.energy.to(energy.unit).value)\n+ logv = np.log10(self.values.value)\n+ ne = len(loge)\n+ conds = (\n+ [(logedata < loge[1])]\n+ + [\n+ (logedata >= loge[k]) & (logedata < loge[k + 1])\n+ for k in range(1, ne - 2)\n+ ]\n+ + [(logedata >= loge[-2])]\n+ )\n+ a = (logv[1:] - logv[:-1]) / (loge[1:] - loge[:-1])\n+ b = logv[1:] - a * loge[1:]\n+\n+ output = np.zeros(logedata.shape)\n+ for k in range(ne - 1):\n+ output[conds[k]] = 10 ** (a[k] * logedata[conds[k]] + b[k])\n+ return output * self.values.unit\n+\n+ def to_dict(self):\n+ return {\n+ \"type\": self.tag,\n+ \"parameters\": self.parameters.to_dict(),\n+ \"energy\": {\n+ \"data\": self.energy.data.tolist(),\n+ \"unit\": str(self.energy.unit),\n+ },\n+ \"values\": {\n+ \"data\": self.init_values.data.tolist(),\n+ \"unit\": str(self.values.unit),\n+ },\n+ }\n+\n+ @classmethod\n+ def from_dict(cls, data):\n+ energy = u.Quantity(data[\"energy\"][\"data\"], data[\"energy\"][\"unit\"])\n+ values = u.Quantity(data[\"values\"][\"data\"], data[\"values\"][\"unit\"])\n+ if \"parameters\" in data:\n+ parameters = Parameters.from_dict(data[\"parameters\"])\n+ return cls.from_parameters(parameters, energy=energy, values=values)\n+ else:\n+ return cls(energy=energy, values=values)\n+\n+\n class ExpCutoffPowerLawSpectralModel(SpectralModel):\n r\"\"\"Spectral exponential cutoff power-law model.\n \n" }, { "change_type": 
"MODIFY", "old_path": "gammapy/modeling/models/tests/test_io.py", "new_path": "gammapy/modeling/models/tests/test_io.py", "diff": "@@ -13,6 +13,7 @@ from gammapy.modeling.models import (\n BackgroundModel,\n Model,\n Models,\n+ PiecewiseBrokenPowerLawSpectralModel,\n )\n from gammapy.utils.scripts import read_yaml, write_yaml\n from gammapy.utils.testing import requires_data\n@@ -107,6 +108,31 @@ def test_sky_models_io(tmp_path):\n # or check serialised dict content\n \n \n+def test_PiecewiseBrokenPowerLawSpectralModel_io(tmp_path):\n+\n+ energy = [1, 3, 7, 10] * u.TeV\n+ values = [1, 5, 3, 0.5] * u.Unit(\"cm-2 s-1 TeV-1\")\n+ with pytest.raises(ValueError):\n+ PiecewiseBrokenPowerLawSpectralModel(\n+ energy=[1,] * u.TeV, values=[1, 5] * u.Unit(\"cm-2 s-1 TeV-1\")\n+ )\n+ with pytest.raises(ValueError):\n+ PiecewiseBrokenPowerLawSpectralModel(\n+ energy=[1,] * u.TeV, values=[1,] * u.Unit(\"cm-2 s-1 TeV-1\")\n+ )\n+ model = PiecewiseBrokenPowerLawSpectralModel(energy=energy, values=values)\n+ model.parameters[0].value = 2\n+ model_dict = model.to_dict()\n+ parnames = [_[\"name\"] for _ in model_dict[\"parameters\"]]\n+ for k in range(len(parnames)):\n+ assert parnames[k] == f\"norm{k}\"\n+\n+ new_model = PiecewiseBrokenPowerLawSpectralModel.from_dict(model_dict)\n+ assert_allclose(new_model.parameters[0].value, 2)\n+ assert_allclose(new_model.energy, energy)\n+ assert_allclose(new_model.values, [2, 5, 3, 0.5] * values.unit)\n+\n+\n @requires_data()\n def test_absorption_io(tmp_path):\n dominguez = Absorption.read_builtin(\"dominguez\")\n@@ -190,7 +216,10 @@ def make_all_models():\n yield Model.create(\n \"TemplateSpectralModel\", \"spectral\", energy=[1, 2] * u.cm, values=[3, 4] * u.cm\n ) # TODO: add unit validation?\n- yield Model.create(\"GaussianSpectralModel\", \"spectral\")\n+ yield Model.create(\n+ \"PiecewiseBrokenPowerLawSpectralModel\", energy=[1, 2] * u.cm, values=[3, 4] * u.cm\n+ )\n+ yield Model.create(\"GaussianSpectralModel\")\n # TODO: yield Model.create(\"AbsorbedSpectralModel\")\n # TODO: yield Model.create(\"NaimaSpectralModel\")\n # TODO: yield Model.create(\"ScaleSpectralModel\")\n" }, { "change_type": "MODIFY", "old_path": "gammapy/modeling/models/tests/test_spectral.py", "new_path": "gammapy/modeling/models/tests/test_spectral.py", "diff": "@@ -25,6 +25,7 @@ from gammapy.modeling.models import (\n SmoothBrokenPowerLawSpectralModel,\n SuperExpCutoffPowerLaw4FGLSpectralModel,\n TemplateSpectralModel,\n+ PiecewiseBrokenPowerLawSpectralModel,\n )\n from gammapy.utils.testing import (\n assert_quantity_allclose,\n@@ -273,6 +274,16 @@ TEST_MODELS = [\n integral_1_10TeV=u.Quantity(13.522782989735022, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(40.06681812966845, \"TeV cm-2 s-1\"),\n ),\n+ dict(\n+ name=\"pbpl\",\n+ model=PiecewiseBrokenPowerLawSpectralModel(\n+ energy=[1, 3, 7, 10] * u.TeV,\n+ values=[1, 5, 3, 0.5] * u.Unit(\"cm-2 s-1 TeV-1\"),\n+ ),\n+ val_at_2TeV=u.Quantity(2.76058404, \"cm-2 s-1 TeV-1\"),\n+ integral_1_10TeV=u.Quantity(24.757573885411876, \"cm-2 s-1\"),\n+ eflux_1_10TeV=u.Quantity(117.74087966682515, \"TeV cm-2 s-1\"),\n+ ),\n ]\n \n # Add compound models\n@@ -334,11 +345,12 @@ def test_models(spectrum):\n assert_quantity_allclose(model.e_peak, spectrum[\"e_peak\"], rtol=1e-2)\n \n # inverse for ConstantSpectralModel is irrelevant.\n- # inverse for Gaussian has a degeneracy\n+ # inverse for Gaussian and PiecewiseBrokenPowerLawSpectralModel have a degeneracy\n if not (\n isinstance(model, ConstantSpectralModel)\n or spectrum[\"name\"] == \"compound6\"\n or 
spectrum[\"name\"] == \"GaussianSpectralModel\"\n+ or spectrum[\"name\"] == \"pbpl\"\n ):\n assert_quantity_allclose(model.inverse(value), 2 * u.TeV, rtol=0.01)\n \n" } ]
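A quick usage sketch of the new model, using the node values and the expected flux at 2 TeV from the test case in the diff above; it assumes a gammapy build that contains the class exactly as added here:

    import astropy.units as u
    from gammapy.modeling.models import PiecewiseBrokenPowerLawSpectralModel

    # Nodes are joined by power laws, i.e. straight segments in log-log space.
    model = PiecewiseBrokenPowerLawSpectralModel(
        energy=[1, 3, 7, 10] * u.TeV,
        values=[1, 5, 3, 0.5] * u.Unit("cm-2 s-1 TeV-1"),
    )
    print(model(2 * u.TeV))  # ~2.7606 cm-2 s-1 TeV-1, matching val_at_2TeV in the test above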
b406fc46ffe0c426dd34cddb15e083ded421791d
kinverarity1/lasio
01.11.2021 16:26:02
MIT License
Add ignore_comments to documentation - Also change data parsing functions to use 'ignore_data_comments' rather than 'ignore_comments'. This clarifies the difference between the two flags since 'ignore_data_comments' is used in parsing the data section and 'ignore_comments' is used in parsing header sections.
[ { "change_type": "MODIFY", "old_path": "docs/source/header-section.rst", "new_path": "docs/source/header-section.rst", "diff": "@@ -264,6 +264,19 @@ Handling special cases of header lines\n lasio will do its best to read every line from the header section. Some examples\n follow for unusual formattings:\n \n+Comment lines mixed with header lines\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+lasio will, by default, treat header lines starting with a \"#\" hash string as a\n+comment line and ignore it. Spaces before the \"#\" are stripped off before\n+checking for the \"#\".\n+\n+To modify which strings indicate comment lines to ignore pass an\n+ignore_comments tuple to ``lasio.read()`` or ``lasio.examples.open()``.\n+\n+Example:\n+ ``lasio.read(file, ignore_comments=(\"#\", \"%MyComment\")``\n+\n Lines without periods\n ~~~~~~~~~~~~~~~~~~~~~\n \n" }, { "change_type": "MODIFY", "old_path": "lasio/las.py", "new_path": "lasio/las.py", "diff": "@@ -336,7 +336,7 @@ class LASFile(object):\n file_obj,\n (first_line, last_line),\n regexp_subs,\n- ignore_comments=ignore_data_comments,\n+ ignore_data_comments=ignore_data_comments,\n )\n \n # How many curves should the reader attempt to find?\n@@ -370,7 +370,7 @@ class LASFile(object):\n (first_line, last_line),\n regexp_subs,\n value_null_subs,\n- ignore_comments=ignore_data_comments,\n+ ignore_data_comments=ignore_data_comments,\n n_columns=reader_n_columns,\n dtypes=dtypes,\n line_splitter=line_splitter,\n@@ -392,7 +392,7 @@ class LASFile(object):\n (first_line, last_line),\n regexp_subs,\n value_null_subs,\n- ignore_comments=ignore_data_comments,\n+ ignore_data_comments=ignore_data_comments,\n n_columns=reader_n_columns,\n dtypes=dtypes,\n line_splitter=line_splitter,\n" }, { "change_type": "MODIFY", "old_path": "lasio/reader.py", "new_path": "lasio/reader.py", "diff": "@@ -343,7 +343,7 @@ def determine_section_type(section_title):\n return \"Header items\"\n \n \n-def inspect_data_section(file_obj, line_nos, regexp_subs, ignore_comments=\"#\"):\n+def inspect_data_section(file_obj, line_nos, regexp_subs, ignore_data_comments=\"#\"):\n \"\"\"Determine how many columns there are in the data section.\n \n Arguments:\n@@ -352,7 +352,7 @@ def inspect_data_section(file_obj, line_nos, regexp_subs, ignore_comments=\"#\"):\n regexp_subs (list): each item should be a tuple of the pattern and\n substitution string for a call to re.sub() on each line of the\n data section. See defaults.py READ_SUBS and NULL_SUBS for examples.\n- ignore_comments (str): lines beginning with this character will be ignored\n+ ignore_data_comments (str): lines beginning with this character will be ignored\n \n Returns: integer number of columns or -1 where they are different.\n \n@@ -366,7 +366,7 @@ def inspect_data_section(file_obj, line_nos, regexp_subs, ignore_comments=\"#\"):\n for i, line in enumerate(file_obj):\n line_no = line_no + 1\n line = line.strip(\"\\n\").strip()\n- if line.strip().startswith(ignore_comments):\n+ if line.strip().startswith(ignore_data_comments):\n continue\n else:\n for pattern, sub_str in regexp_subs:\n@@ -395,7 +395,7 @@ def read_data_section_iterative_normal_engine(\n line_nos,\n regexp_subs,\n value_null_subs,\n- ignore_comments,\n+ ignore_data_comments,\n n_columns,\n dtypes,\n line_splitter,\n@@ -410,7 +410,7 @@ def read_data_section_iterative_normal_engine(\n data section. 
See defaults.py READ_SUBS and NULL_SUBS for examples.\n value_null_subs (list): list of numerical values to be replaced by\n numpy.nan values.\n- ignore_comments (str): lines beginning with this character will be ignored\n+ ignore_data_comments (str): lines beginning with this character will be ignored\n n_columns (int): expected number of columns\n dtypes (list, \"auto\", False): list of expected data types for each column,\n (each data type can be specified as e.g. `int`,\n@@ -433,7 +433,7 @@ def read_data_section_iterative_normal_engine(\n def items(f, start_line_no, end_line_no):\n for line_no, line in enumerate(f, start=start_line_no+1):\n line = line.strip(\"\\n\").strip()\n- if line.startswith(ignore_comments):\n+ if line.startswith(ignore_data_comments):\n continue\n else:\n for pattern, sub_str in regexp_subs:\n@@ -668,8 +668,8 @@ def parse_header_items_section(\n mnemonic_case (str): 'preserve': keep the case of HeaderItem mnemonics\n 'upper': convert all HeaderItem mnemonics to uppercase\n 'lower': convert all HeaderItem mnemonics to lowercase\n- ignore_comments (False, True, or list): ignore lines starting with these\n- characters; by default True as '#'.\n+ ignore_comments (list): ignore lines starting with these characters; by\n+ default '#'.\n \n Returns:\n :class:`lasio.SectionItems`\n" } ]
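A short usage sketch of the two flags this commit separates; "example.las" is a placeholder file name, and whether lasio.read() accepts ignore_data_comments directly depends on the lasio version:

    import lasio

    # Header sections: tuple of prefixes that mark comment lines to skip.
    las = lasio.read("example.las", ignore_comments=("#", "%MyComment"))

    # Data section: a separate prefix marks comment lines inside the ~ASCII block.
    las = lasio.read("example.las", ignore_data_comments="#")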
7da9f5a965c9ba4606a199eec756e486b783c01c
mycroftai/mycroft-core
20.05.2017 21:28:05
Apache License 2.0
Add functionality to update listener/STT config When a new configuration is discovered, the producer and consumer are shut down, the config is read and a new producer-consumer pair is launched.
[ { "change_type": "MODIFY", "old_path": "mycroft/client/speech/listener.py", "new_path": "mycroft/client/speech/listener.py", "diff": "@@ -70,6 +70,10 @@ class AudioProducer(Thread):\n # http://stackoverflow.com/questions/10733903/pyaudio-input-overflowed\n self.emitter.emit(\"recognizer_loop:ioerror\", ex)\n \n+ def stop(self):\n+ self.state.running = False\n+ self.recognizer.stop()\n+\n \n class AudioConsumer(Thread):\n \"\"\"\n@@ -101,7 +105,7 @@ class AudioConsumer(Thread):\n \n if self.state.sleeping:\n self.wake_up(audio)\n- else:\n+ elif audio is not None:\n self.process(audio)\n \n # TODO: Localization\n@@ -179,7 +183,14 @@ class RecognizerLoopState(object):\n class RecognizerLoop(EventEmitter):\n def __init__(self):\n super(RecognizerLoop, self).__init__()\n+ self._load_config()\n+\n+ def _load_config(self):\n+ \"\"\"\n+ Load configuration parameters from configuration\n+ \"\"\"\n config = ConfigurationManager.get()\n+ self._config_hash = hash(str(config))\n lang = config.get('lang')\n self.config = config.get('listener')\n rate = self.config.get('sample_rate')\n@@ -213,13 +224,21 @@ class RecognizerLoop(EventEmitter):\n def start_async(self):\n self.state.running = True\n queue = Queue()\n- AudioProducer(self.state, queue, self.microphone,\n- self.remote_recognizer, self).start()\n- AudioConsumer(self.state, queue, self, STTFactory.create(),\n- self.wakeup_recognizer, self.mycroft_recognizer).start()\n+ self.producer = AudioProducer(self.state, queue, self.microphone,\n+ self.remote_recognizer, self)\n+ self.producer.start()\n+ self.consumer = AudioConsumer(self.state, queue, self,\n+ STTFactory.create(),\n+ self.wakeup_recognizer,\n+ self.mycroft_recognizer)\n+ self.consumer.start()\n \n def stop(self):\n self.state.running = False\n+ self.producer.stop()\n+ # wait for threads to shutdown\n+ self.producer.join()\n+ self.consumer.join()\n \n def mute(self):\n if self.microphone:\n@@ -240,6 +259,17 @@ class RecognizerLoop(EventEmitter):\n while self.state.running:\n try:\n time.sleep(1)\n+ if self._config_hash != hash(str(ConfigurationManager()\n+ .get())):\n+ LOG.debug('Config has changed, reloading...')\n+ self.reload()\n except KeyboardInterrupt as e:\n LOG.error(e)\n self.stop()\n+\n+ def reload(self):\n+ self.stop()\n+ # load config\n+ self._load_config()\n+ # restart\n+ self.start_async()\n" }, { "change_type": "MODIFY", "old_path": "mycroft/client/speech/mic.py", "new_path": "mycroft/client/speech/mic.py", "diff": "@@ -165,6 +165,7 @@ class ResponsiveRecognizer(speech_recognition.Recognizer):\n # check the config for the flag to save wake words.\n self.save_wake_words = listener_config.get('record_wake_words')\n self.mic_level_file = os.path.join(get_ipc_directory(), \"mic_level\")\n+ self._stop_signaled = False\n \n @staticmethod\n def record_sound_chunk(source):\n@@ -291,6 +292,9 @@ class ResponsiveRecognizer(speech_recognition.Recognizer):\n \n return False\n \n+ def stop(self):\n+ self._stop_signaled = True\n+\n def _wait_until_wake_word(self, source, sec_per_buffer):\n \"\"\"Listen continuously on source until a wake word is spoken\n \n@@ -324,10 +328,9 @@ class ResponsiveRecognizer(speech_recognition.Recognizer):\n \n counter = 0\n \n- while not said_wake_word:\n+ while not said_wake_word and not self._stop_signaled:\n if self._skip_wake_word():\n break\n-\n chunk = self.record_sound_chunk(source)\n \n energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)\n@@ -419,6 +422,8 @@ class ResponsiveRecognizer(speech_recognition.Recognizer):\n \n logger.debug(\"Waiting for 
wake word...\")\n self._wait_until_wake_word(source, sec_per_buffer)\n+ if self._stop_signaled:\n+ return\n \n logger.debug(\"Recording...\")\n emitter.emit(\"recognizer_loop:record_begin\")\n" } ]
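The reload is driven by comparing a hash of the stringified configuration, as in _load_config() and run() above. Below is a minimal, generic sketch of that polling pattern; ConfigWatcher and its callables are illustrative names, not Mycroft APIs:

    import time


    class ConfigWatcher:
        """Poll a config source and fire a callback when its content changes."""

        def __init__(self, load_config, on_change, poll_interval=1.0):
            self._load_config = load_config        # callable returning the current config dict
            self._on_change = on_change            # callable invoked with the new config
            self._poll_interval = poll_interval
            self._config_hash = hash(str(load_config()))

        def check_once(self):
            config = self._load_config()
            new_hash = hash(str(config))
            if new_hash != self._config_hash:      # config changed since the last poll
                self._config_hash = new_hash
                self._on_change(config)            # e.g. stop producer/consumer and restart

        def run(self):
            while True:
                time.sleep(self._poll_interval)
                self.check_once()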
6c4f485323cd6b8355f30a1b867102b06045b815
mycroftai/mycroft-core
16.01.2018 12:14:07
Apache License 2.0
Make handlers for single scheduled events one shot - add_event() now accepts the parameter once, registering the event as a one-shot event. - remove_event() for non-existing events is handled. - Added a test for this.
[ { "change_type": "MODIFY", "old_path": "mycroft/skills/core.py", "new_path": "mycroft/skills/core.py", "diff": "@@ -596,24 +596,26 @@ class MycroftSkill(object):\n text = f.read().replace('{{', '{').replace('}}', '}')\n return text.format(**data or {}).split('\\n')\n \n- def add_event(self, name, handler, need_self=False):\n+ def add_event(self, name, handler, need_self=False, once=False):\n \"\"\"\n Create event handler for executing intent\n \n Args:\n name: IntentParser name\n handler: method to call\n- need_self: optional parameter, when called from a decorated\n- intent handler the function will need the self\n- variable passed as well.\n+ need_self: optional parameter, when called from a decorated\n+ intent handler the function will need the self\n+ variable passed as well.\n+ once: optional parameter, Event handler will be removed\n+ after it has been run once.\n \"\"\"\n \n def wrapper(message):\n try:\n # Indicate that the skill handler is starting\n- name = get_handler_name(handler)\n+ handler_name = get_handler_name(handler)\n self.emitter.emit(Message(\"mycroft.skill.handler.start\",\n- data={'handler': name}))\n+ data={'handler': handler_name}))\n \n stopwatch = Stopwatch()\n with stopwatch:\n@@ -653,24 +655,29 @@ class MycroftSkill(object):\n \n except Exception as e:\n # Convert \"MyFancySkill\" to \"My Fancy Skill\" for speaking\n- name = re.sub(\"([a-z])([A-Z])\", \"\\g<1> \\g<2>\", self.name)\n+ handler_name = re.sub(\"([a-z])([A-Z])\", \"\\g<1> \\g<2>\",\n+ self.name)\n # TODO: Localize\n- self.speak(\n- \"An error occurred while processing a request in \" +\n- name)\n+ self.speak(\"An error occurred while processing a request in \" +\n+ handler_name)\n LOG.error(\n \"An error occurred while processing a request in \" +\n self.name, exc_info=True)\n # indicate completion with exception\n self.emitter.emit(Message('mycroft.skill.handler.complete',\n- data={'handler': name,\n+ data={'handler': handler_name,\n 'exception': e.message}))\n # Indicate that the skill handler has completed\n self.emitter.emit(Message('mycroft.skill.handler.complete',\n- data={'handler': name}))\n+ data={'handler': handler_name}))\n+ if once:\n+ self.remove_event(name)\n \n if handler:\n- self.emitter.on(name, wrapper)\n+ if once:\n+ self.emitter.once(name, wrapper)\n+ else:\n+ self.emitter.on(name, wrapper)\n self.events.append((name, wrapper))\n \n def remove_event(self, name):\n@@ -682,8 +689,15 @@ class MycroftSkill(object):\n \"\"\"\n for _name, _handler in self.events:\n if name == _name:\n- self.events.remove((_name, _handler))\n- self.emitter.remove(_name, _handler)\n+ try:\n+ self.events.remove((_name, _handler))\n+ except ValueError:\n+ pass\n+ try:\n+ self.emitter.remove(_name, _handler)\n+ except ValueError:\n+ LOG.debug('{} is not registered in the emitter'.format(\n+ _name))\n \n def register_intent(self, intent_parser, handler, need_self=False):\n \"\"\"\n@@ -946,12 +960,12 @@ class MycroftSkill(object):\n Underlying method for schedle_event and schedule_repeating_event.\n Takes scheduling information and sends it of on the message bus.\n \"\"\"\n- data = data or {}\n if not name:\n name = self.name + handler.__name__\n name = self._unique_name(name)\n \n- self.add_event(name, handler, False)\n+ data = data or {}\n+ self.add_event(name, handler, once=not repeat)\n event_data = {}\n event_data['time'] = time.mktime(when.timetuple())\n event_data['event'] = name\n" }, { "change_type": "MODIFY", "old_path": "test/unittests/skills/core.py", "new_path": 
"test/unittests/skills/core.py", "diff": "@@ -447,7 +447,7 @@ class MycroftSkillTest(unittest.TestCase):\n s.bind(emitter)\n s.schedule_event(s.handler, datetime.now(), name='sched_handler1')\n # Check that the handler was registered with the emitter\n- self.assertEqual(emitter.on.call_args[0][0], '0:sched_handler1')\n+ self.assertEqual(emitter.once.call_args[0][0], '0:sched_handler1')\n self.assertTrue('0:sched_handler1' in zip(*s.events)[0])\n \n @mock.patch.object(Configuration, 'get')\n@@ -468,15 +468,40 @@ class MycroftSkillTest(unittest.TestCase):\n self.assertEqual(emitter.remove.call_args[0][0], '0:sched_handler1')\n self.assertTrue('0:sched_handler1' not in zip(*s.events)[0])\n \n+ @mock.patch.object(Configuration, 'get')\n+ def test_run_scheduled_event(self, mock_config_get):\n+ test_config = {\n+ 'skills': {\n+ }\n+ }\n+ mock_config_get.return_value = test_config\n+ emitter = mock.MagicMock()\n+ s = TestSkill1()\n+ with mock.patch.object(s, '_settings',\n+ create=True, value=mock.MagicMock()):\n+ s.bind(emitter)\n+ s.schedule_event(s.handler, datetime.now(), name='sched_handler1')\n+ # Check that the handler was registered with the emitter\n+ emitter.once.call_args[0][1](Message('message'))\n+ # Check that the handler was run\n+ self.assertTrue(s.handler_run)\n+ # Check that the handler was removed from the list of registred\n+ # handler\n+ self.assertTrue('0:sched_handler1' not in zip(*s.events)[0])\n+\n \n class TestSkill1(MycroftSkill):\n+ def __init__(self):\n+ super(TestSkill1, self).__init__()\n+ self.handler_run = False\n+\n \"\"\" Test skill for normal intent builder syntax \"\"\"\n def initialize(self):\n i = IntentBuilder('a').require('Keyword').build()\n self.register_intent(i, self.handler)\n \n def handler(self, message):\n- pass\n+ self.handler_run = True\n \n def stop(self):\n pass\n" } ]
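A stripped-down sketch of the once semantics relied on above: the wrapper deregisters itself after its first call, and removing an already-removed handler is tolerated. All names here are illustrative, not the Mycroft emitter:

    class SimpleEmitter:
        """Tiny event emitter with on/once/remove, mirroring the pattern in the diff."""

        def __init__(self):
            self._handlers = {}

        def on(self, name, handler):
            self._handlers.setdefault(name, []).append(handler)

        def once(self, name, handler):
            def wrapper(*args, **kwargs):
                try:
                    return handler(*args, **kwargs)
                finally:
                    self.remove(name, wrapper)     # one-shot: deregister after the first run
            self.on(name, wrapper)

        def remove(self, name, handler):
            try:
                self._handlers.get(name, []).remove(handler)
            except ValueError:
                pass                               # not registered (any more); ignore

        def emit(self, name, *args, **kwargs):
            for handler in list(self._handlers.get(name, [])):
                handler(*args, **kwargs)


    emitter = SimpleEmitter()
    emitter.once('sched_handler1', lambda msg: print('ran', msg))
    emitter.emit('sched_handler1', 'once')   # prints "ran once"
    emitter.emit('sched_handler1', 'again')  # nothing happens, handler already removed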
07ce2d98d0c069e2d4d04d1f9e5bc21e0e520fee
mycroftai/mycroft-core
28.02.2018 18:53:56
Apache License 2.0
Use function attributes for intent decorators This removes the need for a shared module-level list, which misbehaves when multiple skills initialize at once.
[ { "change_type": "MODIFY", "old_path": "mycroft/skills/core.py", "new_path": "mycroft/skills/core.py", "diff": "@@ -18,7 +18,6 @@ import sys\n import time\n import csv\n import inspect\n-from functools import wraps\n from inspect import getargspec\n from datetime import datetime, timedelta\n \n@@ -126,7 +125,7 @@ def load_skill(skill_descriptor, emitter, skill_id, BLACKLISTED_SKILLS=None):\n # The very first time a skill is run, speak the intro\n first_run = skill.settings.get(\"__mycroft_skill_firstrun\", True)\n if first_run:\n- LOG.info(\"First run of \"+skill_descriptor[\"name\"])\n+ LOG.info(\"First run of \" + skill_descriptor[\"name\"])\n skill.settings[\"__mycroft_skill_firstrun\"] = False\n skill.settings.store()\n intro = skill.get_intro_message()\n@@ -166,21 +165,16 @@ def get_handler_name(handler):\n return name\n \n \n-# Lists used when adding skill handlers using decorators\n-_intent_list = []\n-_intent_file_list = []\n-\n-\n def intent_handler(intent_parser):\n \"\"\" Decorator for adding a method as an intent handler. \"\"\"\n \n def real_decorator(func):\n- @wraps(func)\n- def handler_method(*args, **kwargs):\n- return func(*args, **kwargs)\n-\n- _intent_list.append((intent_parser, func))\n- return handler_method\n+ # Store the intent_parser inside the function\n+ # This will be used later to call register_intent\n+ if not hasattr(func, 'intents'):\n+ func.intents = []\n+ func.intents.append(intent_parser)\n+ return func\n \n return real_decorator\n \n@@ -189,12 +183,12 @@ def intent_file_handler(intent_file):\n \"\"\" Decorator for adding a method as an intent file handler. \"\"\"\n \n def real_decorator(func):\n- @wraps(func)\n- def handler_method(*args, **kwargs):\n- return func(*args, **kwargs)\n-\n- _intent_file_list.append((intent_file, func))\n- return handler_method\n+ # Store the intent_file inside the function\n+ # This will be used later to call register_intent_file\n+ if not hasattr(func, 'intent_files'):\n+ func.intent_files = []\n+ func.intent_files.append(intent_file)\n+ return func\n \n return real_decorator\n \n@@ -455,14 +449,20 @@ class MycroftSkill(object):\n def _register_decorated(self):\n \"\"\"\n Register all intent handlers that have been decorated with an intent.\n+\n+ Looks for all functions that have been marked by a decorator\n+ and read the intent data from them\n \"\"\"\n- global _intent_list, _intent_file_list\n- for intent_parser, handler in _intent_list:\n- self.register_intent(intent_parser, handler, need_self=True)\n- for intent_file, handler in _intent_file_list:\n- self.register_intent_file(intent_file, handler, need_self=True)\n- _intent_list = []\n- _intent_file_list = []\n+ for attr_name in dir(self):\n+ method = getattr(self, attr_name)\n+\n+ if hasattr(method, 'intents'):\n+ for intent in getattr(method, 'intents'):\n+ self.register_intent(intent, method)\n+\n+ if hasattr(method, 'intent_files'):\n+ for intent_file in getattr(method, 'intent_files'):\n+ self.register_intent_file(intent_file, method)\n \n def translate(self, text, data=None):\n \"\"\"\n@@ -572,9 +572,8 @@ class MycroftSkill(object):\n Args:\n name: IntentParser name\n handler: method to call\n- need_self: optional parameter, when called from a\n- decorated intent handler the function will\n- need the self variable passed as well.\n+ need_self: optional parameter, pass if giving a local\n+ function or lambda (not defined in the class)\n once: optional parameter, Event handler will be\n removed after it has been run once.\n handler_info: base message when reporting 
skill event handler\n@@ -679,7 +678,7 @@ class MycroftSkill(object):\n removed = True\n return removed\n \n- def register_intent(self, intent_parser, handler, need_self=False):\n+ def register_intent(self, intent_parser, handler):\n \"\"\"\n Register an Intent with the intent service.\n \n@@ -687,9 +686,6 @@ class MycroftSkill(object):\n intent_parser: Intent or IntentBuilder object to parse\n utterance for the handler.\n handler: function to register with intent\n- need_self: optional parameter, when called from a decorated\n- intent handler the function will need the self\n- variable passed as well.\n \"\"\"\n if type(intent_parser) == IntentBuilder:\n intent_parser = intent_parser.build()\n@@ -701,10 +697,10 @@ class MycroftSkill(object):\n munge_intent_parser(intent_parser, name, self.skill_id)\n self.emitter.emit(Message(\"register_intent\", intent_parser.__dict__))\n self.registered_intents.append((name, intent_parser))\n- self.add_event(intent_parser.name, handler, need_self,\n+ self.add_event(intent_parser.name, handler, False,\n 'mycroft.skill.handler')\n \n- def register_intent_file(self, intent_file, handler, need_self=False):\n+ def register_intent_file(self, intent_file, handler):\n \"\"\"\n Register an Intent file with the intent service.\n For example:\n@@ -729,14 +725,13 @@ class MycroftSkill(object):\n intent_file: name of file that contains example queries\n that should activate the intent\n handler: function to register with intent\n- need_self: use for decorator. See <register_intent>\n \"\"\"\n name = str(self.skill_id) + ':' + intent_file\n self.emitter.emit(Message(\"padatious:register_intent\", {\n \"file_name\": join(self.vocab_dir, intent_file),\n \"name\": name\n }))\n- self.add_event(name, handler, need_self, 'mycroft.skill.handler')\n+ self.add_event(name, handler, False, 'mycroft.skill.handler')\n \n def register_entity_file(self, entity_file):\n \"\"\"\n@@ -1115,6 +1110,7 @@ class FallbackSkill(MycroftSkill):\n ident = message.context['ident']\n report_timing(ident, 'fallback_handler', stopwatch,\n {'handler': handler_name})\n+\n return handler\n \n @classmethod\n@@ -1137,11 +1133,13 @@ class FallbackSkill(MycroftSkill):\n register a fallback with the list of fallback handlers\n and with the list of handlers registered by this instance\n \"\"\"\n+\n def wrapper(*args, **kwargs):\n if handler(*args, **kwargs):\n self.make_active()\n return True\n return False\n+\n self.instance_fallback_handlers.append(wrapper)\n self._register_fallback(handler, priority)\n \n" } ]
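A self-contained sketch of the pattern the commit switches to: the decorator stores its metadata on the function object, and registration later scans the instance for marked methods. The skill class and intent name below are made up:

    def intent_handler(intent_name):
        """Mark a method with the intent it handles via a function attribute."""
        def real_decorator(func):
            if not hasattr(func, 'intents'):
                func.intents = []
            func.intents.append(intent_name)
            return func
        return real_decorator


    class SkillBase:
        def register_intent(self, intent, handler):
            print('registering', intent, '->', handler.__name__)

        def _register_decorated(self):
            """Scan for methods carrying intent metadata; no shared module state needed."""
            for attr_name in dir(self):
                method = getattr(self, attr_name)
                for intent in getattr(method, 'intents', []):
                    self.register_intent(intent, method)


    class HelloSkill(SkillBase):
        @intent_handler('hello.intent')
        def handle_hello(self, message):
            pass


    HelloSkill()._register_decorated()   # registering hello.intent -> handle_hello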
fc8424c9ee9c83f4962e171699f13c39407637cc
mycroftai/mycroft-core
10.05.2018 18:01:07
Apache License 2.0
Make skill ids use skill folder This is necessary because in Python 3, hash(x) for strings changes on every start of the application. Using the skill folder makes it consistent. In addition, the skill folder makes it easier to debug parts of the application than something like an md5sum.
[ { "change_type": "MODIFY", "old_path": "mycroft/skills/core.py", "new_path": "mycroft/skills/core.py", "diff": "@@ -37,7 +37,7 @@ from mycroft.filesystem import FileSystemAccess\n from mycroft.messagebus.message import Message\n from mycroft.metrics import report_metric, report_timing, Stopwatch\n from mycroft.skills.settings import SkillSettings\n-from mycroft.skills.skill_data import (load_vocabulary, load_regex, to_letters,\n+from mycroft.skills.skill_data import (load_vocabulary, load_regex, to_alnum,\n munge_regex, munge_intent_parser)\n from mycroft.util import resolve_resource_file\n from mycroft.util.log import LOG\n@@ -63,13 +63,13 @@ def unmunge_message(message, skill_id):\n \n Args:\n message (Message): Intent result message\n- skill_id (int): skill identifier\n+ skill_id (str): skill identifier\n \n Returns:\n Message without clear keywords\n \"\"\"\n if isinstance(message, Message) and isinstance(message.data, dict):\n- skill_id = to_letters(skill_id)\n+ skill_id = to_alnum(skill_id)\n for key in message.data:\n if key[:len(skill_id)] == skill_id:\n new_key = key[len(skill_id):]\n@@ -807,7 +807,7 @@ class MycroftSkill(object):\n raise ValueError('context should be a string')\n if not isinstance(word, str):\n raise ValueError('word should be a string')\n- context = to_letters(self.skill_id) + context\n+ context = to_alnum(self.skill_id) + context\n self.emitter.emit(Message('add_context',\n {'context': context, 'word': word}))\n \n@@ -827,7 +827,7 @@ class MycroftSkill(object):\n entity_type: Intent handler entity to tie the word to\n \"\"\"\n self.emitter.emit(Message('register_vocab', {\n- 'start': entity, 'end': to_letters(self.skill_id) + entity_type\n+ 'start': entity, 'end': to_alnum(self.skill_id) + entity_type\n }))\n \n def register_regex(self, regex_str):\n" }, { "change_type": "MODIFY", "old_path": "mycroft/skills/intent_service.py", "new_path": "mycroft/skills/intent_service.py", "diff": "@@ -172,7 +172,7 @@ class IntentService(object):\n Returns:\n (str) Skill name or the skill id if the skill wasn't found\n \"\"\"\n- return self.skill_names.get(int(skill_id), skill_id)\n+ return self.skill_names.get(skill_id, skill_id)\n \n def reset_converse(self, message):\n \"\"\"Let skills know there was a problem with speech recognition\"\"\"\n@@ -357,7 +357,7 @@ class IntentService(object):\n if best_intent and best_intent.get('confidence', 0.0) > 0.0:\n self.update_context(best_intent)\n # update active skills\n- skill_id = int(best_intent['intent_type'].split(\":\")[0])\n+ skill_id = best_intent['intent_type'].split(\":\")[0]\n self.add_active_skill(skill_id)\n return best_intent\n \n" }, { "change_type": "MODIFY", "old_path": "mycroft/skills/main.py", "new_path": "mycroft/skills/main.py", "diff": "@@ -311,7 +311,7 @@ class SkillManager(Thread):\n skill_path = skill_path.rstrip('/')\n skill = self.loaded_skills.setdefault(skill_path, {})\n skill.update({\n- \"id\": hash(skill_path),\n+ \"id\": basename(skill_path),\n \"path\": skill_path\n })\n \n@@ -452,7 +452,7 @@ class SkillManager(Thread):\n If supported, the conversation is invoked.\n \"\"\"\n \n- skill_id = int(message.data[\"skill_id\"])\n+ skill_id = message.data[\"skill_id\"]\n utterances = message.data[\"utterances\"]\n lang = message.data[\"lang\"]\n \n" }, { "change_type": "MODIFY", "old_path": "mycroft/skills/skill_data.py", "new_path": "mycroft/skills/skill_data.py", "diff": "@@ -80,7 +80,7 @@ def load_vocabulary(basedir, emitter, skill_id):\n \"\"\"\n for vocab_file in listdir(basedir):\n if 
vocab_file.endswith(\".voc\"):\n- vocab_type = to_letters(skill_id) + splitext(vocab_file)[0]\n+ vocab_type = to_alnum(skill_id) + splitext(vocab_file)[0]\n load_vocab_from_file(\n join(basedir, vocab_file), vocab_type, emitter)\n \n@@ -92,28 +92,24 @@ def load_regex(basedir, emitter, skill_id):\n basedir (str): path of directory to load from\n emitter (messagebus emitter): websocket used to send the vocab to\n the intent service\n- skill_id (int): skill identifier\n+ skill_id (str): skill identifier\n \"\"\"\n for regex_type in listdir(basedir):\n if regex_type.endswith(\".rx\"):\n- load_regex_from_file(\n- join(basedir, regex_type), emitter, skill_id)\n+ load_regex_from_file(join(basedir, regex_type), emitter, skill_id)\n \n \n-def to_letters(number):\n- \"\"\"Convert number to string of letters.\n+def to_alnum(skill_id):\n+ \"\"\"Convert a skill id to only alphanumeric characters\n \n- 0 -> A, 1 -> B, etc.\n+ Non alpha-numeric characters are converted to \"_\"\n \n Args:\n- number (int): number to be converted\n+ skill_id (str): identifier to be converted\n Returns:\n (str) String of letters\n \"\"\"\n- ret = ''\n- for n in str(number).strip('-'):\n- ret += chr(65 + int(n))\n- return ret\n+ return ''.join(c if c.isalnum() else '_' for c in str(skill_id))\n \n \n def munge_regex(regex, skill_id):\n@@ -121,11 +117,11 @@ def munge_regex(regex, skill_id):\n \n Args:\n regex (str): regex string\n- skill_id (int): skill identifier\n+ skill_id (str): skill identifier\n Returns:\n (str) munged regex\n \"\"\"\n- base = '(?P<' + to_letters(skill_id)\n+ base = '(?P<' + to_alnum(skill_id)\n return base.join(regex.split('(?P<'))\n \n \n@@ -150,7 +146,7 @@ def munge_intent_parser(intent_parser, name, skill_id):\n intent_parser.name = name\n \n # Munge keywords\n- skill_id = to_letters(skill_id)\n+ skill_id = to_alnum(skill_id)\n # Munge required keyword\n reqs = []\n for i in intent_parser.requires:\n" }, { "change_type": "MODIFY", "old_path": "test/unittests/skills/core.py", "new_path": "test/unittests/skills/core.py", "diff": "@@ -73,17 +73,18 @@ class MycroftSkillTest(unittest.TestCase):\n \n def check_regex_from_file(self, filename, result_list=None):\n result_list = result_list or []\n- load_regex_from_file(join(self.regex_path, filename), self.emitter, 0)\n+ regex_file = join(self.regex_path, filename)\n+ load_regex_from_file(regex_file, self.emitter, 'A')\n self.check_emitter(result_list)\n \n def check_vocab(self, path, result_list=None):\n result_list = result_list or []\n- load_vocabulary(path, self.emitter, 0)\n+ load_vocabulary(path, self.emitter, 'A')\n self.check_emitter(result_list)\n \n def check_regex(self, path, result_list=None):\n result_list = result_list or []\n- load_regex(path, self.emitter, 0)\n+ load_regex(path, self.emitter, 'A')\n self.check_emitter(result_list)\n \n def check_emitter(self, result_list):\n@@ -231,7 +232,7 @@ class MycroftSkillTest(unittest.TestCase):\n s.bind(self.emitter)\n s.initialize()\n expected = [{'at_least_one': [],\n- 'name': '0:a',\n+ 'name': 'A:a',\n 'optional': [],\n 'requires': [('AKeyword', 'AKeyword')]}]\n self.check_register_intent(expected)\n@@ -241,7 +242,7 @@ class MycroftSkillTest(unittest.TestCase):\n s.bind(self.emitter)\n s.initialize()\n expected = [{'at_least_one': [],\n- 'name': '0:a',\n+ 'name': 'A:a',\n 'optional': [],\n 'requires': [('AKeyword', 'AKeyword')]}]\n \n@@ -260,7 +261,7 @@ class MycroftSkillTest(unittest.TestCase):\n s.bind(self.emitter)\n s.initialize()\n expected = [{'at_least_one': [],\n- 'name': '0:a',\n+ 'name': 
'A:a',\n 'optional': [],\n 'requires': [('AKeyword', 'AKeyword')]}]\n self.check_register_intent(expected)\n@@ -334,12 +335,13 @@ class MycroftSkillTest(unittest.TestCase):\n sys.path.append(abspath(dirname(__file__)))\n SimpleSkill5 = __import__('decorator_test_skill').TestSkill\n s = SimpleSkill5()\n+ s.skill_id = 'A'\n s.vocab_dir = join(dirname(__file__), 'intent_file')\n s.bind(self.emitter)\n s.initialize()\n s._register_decorated()\n expected = [{'at_least_one': [],\n- 'name': '0:a',\n+ 'name': 'A:a',\n 'optional': [],\n 'requires': [('AKeyword', 'AKeyword')]},\n {\n@@ -447,8 +449,8 @@ class MycroftSkillTest(unittest.TestCase):\n s.bind(emitter)\n s.schedule_event(s.handler, datetime.now(), name='sched_handler1')\n # Check that the handler was registered with the emitter\n- self.assertEqual(emitter.once.call_args[0][0], '0:sched_handler1')\n- self.assertTrue('0:sched_handler1' in [e[0] for e in s.events])\n+ self.assertEqual(emitter.once.call_args[0][0], 'A:sched_handler1')\n+ self.assertTrue('A:sched_handler1' in [e[0] for e in s.events])\n \n @mock.patch.dict(Configuration._Configuration__config, BASE_CONF)\n def test_remove_scheduled_event(self):\n@@ -457,11 +459,11 @@ class MycroftSkillTest(unittest.TestCase):\n s.bind(emitter)\n s.schedule_event(s.handler, datetime.now(), name='sched_handler1')\n # Check that the handler was registered with the emitter\n- self.assertTrue('0:sched_handler1' in [e[0] for e in s.events])\n+ self.assertTrue('A:sched_handler1' in [e[0] for e in s.events])\n s.cancel_scheduled_event('sched_handler1')\n # Check that the handler was removed\n- self.assertEqual(emitter.remove.call_args[0][0], '0:sched_handler1')\n- self.assertTrue('0:sched_handler1' not in [e[0] for e in s.events])\n+ self.assertEqual(emitter.remove.call_args[0][0], 'A:sched_handler1')\n+ self.assertTrue('A:sched_handler1' not in [e[0] for e in s.events])\n \n @mock.patch.dict(Configuration._Configuration__config, BASE_CONF)\n def test_run_scheduled_event(self):\n@@ -477,10 +479,16 @@ class MycroftSkillTest(unittest.TestCase):\n self.assertTrue(s.handler_run)\n # Check that the handler was removed from the list of registred\n # handler\n- self.assertTrue('0:sched_handler1' not in [e[0] for e in s.events])\n+ self.assertTrue('A:sched_handler1' not in [e[0] for e in s.events])\n \n \n-class SimpleSkill1(MycroftSkill):\n+class _TestSkill(MycroftSkill):\n+ def __init__(self):\n+ super().__init__()\n+ self.skill_id = 'A'\n+\n+\n+class SimpleSkill1(_TestSkill):\n def __init__(self):\n super(SimpleSkill1, self).__init__()\n self.handler_run = False\n@@ -497,8 +505,10 @@ class SimpleSkill1(MycroftSkill):\n pass\n \n \n-class SimpleSkill2(MycroftSkill):\n+class SimpleSkill2(_TestSkill):\n \"\"\" Test skill for intent builder without .build() \"\"\"\n+ skill_id = 'A'\n+\n def initialize(self):\n i = IntentBuilder('a').require('Keyword')\n self.register_intent(i, self.handler)\n@@ -510,8 +520,10 @@ class SimpleSkill2(MycroftSkill):\n pass\n \n \n-class SimpleSkill3(MycroftSkill):\n+class SimpleSkill3(_TestSkill):\n \"\"\" Test skill for invalid Intent for register_intent \"\"\"\n+ skill_id = 'A'\n+\n def initialize(self):\n self.register_intent('string', self.handler)\n \n@@ -522,8 +534,10 @@ class SimpleSkill3(MycroftSkill):\n pass\n \n \n-class SimpleSkill4(MycroftSkill):\n+class SimpleSkill4(_TestSkill):\n \"\"\" Test skill for padatious intent \"\"\"\n+ skill_id = 'A'\n+\n def initialize(self):\n self.register_intent_file('test.intent', self.handler)\n 
self.register_entity_file('test_ent.entity')\n" } ]
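The key helper from this change, reproduced standalone: skill folder names are munged into alphanumeric prefixes instead of being hashed, since Python 3 randomizes string hashes between interpreter runs. The folder name in the example is made up:

    def to_alnum(skill_id):
        """Convert a skill id to only alphanumeric characters; other characters become '_'."""
        return ''.join(c if c.isalnum() else '_' for c in str(skill_id))


    print(to_alnum('mycroft-weather.mycroftai'))   # mycroft_weather_mycroftai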
1c3543f5e5d60b5eb3d00f52e6a50d329de250fc
mycroftai/mycroft-core
09.01.2019 18:27:02
Apache License 2.0
Support for audio seek The audioservice can now jump forward and backward in the audio stream/file. The functionality is accessed via the AudioService class's seek_forward(), seek_backward() and seek() methods.
[ { "change_type": "MODIFY", "old_path": "mycroft/audio/audioservice.py", "new_path": "mycroft/audio/audioservice.py", "diff": "@@ -187,6 +187,8 @@ class AudioService:\n self.bus.on('mycroft.audio.service.next', self._next)\n self.bus.on('mycroft.audio.service.prev', self._prev)\n self.bus.on('mycroft.audio.service.track_info', self._track_info)\n+ self.bus.on('mycroft.audio.service.seek_forward', self._seek_forward)\n+ self.bus.on('mycroft.audio.service.seek_backward', self._seek_backward)\n self.bus.on('recognizer_loop:audio_output_start', self._lower_volume)\n self.bus.on('recognizer_loop:record_begin', self._lower_volume)\n self.bus.on('recognizer_loop:audio_output_end', self._restore_volume)\n@@ -421,6 +423,28 @@ class AudioService:\n self.bus.emit(Message('mycroft.audio.service.track_info_reply',\n data=track_info))\n \n+ def _seek_forward(self, message):\n+ \"\"\"\n+ Handle message bus command to skip X seconds\n+\n+ Args:\n+ message: message bus message\n+ \"\"\"\n+ seconds = message.data.get(\"seconds\", 1)\n+ if self.current:\n+ self.current.seek_forward(seconds)\n+\n+ def _seek_backward(self, message):\n+ \"\"\"\n+ Handle message bus command to rewind X seconds\n+\n+ Args:\n+ message: message bus message\n+ \"\"\"\n+ seconds = message.data.get(\"seconds\", 1)\n+ if self.current:\n+ self.current.seek_backward(seconds)\n+\n def setup_pulseaudio_handlers(self, pulse_choice=None):\n \"\"\"\n Select functions for handling lower volume/restore of\n@@ -455,6 +479,10 @@ class AudioService:\n self.bus.remove('mycroft.audio.service.next', self._next)\n self.bus.remove('mycroft.audio.service.prev', self._prev)\n self.bus.remove('mycroft.audio.service.track_info', self._track_info)\n+ self.bus.remove('mycroft.audio.service.seek_forward',\n+ self._seek_forward)\n+ self.bus.remove('mycroft.audio.service.seek_backward',\n+ self._seek_backward)\n self.bus.remove('recognizer_loop:audio_output_start',\n self._lower_volume)\n self.bus.remove('recognizer_loop:record_begin', self._lower_volume)\n" }, { "change_type": "MODIFY", "old_path": "mycroft/audio/services/__init__.py", "new_path": "mycroft/audio/services/__init__.py", "diff": "@@ -115,6 +115,26 @@ class AudioBackend:\n \"\"\"\n pass\n \n+ @abstractmethod\n+ def seek_forward(self, seconds=1):\n+ \"\"\"\n+ skip X seconds\n+\n+ Args:\n+ seconds (int): number of seconds to seek, if negative rewind\n+ \"\"\"\n+ pass\n+\n+ @abstractmethod\n+ def seek_backward(self, seconds=1):\n+ \"\"\"\n+ rewind X seconds\n+\n+ Args:\n+ seconds (int): number of seconds to seek, if negative rewind\n+ \"\"\"\n+ pass\n+\n def track_info(self):\n \"\"\"\n Fetch info about current playing track.\n" }, { "change_type": "MODIFY", "old_path": "mycroft/audio/services/vlc/__init__.py", "new_path": "mycroft/audio/services/vlc/__init__.py", "diff": "@@ -120,6 +120,28 @@ class VlcService(AudioBackend):\n ret['name'] = t.get_meta(meta.Title)\n return ret\n \n+ def seek_forward(self, seconds=1):\n+ \"\"\"\n+ skip X seconds\n+\n+ Args:\n+ seconds (int): number of seconds to seek, if negative rewind\n+ \"\"\"\n+ seconds = seconds * 1000\n+ current_time = self.player.get_time()\n+ self.player.set_time(current_time + seconds)\n+\n+ def seek_backward(self, seconds=1):\n+ \"\"\"\n+ rewind X seconds\n+\n+ Args:\n+ seconds (int): number of seconds to seek, if negative rewind\n+ \"\"\"\n+ seconds = seconds * 1000\n+ current_time = self.player.get_time()\n+ self.player.set_time(current_time - seconds)\n+\n \n def load_service(base_config, bus):\n backends = base_config.get('backends', 
[])\n" }, { "change_type": "MODIFY", "old_path": "mycroft/skills/audioservice.py", "new_path": "mycroft/skills/audioservice.py", "diff": "@@ -121,6 +121,38 @@ class AudioService:\n \"\"\" Resume paused playback. \"\"\"\n self.bus.emit(Message('mycroft.audio.service.resume'))\n \n+ def seek(self, seconds=1):\n+ \"\"\"\n+ seek X seconds\n+\n+ Args:\n+ seconds (int): number of seconds to seek, if negative rewind\n+ \"\"\"\n+ if seconds < 0:\n+ self.seek_backward(abs(seconds))\n+ else:\n+ self.seek_forward(seconds)\n+\n+ def seek_forward(self, seconds=1):\n+ \"\"\"\n+ skip ahead X seconds\n+\n+ Args:\n+ seconds (int): number of seconds to skip\n+ \"\"\"\n+ self.bus.emit(Message('mycroft.audio.service.seek_forward',\n+ {\"seconds\": seconds}))\n+\n+ def seek_backward(self, seconds=1):\n+ \"\"\"\n+ rewind X seconds\n+\n+ Args:\n+ seconds (int): number of seconds to rewind\n+ \"\"\"\n+ self.bus.emit(Message('mycroft.audio.service.seek_backward',\n+ {\"seconds\": seconds}))\n+\n def track_info(self):\n \"\"\" Request information of current playing track.\n \n" } ]
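A hypothetical skill using the new seek API; the skill class, intent files and second counts are invented for illustration, while the AudioService helper and the emitted message names follow the diff above (self.bus is assumed to be the skill's messagebus connection):

    from mycroft import MycroftSkill, intent_file_handler
    from mycroft.skills.audioservice import AudioService


    class SeekDemoSkill(MycroftSkill):
        def initialize(self):
            self.audio = AudioService(self.bus)

        @intent_file_handler('skip.ahead.intent')   # hypothetical intent file
        def handle_skip_ahead(self, message):
            self.audio.seek_forward(30)             # emits mycroft.audio.service.seek_forward

        @intent_file_handler('rewind.intent')       # hypothetical intent file
        def handle_rewind(self, message):
            self.audio.seek(-10)                    # negative seconds delegate to seek_backward()


    def create_skill():
        return SeekDemoSkill()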
f6347ae47c872b40339d9565a9cb29da5bca8716
mycroftai/mycroft-core
12.04.2019 18:06:03
Apache License 2.0
Replace hashed meta with skill_gid as identifier This also removes the notion of an owner skill; all skills may now update settings on the server.
[ { "change_type": "MODIFY", "old_path": "mycroft/skills/settings.py", "new_path": "mycroft/skills/settings.py", "diff": "@@ -216,34 +216,12 @@ class SkillSettings(dict):\n except RequestException:\n return\n \n- hashed_meta = self._get_meta_hash(settings_meta)\n- skill_settings = self._request_other_settings(hashed_meta)\n- # if hash is new then there is a diff version of settingsmeta\n- if self._is_new_hash(hashed_meta):\n- # first look at all other devices on user account to see\n- # if the settings exist. if it does then sync with device\n- if skill_settings:\n- # not_owner flags that this settings is loaded from\n- # another device. If a skill settings doesn't have\n- # not_owner, then the skill is created from that device\n- self['not_owner'] = True\n- self.save_skill_settings(skill_settings)\n- else: # upload skill settings if\n- uuid = self._load_uuid()\n- if uuid is not None:\n- self._delete_metadata(uuid)\n- self._upload_meta(settings_meta, hashed_meta)\n- else: # hash is not new\n- if skill_settings is not None:\n- self['not_owner'] = True\n- self.save_skill_settings(skill_settings)\n- else:\n- settings = self._request_my_settings(hashed_meta)\n- if settings is None:\n- # metadata got deleted from Home, send up\n- self._upload_meta(settings_meta, hashed_meta)\n- else:\n- self.save_skill_settings(settings)\n+ settings = self._request_my_settings(self.skill_gid)\n+ if settings is None:\n+ # metadata got deleted from Home, send up\n+ self._upload_meta(settings_meta, self.skill_gid)\n+ else:\n+ self.save_skill_settings(settings)\n self._complete_intialization = True\n \n @property\n@@ -323,15 +301,15 @@ class SkillSettings(dict):\n Args:\n skill_settings (dict): skill\n \"\"\"\n- if self._is_new_hash(skill_settings['identifier']):\n- self._save_uuid(skill_settings['uuid'])\n- self._save_hash(skill_settings['identifier'])\n if 'skillMetadata' in skill_settings:\n sections = skill_settings['skillMetadata']['sections']\n for section in sections:\n for field in section[\"fields\"]:\n if \"name\" in field and \"value\" in field:\n- self[field['name']] = field['value']\n+ # Bypass the change lock to allow server to update\n+ # during skill init\n+ super(SkillSettings, self).__setitem__(field['name'],\n+ field['value'])\n self.store()\n \n def _load_uuid(self):\n@@ -392,90 +370,33 @@ class SkillSettings(dict):\n meta['skillMetadata']['sections'] = sections\n return meta\n \n- def _upload_meta(self, settings_meta, hashed_meta):\n+ def _upload_meta(self, settings_meta, identifier):\n \"\"\" uploads the new meta data to settings with settings migration\n \n Args:\n- settings_meta (dict): from settingsmeta.json or settingsmeta.yaml\n- hashed_meta (str): {skill-folder}-settinsmeta.json\n+ settings_meta (dict): settingsmeta.json or settingsmeta.yaml\n+ identifier (str): identifier for skills meta data\n \"\"\"\n meta = self._migrate_settings(settings_meta)\n- meta['identifier'] = str(hashed_meta)\n+ meta['identifier'] = identifier\n response = self._send_settings_meta(meta)\n- if response and 'uuid' in response:\n- self._save_uuid(response['uuid'])\n- if 'not_owner' in self:\n- del self['not_owner']\n- self._save_hash(hashed_meta)\n \n def hash(self, string):\n \"\"\" md5 hasher for consistency across cpu architectures \"\"\"\n return hashlib.md5(bytes(string, 'utf-8')).hexdigest()\n \n- def _get_meta_hash(self, settings_meta):\n- \"\"\" Gets the hash of skill\n-\n- Args:\n- settings_meta (dict): settingsmeta object\n- Returns:\n- _hash (str): hashed to identify skills\n- \"\"\"\n- _hash = 
self.hash(json.dumps(settings_meta, sort_keys=True) +\n- self._user_identity)\n- return \"{}--{}\".format(self.name, _hash)\n-\n- def _save_hash(self, hashed_meta):\n- \"\"\" Saves hashed_meta to settings directory.\n-\n- Args:\n- hashed_meta (str): hash of new settingsmeta\n- \"\"\"\n- directory = self.config.get(\"skills\")[\"directory\"]\n- directory = join(directory, self.name)\n- directory = expanduser(directory)\n- hash_file = join(directory, 'hash')\n- os.makedirs(directory, exist_ok=True)\n- with open(hash_file, 'w') as f:\n- f.write(hashed_meta)\n-\n- def _is_new_hash(self, hashed_meta):\n- \"\"\" Check if stored hash is the same as current.\n-\n- If the hashed file does not exist, usually in the\n- case of first load, then the create it and return True\n-\n- Args:\n- hashed_meta (str): hash of metadata and uuid of device\n- Returns:\n- bool: True if hash is new, otherwise False\n- \"\"\"\n- directory = self.config.get(\"skills\")[\"directory\"]\n- directory = join(directory, self.name)\n- directory = expanduser(directory)\n- hash_file = join(directory, 'hash')\n- if isfile(hash_file):\n- with open(hash_file, 'r') as f:\n- current_hash = f.read()\n- return False if current_hash == str(hashed_meta) else True\n- return True\n-\n def update_remote(self):\n \"\"\" update settings state from server \"\"\"\n- skills_settings = None\n settings_meta = self._load_settings_meta()\n if settings_meta is None:\n return\n- hashed_meta = self._get_meta_hash(settings_meta)\n- if self.get('not_owner'):\n- skills_settings = self._request_other_settings(hashed_meta)\n- if not skills_settings:\n- skills_settings = self._request_my_settings(hashed_meta)\n+ skills_settings = self._request_my_settings(self.skill_gid)\n if skills_settings is not None:\n self.save_skill_settings(skills_settings)\n self.store()\n else:\n settings_meta = self._load_settings_meta()\n- self._upload_meta(settings_meta, hashed_meta)\n+ self._upload_meta(settings_meta, self.skill_gid)\n \n def _init_blank_meta(self):\n \"\"\" Send blank settingsmeta to remote. 
\"\"\"\n@@ -599,10 +520,11 @@ class SkillSettings(dict):\n with the identifier\n \n Args:\n- identifier (str): a hashed_meta\n+ identifier (str): identifier (skill_gid)\n Returns:\n skill_settings (dict or None): returns a dict if matches\n \"\"\"\n+ print(\"GETTING SETTINGS FOR {}\".format(self.name))\n settings = self._request_settings()\n if settings:\n # this loads the settings into memory for use in self.store\n@@ -612,7 +534,8 @@ class SkillSettings(dict):\n self._type_cast(skill_settings, to_platform='core')\n self._remote_settings = skill_settings\n return skill_settings\n- return None\n+ else:\n+ return None\n \n def _request_settings(self):\n \"\"\" Get all skill settings for this device from server.\n@@ -631,27 +554,6 @@ class SkillSettings(dict):\n settings = [skills for skills in settings if skills is not None]\n return settings\n \n- def _request_other_settings(self, identifier):\n- \"\"\" Retrieve skill settings from other devices by identifier\n-\n- Args:\n- identifier (str): identifier for this skill\n- Returns:\n- settings (dict or None): the retrieved settings or None\n- \"\"\"\n- path = \\\n- \"/\" + self._device_identity + \"/userSkill?identifier=\" + identifier\n- try:\n- user_skill = self.api.request({\"method\": \"GET\", \"path\": path})\n- except RequestException:\n- # Some kind of Timeout, connection HTTPError, etc.\n- user_skill = None\n- if not user_skill:\n- return None\n- else:\n- settings = self._type_cast(user_skill[0], to_platform='core')\n- return settings\n-\n def _put_metadata(self, settings_meta):\n \"\"\" PUT settingsmeta to backend to be configured in server.\n used in place of POST and PATCH.\n@@ -717,12 +619,7 @@ class SkillSettings(dict):\n \n if self._should_upload_from_change:\n settings_meta = self._load_settings_meta()\n- hashed_meta = self._get_meta_hash(settings_meta)\n- uuid = self._load_uuid()\n- if uuid is not None:\n- self._delete_metadata(uuid)\n- self._upload_meta(settings_meta, hashed_meta)\n-\n+ self._upload_meta(settings_meta, self.skill_gid)\n \n def _get_meta_path(base_directory):\n json_path = join(base_directory, 'settingsmeta.json')\n" } ]
39c844a257ef934b244af28c882c1fe93f6fe61d
mycroftai/mycroft-core
14.06.2019 12:56:48
Apache License 2.0
Update old style metaclasses The old "__metaclass__" attribute has been ignored since the switch to Python 3. This restores the metaclass functionality by updating it to the new class keyword argument syntax.
[ { "change_type": "MODIFY", "old_path": "mycroft/audio/services/__init__.py", "new_path": "mycroft/audio/services/__init__.py", "diff": "@@ -15,7 +15,7 @@\n from abc import ABCMeta, abstractmethod\n \n \n-class AudioBackend:\n+class AudioBackend(metaclass=ABCMeta):\n \"\"\"\n Base class for all audio backend implementations.\n \n@@ -23,7 +23,6 @@ class AudioBackend:\n config: configuration dict for the instance\n bus: Mycroft messagebus emitter\n \"\"\"\n- __metaclass__ = ABCMeta\n \n def __init__(self, config, bus):\n self._track_start_callback = None\n" }, { "change_type": "MODIFY", "old_path": "mycroft/stt/__init__.py", "new_path": "mycroft/stt/__init__.py", "diff": "@@ -25,9 +25,8 @@ from mycroft.configuration import Configuration\n from mycroft.util.log import LOG\n \n \n-class STT:\n- __metaclass__ = ABCMeta\n-\n+class STT(metaclass=ABCMeta):\n+ \"\"\" STT Base class, all STT backends derives from this one. \"\"\"\n def __init__(self):\n config_core = Configuration.get()\n self.lang = str(self.init_language(config_core))\n@@ -50,24 +49,19 @@ class STT:\n pass\n \n \n-class TokenSTT(STT):\n- __metaclass__ = ABCMeta\n-\n+class TokenSTT(STT, metaclass=ABCMeta):\n def __init__(self):\n super(TokenSTT, self).__init__()\n self.token = str(self.credential.get(\"token\"))\n \n \n-class GoogleJsonSTT(STT):\n- __metaclass__ = ABCMeta\n-\n+class GoogleJsonSTT(STT, metaclass=ABCMeta):\n def __init__(self):\n super(GoogleJsonSTT, self).__init__()\n self.json_credentials = json.dumps(self.credential.get(\"json\"))\n \n \n-class BasicSTT(STT):\n- __metaclass__ = ABCMeta\n+class BasicSTT(STT, metaclass=ABCMeta):\n \n def __init__(self):\n super(BasicSTT, self).__init__()\n@@ -75,8 +69,7 @@ class BasicSTT(STT):\n self.password = str(self.credential.get(\"password\"))\n \n \n-class KeySTT(STT):\n- __metaclass__ = ABCMeta\n+class KeySTT(STT, metaclass=ABCMeta):\n \n def __init__(self):\n super(KeySTT, self).__init__()\n@@ -169,11 +162,10 @@ class DeepSpeechServerSTT(STT):\n return response.text\n \n \n-class StreamThread(Thread):\n+class StreamThread(Thread, metaclass=ABCMeta):\n \"\"\"\n ABC class to be used with StreamingSTT class implementations.\n \"\"\"\n- __metaclass__ = ABCMeta\n \n def __init__(self, queue, language):\n super().__init__()\n@@ -197,12 +189,10 @@ class StreamThread(Thread):\n pass\n \n \n-class StreamingSTT(STT):\n+class StreamingSTT(STT, metaclass=ABCMeta):\n \"\"\"\n ABC class for threaded streaming STT implemenations.\n \"\"\"\n- __metaclass__ = ABCMeta\n-\n def __init__(self):\n super().__init__()\n self.stream = None\n" }, { "change_type": "MODIFY", "old_path": "mycroft/tts/__init__.py", "new_path": "mycroft/tts/__init__.py", "diff": "@@ -141,7 +141,7 @@ class PlaybackThread(Thread):\n self.clear_queue()\n \n \n-class TTS:\n+class TTS(metaclass=ABCMeta):\n \"\"\"\n TTS abstract class to be implemented by all TTS engines.\n \n@@ -155,8 +155,6 @@ class TTS:\n phonetic_spelling (bool): Whether to spell certain words phonetically\n ssml_tags (list): Supported ssml properties. Ex. 
['speak', 'prosody']\n \"\"\"\n- __metaclass__ = ABCMeta\n-\n def __init__(self, lang, config, validator, audio_ext='wav',\n phonetic_spelling=True, ssml_tags=None):\n super(TTS, self).__init__()\n@@ -395,15 +393,13 @@ class TTS:\n self.playback.join()\n \n \n-class TTSValidator:\n+class TTSValidator(metaclass=ABCMeta):\n \"\"\"\n TTS Validator abstract class to be implemented by all TTS engines.\n \n It exposes and implements ``validate(tts)`` function as a template to\n validate the TTS engines.\n \"\"\"\n- __metaclass__ = ABCMeta\n-\n def __init__(self, tts):\n self.tts = tts\n \n" } ]
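A minimal, runnable illustration of why this mattered: the Python 2 spelling is silently ignored on Python 3, so abstract methods were never enforced, while the keyword-argument form restores the check. Class names here are illustrative:

    from abc import ABCMeta, abstractmethod


    class OldStyleBackend:
        __metaclass__ = ABCMeta        # ignored by Python 3: this is just a class attribute

        @abstractmethod
        def play(self):
            pass


    class NewStyleBackend(metaclass=ABCMeta):
        @abstractmethod
        def play(self):
            pass


    OldStyleBackend()                  # silently allowed -- the bug this commit fixes
    try:
        NewStyleBackend()
    except TypeError as exc:
        print(exc)                     # can't instantiate abstract class NewStyleBackend ...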
b28d63cf7134602924a18bc9f40518a404923c63
mycroftai/mycroft-core
09.08.2019 09:43:27
Apache License 2.0
Make register_intent decorator register all intent types This makes the register_intent decorator work for all types of intents, both Adapt and Padatious.
[ { "change_type": "MODIFY", "old_path": "mycroft/skills/mycroft_skill/decorators.py", "new_path": "mycroft/skills/mycroft_skill/decorators.py", "diff": "@@ -30,7 +30,10 @@ def intent_handler(intent_parser):\n \n \n def intent_file_handler(intent_file):\n- \"\"\"Decorator for adding a method as an intent file handler.\"\"\"\n+ \"\"\"Decorator for adding a method as an intent file handler.\n+\n+ This decorator is deprecated, use intent_handler for the same effect.\n+ \"\"\"\n \n def real_decorator(func):\n # Store the intent_file inside the function\n" }, { "change_type": "MODIFY", "old_path": "mycroft/skills/mycroft_skill/mycroft_skill.py", "new_path": "mycroft/skills/mycroft_skill/mycroft_skill.py", "diff": "@@ -804,25 +804,37 @@ class MycroftSkill:\n self.bus.remove_all_listeners(name)\n return removed\n \n+ def _register_adapt_intent(self, intent_parser, handler):\n+ \"\"\"Register an adapt intent.\n+\n+ Arguments:\n+ intent_parser: Intent object to parse utterance for the handler.\n+ handler (func): function to register with intent\n+ \"\"\"\n+ # Default to the handler's function name if none given\n+ name = intent_parser.name or handler.__name__\n+ munge_intent_parser(intent_parser, name, self.skill_id)\n+ self.bus.emit(Message(\"register_intent\", intent_parser.__dict__))\n+ self.registered_intents.append((name, intent_parser))\n+ self.add_event(intent_parser.name, handler, 'mycroft.skill.handler')\n+\n def register_intent(self, intent_parser, handler):\n \"\"\"Register an Intent with the intent service.\n \n Arguments:\n- intent_parser: Intent or IntentBuilder object to parse\n- utterance for the handler.\n+ intent_parser: Intent, IntentBuilder object or padatious intent\n+ file to parse utterance for the handler.\n handler (func): function to register with intent\n \"\"\"\n if isinstance(intent_parser, IntentBuilder):\n intent_parser = intent_parser.build()\n+ if (isinstance(intent_parser, str) and\n+ intent_parser.endswith('.intent')):\n+ return self.register_intent_file(intent_parser, handler)\n elif not isinstance(intent_parser, Intent):\n raise ValueError('\"' + str(intent_parser) + '\" is not an Intent')\n \n- # Default to the handler's function name if none given\n- name = intent_parser.name or handler.__name__\n- munge_intent_parser(intent_parser, name, self.skill_id)\n- self.bus.emit(Message(\"register_intent\", intent_parser.__dict__))\n- self.registered_intents.append((name, intent_parser))\n- self.add_event(intent_parser.name, handler, 'mycroft.skill.handler')\n+ return self._register_adapt_intent(intent_parser, handler)\n \n def register_intent_file(self, intent_file, handler):\n \"\"\"Register an Intent file with the intent service.\n" }, { "change_type": "MODIFY", "old_path": "test/unittests/skills/test_core.py", "new_path": "test/unittests/skills/test_core.py", "diff": "@@ -307,7 +307,13 @@ class MycroftSkillTest(unittest.TestCase):\n self.emitter.reset()\n \n def test_register_intent_file(self):\n- s = SimpleSkill4()\n+ self._test_intent_file(SimpleSkill4())\n+\n+ def test_register_intent_intent_file(self):\n+ \"\"\"Test register intent files using register_intent.\"\"\"\n+ self._test_intent_file(SimpleSkill6())\n+\n+ def _test_intent_file(self, s):\n s.root_dir = abspath(join(dirname(__file__), 'intent_file'))\n s.bind(self.emitter)\n s.initialize()\n@@ -594,3 +600,15 @@ class SimpleSkill4(_TestSkill):\n \n def stop(self):\n pass\n+\n+\n+class SimpleSkill6(_TestSkill):\n+ \"\"\" Test skill for padatious intent \"\"\"\n+ skill_id = 'A'\n+\n+ def initialize(self):\n+ 
self.register_intent('test.intent', self.handler)\n+ self.register_entity_file('test_ent.entity')\n+\n+ def handler(self, message):\n+ pass\n" } ]
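A hypothetical skill showing both styles that register_intent() now accepts; the intent file, vocab keyword and dialog lines are invented for illustration:

    from adapt.intent import IntentBuilder
    from mycroft import MycroftSkill


    class DemoSkill(MycroftSkill):
        def initialize(self):
            # Adapt intent, exactly as before:
            self.register_intent(IntentBuilder('DemoAdapt').require('DemoKeyword'),
                                 self.handle_adapt)
            # Padatious intent file: strings ending in '.intent' are now routed to
            # register_intent_file() internally, so the same call covers both cases.
            self.register_intent('demo.intent', self.handle_padatious)

        def handle_adapt(self, message):
            self.speak('adapt intent matched')

        def handle_padatious(self, message):
            self.speak('padatious intent matched')


    def create_skill():
        return DemoSkill()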
69231c5ed13dc6ad437392fe83cb06074b250dd2
mycroftai/mycroft-core
31.08.2019 17:34:10
Apache License 2.0
Make execute_test less complex Split the long function into several smaller ones, retaining only the main logic.
[ { "change_type": "MODIFY", "old_path": "test/integrationtests/skills/skill_tester.py", "new_path": "test/integrationtests/skills/skill_tester.py", "diff": "@@ -281,6 +281,17 @@ class MockSkillsLoader(object):\n unload_skills(self.skills)\n \n \n+def load_test_case_file(test_case_file):\n+ \"\"\"Load a test case to run.\"\"\"\n+ print(\"\")\n+ print(color.HEADER + \"=\"*20 + \" RUNNING TEST \" + \"=\"*20 + color.RESET)\n+ print('Test file: ', test_case_file)\n+ with open(test_case_file, 'r') as f:\n+ test_case = json.load(f)\n+ print('Test:', json.dumps(test_case, indent=4, sort_keys=False))\n+ return test_case\n+\n+\n class SkillTest(object):\n \"\"\"\n This class is instantiated for each skill being tested. It holds the\n@@ -330,6 +341,92 @@ class SkillTest(object):\n s.get_response = orig_get_response\n s.settings = original_settings\n \n+ def send_play_query(self, s, test_case):\n+ \"\"\"Emit an event triggering the a check for playback possibilities.\"\"\"\n+ play_query = test_case['play_query']\n+ print('PLAY QUERY', color.USER_UTT + play_query + color.RESET)\n+ self.emitter.emit('play:query', Message('play:query:',\n+ {'phrase': play_query}))\n+\n+ def send_play_start(self, s, test_case):\n+ \"\"\"Emit an event starting playback from the skill.\"\"\"\n+ print('PLAY START')\n+ callback_data = test_case['play_start']\n+ callback_data['skill_id'] = s.skill_id\n+ self.emitter.emit('play:start',\n+ Message('play:start', callback_data))\n+\n+ def send_question(self, test_case):\n+ \"\"\"Emit a Question to the loaded skills.\"\"\"\n+ print(\"QUESTION: {}\".format(test_case['question']))\n+ callback_data = {'phrase': test_case['question']}\n+ self.emitter.emit('question:query',\n+ Message('question:query', data=callback_data))\n+\n+ def send_utterance(self, test_case):\n+ \"\"\"Emit an utterance to the loaded skills.\"\"\"\n+ utt = test_case['utterance']\n+ print(\"UTTERANCE:\", color.USER_UTT + utt + color.RESET)\n+ self.emitter.emit('recognizer_loop:utterance',\n+ Message('recognizer_loop:utterance',\n+ {'utterances': [utt]}))\n+\n+ def apply_test_settings(self, s, test_case):\n+ \"\"\"Replace the skills settings with settings from the test_case.\"\"\"\n+ s.settings = TestSettings('/tmp/', self.test_case_file)\n+ for key in test_case['settings']:\n+ s.settings[key] = test_case['settings'][key]\n+ print(color.YELLOW, 'will run test with custom settings:',\n+ '\\n{}'.format(s.settings), color.RESET)\n+\n+ def setup_get_response(self, s, test_case):\n+ \"\"\"Setup interception of get_response calls.\"\"\"\n+ def get_response(dialog='', data=None, announcement='',\n+ validator=None, on_fail=None, num_retries=-1):\n+ data = data or {}\n+ utt = announcement or s.dialog_renderer.render(dialog, data)\n+ print(color.MYCROFT + \">> \" + utt + color.RESET)\n+ s.speak(utt)\n+\n+ response = test_case['responses'].pop(0)\n+ print(\"SENDING RESPONSE:\",\n+ color.USER_UTT + response + color.RESET)\n+ return response\n+\n+ s.get_response = get_response\n+\n+ def remove_context(self, s, cxt):\n+ \"\"\"remove an adapt context.\"\"\"\n+ if isinstance(cxt, list):\n+ for x in cxt:\n+ MycroftSkill.remove_context(s, x)\n+ else:\n+ MycroftSkill.remove_context(s, cxt)\n+\n+ def set_context(self, s, cxt):\n+ \"\"\"Set an adapt context.\"\"\"\n+ for key, value in cxt.items():\n+ MycroftSkill.set_context(s, key, value)\n+\n+ def send_test_input(self, s, test_case):\n+ \"\"\"Emit an utterance, just like the STT engine does. 
This sends the\n+ provided text to the skill engine for intent matching and it then\n+ invokes the skill.\n+\n+ It also handles some special cases for common play skills and common\n+ query skills.\n+ \"\"\"\n+ if 'utterance' in test_case:\n+ self.send_utterance(test_case)\n+ elif 'play_query' in test_case:\n+ self.send_play_query(s, test_case)\n+ elif 'play_start' in test_case:\n+ self.send_play_start(s, test_case)\n+ elif 'question' in test_case:\n+ self.send_question(test_case)\n+ else:\n+ raise SkillTestError('No input provided in test case')\n+\n def execute_test(self, s):\n \"\"\" Execute test case.\n \n@@ -339,34 +436,13 @@ class SkillTest(object):\n Returns:\n (bool) True if the test succeeded completely.\n \"\"\"\n- print(\"\")\n- print(color.HEADER + \"=\"*20 + \" RUNNING TEST \" + \"=\"*20 + color.RESET)\n- print('Test file: ', self.test_case_file)\n- with open(self.test_case_file, 'r') as f:\n- test_case = json.load(f)\n- print('Test:', json.dumps(test_case, indent=4, sort_keys=False))\n+ test_case = load_test_case_file(self.test_case_file)\n \n if 'settings' in test_case:\n- s.settings = TestSettings('/tmp/', self.test_case_file)\n- for key in test_case['settings']:\n- s.settings[key] = test_case['settings'][key]\n- print(color.YELLOW, 'will run test with custom settings:',\n- '\\n{}'.format(s.settings), color.RESET)\n+ self.apply_test_settings(s, test_case)\n \n if 'responses' in test_case:\n- def get_response(dialog='', data=None, announcement='',\n- validator=None, on_fail=None, num_retries=-1):\n- data = data or {}\n- utt = announcement or s.dialog_renderer.render(dialog, data)\n- print(color.MYCROFT + \">> \" + utt + color.RESET)\n- s.speak(utt)\n-\n- response = test_case['responses'].pop(0)\n- print(\"SENDING RESPONSE:\",\n- color.USER_UTT + response + color.RESET)\n- return response\n-\n- s.get_response = get_response\n+ self.setup_get_response(s, test_case)\n \n # If we keep track of test status for the entire skill, then\n # get all intents from the skill, and mark current intent\n@@ -390,73 +466,67 @@ class SkillTest(object):\n # between test_cases\n cxt = test_case.get('remove_context', None)\n if cxt:\n- if isinstance(cxt, list):\n- for x in cxt:\n- MycroftSkill.remove_context(s, x)\n- else:\n- MycroftSkill.remove_context(s, cxt)\n+ self.remove_context(s, cxt)\n \n cxt = test_case.get('set_context', None)\n if cxt:\n- for key, value in cxt.items():\n- MycroftSkill.set_context(s, key, value)\n-\n- # Emit an utterance, just like the STT engine does. 
This sends the\n- # provided text to the skill engine for intent matching and it then\n- # invokes the skill.\n- if 'utterance' in test_case:\n- utt = test_case['utterance']\n- print(\"UTTERANCE:\", color.USER_UTT + utt + color.RESET)\n- self.emitter.emit('recognizer_loop:utterance',\n- Message('recognizer_loop:utterance',\n- {'utterances': [utt]}))\n- elif 'play_query' in test_case:\n- play_query = test_case['play_query']\n- print('PLAY QUERY', color.USER_UTT + play_query + color.RESET)\n- self.emitter.emit('play:query', Message('play:query:',\n- {'phrase': play_query}))\n- elif 'play_start' in test_case:\n- print('PLAY START')\n- callback_data = test_case['play_start']\n- callback_data['skill_id'] = s.skill_id\n- self.emitter.emit('play:start',\n- Message('play:start', callback_data))\n- elif 'question' in test_case:\n- print(\"QUESTION: {}\".format(test_case['question']))\n- callback_data = {'phrase': test_case['question']}\n- self.emitter.emit('question:query',\n- Message('question:query', data=callback_data))\n- else:\n- raise SkillTestError('No input utterance provided')\n+ self.set_context(s, cxt)\n \n+ self.send_test_input(s, test_case)\n # Wait up to X seconds for the test_case to complete\n- timeout = time.time() + int(test_case.get('evaluation_timeout')) \\\n- if test_case.get('evaluation_timeout', None) and \\\n- isinstance(test_case['evaluation_timeout'], int) \\\n- else time.time() + DEFAULT_EVALUAITON_TIMEOUT\n- while not evaluation_rule.all_succeeded():\n- try:\n- event = q.get(timeout=1)\n- if ':' in event.type:\n- event.data['__type__'] = event.type.split(':')[1]\n- else:\n- event.data['__type__'] = event.type\n+ timeout = self.get_timeout(test_case)\n \n- evaluation_rule.evaluate(event.data)\n- if event.type == 'mycroft.skill.handler.complete':\n- break\n- except Empty:\n- pass\n- if time.time() > timeout:\n+ while not evaluation_rule.all_succeeded():\n+ # Process the queue until a skill handler sends a complete message\n+ if self.check_queue(q, evaluation_rule) or time.time() > timeout:\n break\n \n- # Stop emmiter from sending on queue\n+ self.shutdown_emitter(s)\n+\n+ # Report test result if failed\n+ return self.results(evaluation_rule)\n+\n+ def get_timeout(self, test_case):\n+ \"\"\"Find any timeout specified in test case.\n+\n+ If no timeout is specified return the default.\n+ \"\"\"\n+ if (test_case.get('evaluation_timeout', None) and\n+ isinstance(test_case['evaluation_timeout'], int)):\n+ return time.time() + int(test_case.get('evaluation_timeout'))\n+ else:\n+ return time.time() + DEFAULT_EVALUAITON_TIMEOUT\n+\n+ def check_queue(self, q, evaluation_rule):\n+ \"\"\"Check the queue for events.\n+\n+ If event indicating skill completion is found returns True, else False.\n+ \"\"\"\n+ try:\n+ event = q.get(timeout=1)\n+ if ':' in event.type:\n+ event.data['__type__'] = event.type.split(':')[1]\n+ else:\n+ event.data['__type__'] = event.type\n+\n+ evaluation_rule.evaluate(event.data)\n+ if event.type == 'mycroft.skill.handler.complete':\n+ return True\n+ except Empty:\n+ pass\n+ return False\n+\n+ def shutdown_emitter(self, s):\n+ \"\"\"Shutdown the skill connection to the bus.\"\"\"\n+ # Stop emiter from sending on queue\n s.bus.q = None\n \n # remove the skill which is not responding\n self.emitter.remove_all_listeners('speak')\n self.emitter.remove_all_listeners('mycroft.skill.handler.complete')\n- # Report test result if failed\n+\n+ def results(self, evaluation_rule):\n+ \"\"\"Display and report the results.\"\"\"\n if not evaluation_rule.all_succeeded():\n 
self.failure_msg = str(evaluation_rule.get_failure())\n print(color.FAIL + \"Evaluation failed\" + color.RESET)\n@@ -497,7 +567,7 @@ def load_dialog_list(skill, dialog):\n return dialogs\n \n \n-class EvaluationRule(object):\n+class EvaluationRule:\n \"\"\"\n This class initially convert the test_case json file to internal rule\n format, which is stored throughout the testcase run. All Messages on\n" } ]
29db163a78cddc14e4d9a788c8bc7ed8e6eda9cf
mycroftai/mycroft-core
04.10.2019 08:00:06
Apache License 2.0
Move listen trigger to last chunk of sentence If rendering a chunk of a sentence takes too long, the audio queue may run out and trigger listening too early. This moves the listening trigger to after the last chunk.
[ { "change_type": "MODIFY", "old_path": "mycroft/audio/speech.py", "new_path": "mycroft/audio/speech.py", "diff": "@@ -35,11 +35,6 @@ mimic_fallback_obj = None\n _last_stop_signal = 0\n \n \n-def _start_listener(_):\n- \"\"\"Force Mycroft to start listening (as if 'Hey Mycroft' was spoken).\"\"\"\n- bus.emit(Message('mycroft.mic.listen'))\n-\n-\n def handle_speak(event):\n \"\"\"Handle \"speak\" message\n \n@@ -60,11 +55,7 @@ def handle_speak(event):\n stopwatch = Stopwatch()\n stopwatch.start()\n utterance = event.data['utterance']\n- if event.data.get('expect_response', False):\n- # When expect_response is requested, the listener will be restarted\n- # at the end of the next bit of spoken audio.\n- bus.once('recognizer_loop:audio_output_end', _start_listener)\n-\n+ listen = event.data.get('expect_response', False)\n # This is a bit of a hack for Picroft. The analog audio on a Pi blocks\n # for 30 seconds fairly often, so we don't want to break on periods\n # (decreasing the chance of encountering the block). But we will\n@@ -82,7 +73,10 @@ def handle_speak(event):\n utterance = re.sub(r'\\b([A-za-z][\\.])(\\s+)', r'\\g<1>', utterance)\n chunks = re.split(r'(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\;|\\?)\\s',\n utterance)\n- for chunk in chunks:\n+ # Apply the listen flag to the last chunk, set the rest to False\n+ chunks = [(chunks[i], listen if i == len(chunks) - 1 else False)\n+ for i in range(len(chunks))]\n+ for chunk, listen in chunks:\n # Check if somthing has aborted the speech\n if (_last_stop_signal > start or\n check_for_signal('buttonPress')):\n@@ -90,7 +84,7 @@ def handle_speak(event):\n tts.playback.clear()\n break\n try:\n- mute_and_speak(chunk, ident)\n+ mute_and_speak(chunk, ident, listen)\n except KeyboardInterrupt:\n raise\n except Exception:\n@@ -103,7 +97,7 @@ def handle_speak(event):\n 'tts': tts.__class__.__name__})\n \n \n-def mute_and_speak(utterance, ident):\n+def mute_and_speak(utterance, ident, listen=False):\n \"\"\"Mute mic and start speaking the utterance using selected tts backend.\n \n Arguments:\n@@ -125,7 +119,7 @@ def mute_and_speak(utterance, ident):\n \n LOG.info(\"Speak: \" + utterance)\n try:\n- tts.execute(utterance, ident)\n+ tts.execute(utterance, ident, listen)\n except RemoteTTSTimeoutException as e:\n LOG.error(e)\n mimic_fallback_tts(utterance, ident)\n" }, { "change_type": "MODIFY", "old_path": "mycroft/tts/__init__.py", "new_path": "mycroft/tts/__init__.py", "diff": "@@ -19,7 +19,7 @@ import random\n import re\n from abc import ABCMeta, abstractmethod\n from threading import Thread\n-from time import time\n+from time import time, sleep\n \n import os.path\n from os.path import dirname, exists, isdir, join\n@@ -83,7 +83,8 @@ class PlaybackThread(Thread):\n \"\"\"Thread main loop. 
get audio and viseme data from queue and play.\"\"\"\n while not self._terminated:\n try:\n- snd_type, data, visemes, ident = self.queue.get(timeout=2)\n+ snd_type, data, visemes, ident, listen = \\\n+ self.queue.get(timeout=2)\n self.blink(0.5)\n if not self._processing_queue:\n self._processing_queue = True\n@@ -111,7 +112,7 @@ class PlaybackThread(Thread):\n except Exception as e:\n LOG.exception(e)\n if self._processing_queue:\n- self.tts.end_audio()\n+ self.tts.end_audio(listen)\n self._processing_queue = False\n \n def show_visemes(self, pairs):\n@@ -196,7 +197,7 @@ class TTS(metaclass=ABCMeta):\n # Create signals informing start of speech\n self.bus.emit(Message(\"recognizer_loop:audio_output_start\"))\n \n- def end_audio(self):\n+ def end_audio(self, listen):\n \"\"\"Helper function for child classes to call in execute().\n \n Sends the recognizer_loop:audio_output_end message, indicating\n@@ -205,6 +206,8 @@ class TTS(metaclass=ABCMeta):\n \"\"\"\n \n self.bus.emit(Message(\"recognizer_loop:audio_output_end\"))\n+ if listen:\n+ self.bus.emit(Message('mycroft.mic.listen'))\n # Clean the cache as needed\n cache_dir = mycroft.util.get_cache_directory(\"tts/\" + self.tts_name)\n mycroft.util.curate_cache(cache_dir, min_free_percent=100)\n@@ -287,15 +290,17 @@ class TTS(metaclass=ABCMeta):\n \"\"\"\n return [sentence]\n \n- def execute(self, sentence, ident=None):\n+ def execute(self, sentence, ident=None, listen=False):\n \"\"\"Convert sentence to speech, preprocessing out unsupported ssml\n \n The method caches results if possible using the hash of the\n sentence.\n \n- Args:\n+ Arguments:\n sentence: Sentence to be spoken\n ident: Id reference to current interaction\n+ listen: True if listen should be triggered at the end\n+ of the utterance.\n \"\"\"\n sentence = self.validate_ssml(sentence)\n \n@@ -307,7 +312,11 @@ class TTS(metaclass=ABCMeta):\n self.spellings[word.lower()])\n \n chunks = self._preprocess_sentence(sentence)\n- for sentence in chunks:\n+ # Apply the listen flag to the last chunk, set the rest to False\n+ chunks = [(chunks[i], listen if i == len(chunks) - 1 else False)\n+ for i in range(len(chunks))]\n+\n+ for sentence, l in chunks:\n key = str(hashlib.md5(\n sentence.encode('utf-8', 'ignore')).hexdigest())\n wav_file = os.path.join(\n@@ -323,7 +332,7 @@ class TTS(metaclass=ABCMeta):\n self.save_phonemes(key, phonemes)\n \n vis = self.viseme(phonemes) if phonemes else None\n- self.queue.put((self.audio_ext, wav_file, vis, ident))\n+ self.queue.put((self.audio_ext, wav_file, vis, ident, l))\n \n def viseme(self, phonemes):\n \"\"\"Create visemes from phonemes. Needs to be implemented for all\n" } ]
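The same list-comprehension pattern appears in both files of the diff above; a standalone sketch of it follows (tag_last_chunk is not a mycroft-core function, it only restates the idea):

    def tag_last_chunk(chunks, listen):
        # Attach the listen flag only to the final chunk so listening is
        # triggered after the last piece of audio, not after an earlier one.
        return [(chunk, listen and i == len(chunks) - 1)
                for i, chunk in enumerate(chunks)]

    chunks = ['First sentence.', 'Second one.', 'Third one.']
    print(tag_last_chunk(chunks, listen=True))
    # [('First sentence.', False), ('Second one.', False), ('Third one.', True)]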
82e350064cb8d1622c7cde275567ae594483fe62
mycroftai/mycroft-core
28.03.2020 18:22:47
Apache License 2.0
Simplify and clean up curate_cache - create helper functions for getting file stats and removing files in order of age - create a wrapper function for converting MB into bytes
[ { "change_type": "MODIFY", "old_path": "mycroft/util/file_utils.py", "new_path": "mycroft/util/file_utils.py", "diff": "@@ -121,27 +121,74 @@ def read_dict(filename, div='='):\n return d\n \n \n+def mb_to_bytes(size):\n+ \"\"\"Takes a size in MB and returns the number of bytes.\n+\n+ Arguments:\n+ size(int/float): size in Mega Bytes\n+\n+ Returns:\n+ (int/float) size in bytes\n+ \"\"\"\n+ return size * 1024 * 1024\n+\n+\n+def _get_cache_entries(directory):\n+ \"\"\"Get information tuple for all regular files in directory.\n+\n+ Arguments:\n+ directory (str): path to directory to check\n+\n+ Returns:\n+ (tuple) (modification time, size, filepath)\n+ \"\"\"\n+ entries = (os.path.join(directory, fn) for fn in os.listdir(directory))\n+ entries = ((os.stat(path), path) for path in entries)\n+\n+ # leave only regular files, insert modification date\n+ return ((stat[ST_MTIME], stat[ST_SIZE], path)\n+ for stat, path in entries if S_ISREG(stat[ST_MODE]))\n+\n+\n+def _delete_oldest(entries, bytes_needed):\n+ \"\"\"Delete files with oldest modification date until space is freed.\n+\n+ Arguments:\n+ entries (tuple): file + file stats tuple\n+ bytes_needed (int): disk space that needs to be freed\n+ \"\"\"\n+ space_freed = 0\n+ for moddate, fsize, path in sorted(entries):\n+ try:\n+ os.remove(path)\n+ space_freed += fsize\n+ except Exception:\n+ pass\n+\n+ if space_freed > bytes_needed:\n+ break # deleted enough!\n+\n+\n def curate_cache(directory, min_free_percent=5.0, min_free_disk=50):\n- \"\"\"Clear out the directory if needed\n+ \"\"\"Clear out the directory if needed.\n \n- This assumes all the files in the directory can be deleted as freely\n+ The curation will only occur if both the precentage and actual disk space\n+ is below the limit. This assumes all the files in the directory can be\n+ deleted as freely.\n \n- Args:\n+ Arguments:\n directory (str): directory path that holds cached files\n min_free_percent (float): percentage (0.0-100.0) of drive to keep free,\n default is 5% if not specified.\n min_free_disk (float): minimum allowed disk space in MB, default\n value is 50 MB if not specified.\n \"\"\"\n-\n # Simpleminded implementation -- keep a certain percentage of the\n # disk available.\n # TODO: Would be easy to add more options, like whitelisted files, etc.\n space = psutil.disk_usage(directory)\n \n- # convert from MB to bytes\n- min_free_disk *= 1024 * 1024\n- # space.percent = space.used/space.total*100.0\n+ min_free_disk = mb_to_bytes(min_free_disk)\n percent_free = 100.0 - space.percent\n if percent_free < min_free_percent and space.free < min_free_disk:\n LOG.info('Low diskspace detected, cleaning cache')\n@@ -150,24 +197,9 @@ def curate_cache(directory, min_free_percent=5.0, min_free_disk=50):\n bytes_needed = int(bytes_needed + 1.0)\n \n # get all entries in the directory w/ stats\n- entries = (os.path.join(directory, fn) for fn in os.listdir(directory))\n- entries = ((os.stat(path), path) for path in entries)\n-\n- # leave only regular files, insert modification date\n- entries = ((stat[ST_MTIME], stat[ST_SIZE], path)\n- for stat, path in entries if S_ISREG(stat[ST_MODE]))\n-\n- # delete files with oldest modification date until space is freed\n- space_freed = 0\n- for moddate, fsize, path in sorted(entries):\n- try:\n- os.remove(path)\n- space_freed += fsize\n- except Exception:\n- pass\n-\n- if space_freed > bytes_needed:\n- return # deleted enough!\n+ entries = _get_cache_entries(directory)\n+ # delete as many as needed starting with the oldest\n+ 
_delete_oldest(entries, bytes_needed)\n \n \n def get_cache_directory(domain=None):\n" } ]
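An illustrative, self-contained sketch of the decomposition above (not the mycroft-core code itself; the cache directory path in the comment is hypothetical):

    import os
    from stat import ST_MTIME, ST_SIZE, ST_MODE, S_ISREG

    def mb_to_bytes(size):
        # MB -> bytes
        return size * 1024 * 1024

    def cache_entries(directory):
        # (modification time, size, path) for every regular file in directory
        paths = (os.path.join(directory, fn) for fn in os.listdir(directory))
        stats = ((os.stat(path), path) for path in paths)
        return ((st[ST_MTIME], st[ST_SIZE], path)
                for st, path in stats if S_ISREG(st[ST_MODE]))

    def delete_oldest(entries, bytes_needed):
        # Remove files starting with the oldest until enough space is freed
        freed = 0
        for _mtime, size, path in sorted(entries):
            try:
                os.remove(path)
                freed += size
            except OSError:
                pass
            if freed > bytes_needed:
                break

    # e.g. free roughly 10 MB from a hypothetical cache directory:
    # delete_oldest(cache_entries('/tmp/example_cache'), mb_to_bytes(10))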
010562f3e160b8e2b4fde2102f9fc19dbcbee1ef
mycroftai/mycroft-core
18.05.2020 08:52:05
Apache License 2.0
Fix remove_fallback() remove_fallback() was mainly created for internal use during shutdown and required the wrapped callable to be able to remove a handler. This makes it general, using a mapping to find the wrapper from a handler if needed. The method now also returns the success/failure status.
[ { "change_type": "MODIFY", "old_path": "mycroft/skills/fallback_skill.py", "new_path": "mycroft/skills/fallback_skill.py", "diff": "@@ -48,6 +48,7 @@ class FallbackSkill(MycroftSkill):\n utterance will not be see by any other Fallback handlers.\n \"\"\"\n fallback_handlers = {}\n+ wrapper_map = [] # Map containing (handler, wrapper) tuples\n \n def __init__(self, name=None, bus=None, use_settings=True):\n super().__init__(name, bus, use_settings)\n@@ -98,18 +99,25 @@ class FallbackSkill(MycroftSkill):\n return handler\n \n @classmethod\n- def _register_fallback(cls, handler, priority):\n+ def _register_fallback(cls, handler, wrapper, priority):\n \"\"\"Register a function to be called as a general info fallback\n Fallback should receive message and return\n a boolean (True if succeeded or False if failed)\n \n Lower priority gets run first\n 0 for high priority 100 for low priority\n+\n+ Arguments:\n+ handler (callable): original handler, used as a reference when\n+ removing\n+ wrapper (callable): wrapped version of handler\n+ priority (int): fallback priority\n \"\"\"\n while priority in cls.fallback_handlers:\n priority += 1\n \n- cls.fallback_handlers[priority] = handler\n+ cls.fallback_handlers[priority] = wrapper\n+ cls.wrapper_map.append((handler, wrapper))\n \n def register_fallback(self, handler, priority):\n \"\"\"Register a fallback with the list of fallback handlers and with the\n@@ -122,8 +130,28 @@ class FallbackSkill(MycroftSkill):\n return True\n return False\n \n- self.instance_fallback_handlers.append(wrapper)\n- self._register_fallback(wrapper, priority)\n+ self.instance_fallback_handlers.append(handler)\n+ self._register_fallback(handler, wrapper, priority)\n+\n+ @classmethod\n+ def _remove_registered_handler(cls, wrapper_to_del):\n+ \"\"\"Remove a registered wrapper.\n+\n+ Arguments:\n+ wrapper_to_del (callable): wrapped handler to be removed\n+\n+ Returns:\n+ (bool) True if one or more handlers were removed, otherwise False.\n+ \"\"\"\n+ found_handler = False\n+ for priority, handler in list(cls.fallback_handlers.items()):\n+ if handler == wrapper_to_del:\n+ found_handler = True\n+ del cls.fallback_handlers[priority]\n+\n+ if not found_handler:\n+ LOG.warning('No fallback matching {}'.format(wrapper_to_del))\n+ return found_handler\n \n @classmethod\n def remove_fallback(cls, handler_to_del):\n@@ -131,15 +159,27 @@ class FallbackSkill(MycroftSkill):\n \n Arguments:\n handler_to_del: reference to handler\n+ Returns:\n+ (bool) True if at least one handler was removed, otherwise False\n \"\"\"\n- for priority, handler in cls.fallback_handlers.items():\n- if handler == handler_to_del:\n- del cls.fallback_handlers[priority]\n- return\n- LOG.warning('Could not remove fallback!')\n+ # Find wrapper from handler or wrapper\n+ wrapper_to_del = None\n+ for h, w in cls.wrapper_map:\n+ if handler_to_del in (h, w):\n+ wrapper_to_del = w\n+ break\n+\n+ if wrapper_to_del:\n+ cls.wrapper_map.remove((h, w))\n+ remove_ok = cls._remove_registered_handler(wrapper_to_del)\n+ else:\n+ LOG.warning('Could not find matching fallback handler')\n+ remove_ok = False\n+ return remove_ok\n \n def remove_instance_handlers(self):\n \"\"\"Remove all fallback handlers registered by the fallback skill.\"\"\"\n+ self.log.info('Removing all handlers...')\n while len(self.instance_fallback_handlers):\n handler = self.instance_fallback_handlers.pop()\n self.remove_fallback(handler)\n" }, { "change_type": "MODIFY", "old_path": "test/unittests/skills/test_fallback_skill.py", "new_path": 
"test/unittests/skills/test_fallback_skill.py", "diff": "@@ -35,11 +35,14 @@ class TestFallbackSkill(TestCase):\n fb_skill = setup_fallback(SimpleFallback)\n self.assertEqual(len(FallbackSkill.fallback_handlers), 1)\n \n- fb_skill.remove_fallback(fb_skill.fallback_handler)\n+ self.assertTrue(fb_skill.remove_fallback(fb_skill.fallback_handler))\n # Both internal trackers of handlers should be cleared now\n self.assertEqual(len(FallbackSkill.fallback_handlers), 0)\n self.assertEqual(len(FallbackSkill.wrapper_map), 0)\n \n+ # Removing after it's already been removed should fail\n+ self.assertFalse(fb_skill.remove_fallback(fb_skill.fallback_handler))\n+\n \n class SimpleFallback(FallbackSkill):\n \"\"\"Simple fallback skill used for test.\"\"\"\n" } ]
b7d709c3c86af4f58cc8a8f7bbd089f319a0718b
mycroftai/mycroft-core
03.07.2020 08:29:12
Apache License 2.0
Add wait_for_message() method to messagebus client - Refactor message waiting into a MessageWaiter class so the same code can be used in both wait_for_message and wait_for_response. - Add some basic unit tests.
[ { "change_type": "MODIFY", "old_path": "mycroft/messagebus/client/__init__.py", "new_path": "mycroft/messagebus/client/__init__.py", "diff": "@@ -11,4 +11,4 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n-from .client import MessageBusClient\n+from .client import MessageBusClient, MessageWaiter\n" }, { "change_type": "MODIFY", "old_path": "mycroft/messagebus/client/client.py", "new_path": "mycroft/messagebus/client/client.py", "diff": "@@ -30,6 +30,53 @@ from mycroft.util.log import LOG\n from .threaded_event_emitter import ThreadedEventEmitter\n \n \n+class MessageWaiter:\n+ \"\"\"Wait for a single message.\n+\n+ Encapsulate the wait for a message logic separating the setup from\n+ the actual waiting act so the waiting can be setuo, actions can be\n+ performed and _then_ the message can be waited for.\n+\n+ Argunments:\n+ bus: Bus to check for messages on\n+ message_type: message type to wait for\n+ \"\"\"\n+ def __init__(self, bus, message_type):\n+ self.bus = bus\n+ self.msg_type = message_type\n+ self.received_msg = None\n+ # Setup response handler\n+ self.bus.once(message_type, self._handler)\n+\n+ def _handler(self, message):\n+ \"\"\"Receive response data.\"\"\"\n+ self.received_msg = message\n+\n+ def wait(self, timeout=3.0):\n+ \"\"\"Wait for message.\n+\n+ Arguments:\n+ timeout (int or float): seconds to wait for message\n+\n+ Returns:\n+ Message or None\n+ \"\"\"\n+ start_time = time.monotonic()\n+ while self.received_msg is None:\n+ time.sleep(0.2)\n+ if time.monotonic() - start_time > timeout:\n+ try:\n+ self.bus.remove(self.msg_type, self._handler)\n+ except (ValueError, KeyError):\n+ # ValueError occurs on pyee 5.0.1 removing handlers\n+ # registered with once.\n+ # KeyError may theoretically occur if the event occurs as\n+ # the handler is removed\n+ pass\n+ break\n+ return self.received_msg\n+\n+\n class MessageBusClient:\n def __init__(self, host=None, port=None, route=None, ssl=None):\n config_overrides = dict(host=host, port=port, route=route, ssl=ssl)\n@@ -120,6 +167,19 @@ class MessageBusClient:\n LOG.warning('Could not send {} message because connection '\n 'has been closed'.format(message.msg_type))\n \n+ def wait_for_message(self, message_type, timeout=3.0):\n+ \"\"\"Wait for a message of a specific type.\n+\n+ Arguments:\n+ message_type (str): the message type of the expected message\n+ timeout: seconds to wait before timeout, defaults to 3\n+\n+ Returns:\n+ The received message or None if the response timed out\n+ \"\"\"\n+\n+ return MessageWaiter(self, message_type).wait(timeout)\n+\n def wait_for_response(self, message, reply_type=None, timeout=3.0):\n \"\"\"Send a message and wait for a response.\n \n@@ -132,32 +192,11 @@ class MessageBusClient:\n Returns:\n The received message or None if the response timed out\n \"\"\"\n- response = None\n-\n- def handler(message):\n- \"\"\"Receive response data.\"\"\"\n- nonlocal response\n- response = message\n-\n- # Setup response handler\n- self.once(reply_type or message.msg_type + '.response', handler)\n- # Send request\n+ message_type = reply_type or message.msg_type + '.response'\n+ waiter = MessageWaiter(self, message_type) # Setup response handler\n+ # Send message and wait for it's response\n self.emit(message)\n- # Wait for response\n- start_time = time.monotonic()\n- while response is None:\n- time.sleep(0.2)\n- if time.monotonic() - start_time > timeout:\n- try:\n- 
self.remove(reply_type, handler)\n- except (ValueError, KeyError):\n- # ValueError occurs on pyee 1.0.1 removing handlers\n- # registered with once.\n- # KeyError may theoretically occur if the event occurs as\n- # the handler is removed\n- pass\n- return None\n- return response\n+ return waiter.wait()\n \n def on(self, event_name, func):\n self.emitter.on(event_name, func)\n" }, { "change_type": "MODIFY", "old_path": "test/unittests/messagebus/client/test_client.py", "new_path": "test/unittests/messagebus/client/test_client.py", "diff": "@@ -12,9 +12,10 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #\n-from unittest.mock import patch\n+from unittest import TestCase\n+from unittest.mock import patch, Mock\n \n-from mycroft.messagebus.client import MessageBusClient\n+from mycroft.messagebus.client import MessageBusClient, MessageWaiter\n \n WS_CONF = {\n 'websocket': {\n@@ -37,3 +38,22 @@ class TestMessageBusClient:\n def test_create_client(self, mock_conf):\n mc = MessageBusClient()\n assert mc.client.url == 'ws://testhost:1337/core'\n+\n+\n+class TestMessageWaiter(TestCase):\n+ def test_message_wait_success(self):\n+ bus = Mock()\n+ waiter = MessageWaiter(bus, 'delayed.message')\n+ bus.once.assert_called_with('delayed.message', waiter._handler)\n+\n+ test_msg = Mock(name='test_msg')\n+ waiter._handler(test_msg) # Inject response\n+\n+ self.assertEqual(waiter.wait(), test_msg)\n+\n+ def test_message_wait_timeout(self):\n+ bus = Mock()\n+ waiter = MessageWaiter(bus, 'delayed.message')\n+ bus.once.assert_called_with('delayed.message', waiter._handler)\n+\n+ self.assertEqual(waiter.wait(0.3), None)\n" } ]
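A sketch mirroring the unit test above, so MessageWaiter can be tried without a live messagebus (requires mycroft-core to be importable; the message types are arbitrary examples):

    from unittest.mock import Mock
    from mycroft.messagebus.client import MessageWaiter

    bus = Mock()                                    # stand-in for MessageBusClient
    waiter = MessageWaiter(bus, 'delayed.message')  # registers bus.once(...)

    waiter._handler(Mock(name='test_msg'))          # inject the awaited message
    print(waiter.wait())                            # returns the injected message

    timed_out = MessageWaiter(bus, 'never.sent').wait(0.3)
    print(timed_out)                                # None after the timeout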
bea1f008c6cd825274cc7fa0fc507f64ed740a0d
mycroftai/mycroft-core
28.07.2020 07:48:50
Apache License 2.0
Add ProcessStatus class ProcessStatus tracks the process status, allows callbacks on state changes and answers status queries over the messagebus. StatusCallbackMap is used to set up the callbacks. ProcessState is an enum tracking the different states.
[ { "change_type": "MODIFY", "old_path": "mycroft/skills/__main__.py", "new_path": "mycroft/skills/__main__.py", "diff": "@@ -38,6 +38,7 @@ from mycroft.util import (\n )\n from mycroft.util.lang import set_active_lang\n from mycroft.util.log import LOG\n+from mycroft.util.process_utils import ProcessStatus, StatusCallbackMap\n from .core import FallbackSkill\n from .event_scheduler import EventScheduler\n from .intent_service import IntentService\n@@ -195,8 +196,14 @@ def main(ready_hook=on_ready, error_hook=on_error, stopping_hook=on_stopping,\n bus = start_message_bus_client(\"SKILLS\")\n _register_intent_services(bus)\n event_scheduler = EventScheduler(bus)\n+ callbacks = StatusCallbackMap(on_complete=ready_hook,\n+ on_error=error_hook,\n+ on_stopping=stopping_hook)\n+ status = ProcessStatus('skills', bus, callbacks)\n+\n skill_manager = _initialize_skill_manager(bus, watchdog)\n \n+ status.set_started()\n _wait_for_internet_connection()\n \n if skill_manager is None:\n@@ -207,9 +214,14 @@ def main(ready_hook=on_ready, error_hook=on_error, stopping_hook=on_stopping,\n skill_manager.start()\n while not skill_manager.is_alive():\n time.sleep(0.1)\n- ready_hook() # Report ready status\n+ status.set_alive()\n+\n+ while not skill_manager.is_all_loaded():\n+ time.sleep(0.1)\n+ status.set_ready()\n+\n wait_for_exit_signal()\n- stopping_hook() # Report shutdown started\n+ process_status.set_stopping()\n shutdown(skill_manager, event_scheduler)\n \n \n" }, { "change_type": "MODIFY", "old_path": "mycroft/skills/skill_manager.py", "new_path": "mycroft/skills/skill_manager.py", "diff": "@@ -158,8 +158,6 @@ class SkillManager(Thread):\n self.bus.on('skillmanager.keep', self.deactivate_except)\n self.bus.on('skillmanager.activate', self.activate_skill)\n self.bus.on('mycroft.paired', self.handle_paired)\n- self.bus.on('mycroft.skills.is_alive', self.is_alive)\n- self.bus.on('mycroft.skills.all_loaded', self.is_all_loaded)\n self.bus.on(\n 'mycroft.skills.settings.update',\n self.settings_downloader.download\n@@ -353,17 +351,10 @@ class SkillManager(Thread):\n \n def is_alive(self, message=None):\n \"\"\"Respond to is_alive status request.\"\"\"\n- if message:\n- status = {'status': self._alive_status}\n- self.bus.emit(message.response(data=status))\n return self._alive_status\n \n def is_all_loaded(self, message=None):\n \"\"\" Respond to all_loaded status request.\"\"\"\n- if message:\n- status = {'status': self._loaded_status}\n- self.bus.emit(message.response(data=status))\n-\n return self._loaded_status\n \n def send_skill_list(self, _):\n" }, { "change_type": "MODIFY", "old_path": "mycroft/util/process_utils.py", "new_path": "mycroft/util/process_utils.py", "diff": "@@ -1,6 +1,9 @@\n+from collections import namedtuple\n+from enum import IntEnum\n import json\n import logging\n import signal as sig\n+import sys\n from threading import Event, Thread\n from time import sleep\n \n@@ -152,3 +155,140 @@ def start_message_bus_client(service, bus=None, whitelist=None):\n LOG.info('Connected to messagebus')\n \n return bus\n+\n+\n+class ProcessState(IntEnum):\n+\n+ \"\"\"Oredered enum to make state checks easy.\n+\n+ For example Alive can be determined using >= ProcessState.ALIVE,\n+ which will return True if the state is READY as well as ALIVE.\n+ \"\"\"\n+ NOT_STARTED = 0\n+ STARTED = 1\n+ ERROR = 2\n+ STOPPING = 3\n+ ALIVE = 4\n+ READY = 5\n+\n+\n+# Process state change callback mappings.\n+_STATUS_CALLBACKS = [\n+ 'on_started',\n+ 'on_alive',\n+ 'on_complete',\n+ 'on_error',\n+ 
'on_stopping',\n+]\n+# namedtuple defaults only available on 3.7 and later python versions\n+if sys.version_info < (3, 7):\n+ StatusCallbackMap = namedtuple('CallbackMap', _STATUS_CALLBACKS)\n+ StatusCallbackMap.__new__.__defaults__ = (None,) * 5\n+else:\n+ StatusCallbackMap = namedtuple(\n+ 'CallbackMap',\n+ _STATUS_CALLBACKS,\n+ defaults=(None,) * len(_STATUS_CALLBACKS),\n+ )\n+\n+\n+class ProcessStatus:\n+ \"\"\"Process status tracker.\n+\n+ The class tracks process status and execute callback methods on\n+ state changes as well as replies to messagebus queries of the\n+ process status.\n+\n+ Arguments:\n+ name (str): process name, will be used to create the messagebus\n+ messagetype \"mycroft.{name}...\".\n+ bus (MessageBusClient): Connection to the Mycroft messagebus.\n+ callback_map (StatusCallbackMap): optionally, status callbacks for the\n+ various status changes.\n+ \"\"\"\n+\n+ def __init__(self, name, bus, callback_map=None):\n+\n+ # Messagebus connection\n+ self.bus = bus\n+ self.name = name\n+\n+ self.callbacks = callback_map or StatusCallbackMap()\n+ self.state = ProcessState.NOT_STARTED\n+ self._register_handlers()\n+\n+ def _register_handlers(self):\n+ \"\"\"Register messagebus handlers for status queries.\"\"\"\n+ self.bus.on('mycroft.{}.is_alive'.format(self.name), self.check_alive)\n+ self.bus.on('mycroft.{}.ready'.format(self.name), self.check_ready)\n+ # The next one is for backwards compatibility\n+ # TODO: remove in 21.02\n+ self.bus.on(\n+ 'mycroft.{}.all_loaded'.format(self.name), self.check_ready\n+ )\n+\n+ def check_alive(self, message=None):\n+ \"\"\"Respond to is_alive status request.\n+\n+ Arguments:\n+ message: Optional message to respond to, if omitted no message\n+ is sent.\n+\n+ Returns:\n+ bool, True if process is alive.\n+ \"\"\"\n+ is_alive = self.state >= ProcessState.ALIVE\n+\n+ if message:\n+ status = {'status': is_alive}\n+ self.bus.emit(message.response(data=status))\n+\n+ return is_alive\n+\n+ def check_ready(self, message=None):\n+ \"\"\"Respond to all_loaded status request.\n+\n+ Arguments:\n+ message: Optional message to respond to, if omitted no message\n+ is sent.\n+\n+ Returns:\n+ bool, True if process is ready.\n+ \"\"\"\n+ is_ready = self.state >= ProcessState.READY\n+ if message:\n+ status = {'status': is_ready}\n+ self.bus.emit(message.response(data=status))\n+\n+ return is_ready\n+\n+ def set_started(self):\n+ \"\"\"Process is started.\"\"\"\n+ self.state = ProcessState.STARTED\n+ if self.callbacks.on_started:\n+ self.callbacks.on_started()\n+\n+ def set_alive(self):\n+ \"\"\"Basic loading is done.\"\"\"\n+ self.state = ProcessState.ALIVE\n+ if self.callbacks.on_alive:\n+ self.callbacks.on_alive()\n+\n+ def set_ready(self):\n+ \"\"\"All loading is done.\"\"\"\n+ self.state = ProcessState.READY\n+ if self.callbacks.on_complete:\n+ self.callbacks.on_complete()\n+\n+ def set_stopping(self):\n+ \"\"\"Process shutdown has started.\"\"\"\n+ self.state = ProcessState.STOPPING\n+ if self.callbacks.on_stopping:\n+ self.callbacks.on_stopping()\n+\n+ def set_error(self, err=''):\n+ \"\"\"An error has occured and the process is non-functional.\"\"\"\n+ # Intentionally leave is_started True\n+ self.state = ProcessState.ERROR\n+ if self.callbacks.on_error:\n+ self.callbacks.on_error(err)\n" } ]
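A usage sketch based on the diff above (the Mock stands in for a connected MessageBusClient; the service name and callbacks are examples):

    from unittest.mock import Mock
    from mycroft.util.process_utils import ProcessStatus, StatusCallbackMap

    def on_ready():
        print('service fully loaded')

    def on_error(err=''):
        print('service failed:', err)

    bus = Mock()   # stand-in for a connected MessageBusClient
    callbacks = StatusCallbackMap(on_complete=on_ready, on_error=on_error)
    status = ProcessStatus('skills', bus, callbacks)

    status.set_started()   # process has launched
    status.set_alive()     # basic loading done, check_alive() now returns True
    status.set_ready()     # everything loaded, fires on_complete -> on_ready()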
0aae02115509a67b18f2a6cc9b691392a8d3fe3a
pyglet/pyglet
26.03.2017 18:19:36
BSD 3-Clause New or Revised License
Use pyglet.options['ffmpeg_libs_win'] to give the FFmpeg lib filenames. This has to be used on Windows, as the dll names have a format like avcodec-57.dll, with the version number appended at the end.
[ { "change_type": "MODIFY", "old_path": "pyglet/__init__.py", "new_path": "pyglet/__init__.py", "diff": "@@ -176,6 +176,16 @@ if getattr(sys, 'frozen', None):\n #:\n #: **Since:** pyglet 1.2\n #:\n+#: ffmpeg_libs_win\n+#: A tuple containing the FFmpeg dll filenames for Windows. As on this\n+#: platform there is no standard way to find the dll files, and the\n+#: FFmpeg dll names have version number appended at the end, it's easier\n+#: for the developer to state what are the filenames, and hence giving\n+#: the version for each dll files. By default, the filenames are\n+#: 'avcodec-57', 'avformat-57', 'avutil-55', 'swresample-2', 'swscale-4'\n+#:\n+#: **Since:** pyglet 1.4\n+#:\n options = {\n 'audio': ('directsound', 'pulse', 'openal', 'silent'),\n 'font': ('gdiplus', 'win32'), # ignored outside win32; win32 is deprecated\n@@ -193,6 +203,8 @@ options = {\n 'debug_trace_flush': True,\n 'debug_win32': False,\n 'debug_x11': False,\n+ 'ffmpeg_libs_win': ('avcodec-57', 'avformat-57', 'avutil-55', \n+ 'swresample-2', 'swscale-4'),\n 'graphics_vbo': True,\n 'shadow_window': True,\n 'vsync': None,\n@@ -219,6 +231,7 @@ _option_types = {\n 'debug_trace_flush': bool,\n 'debug_win32': bool,\n 'debug_x11': bool,\n+ 'ffmpeg_libs_win': tuple,\n 'graphics_vbo': bool,\n 'shadow_window': bool,\n 'vsync': bool,\n" }, { "change_type": "MODIFY", "old_path": "pyglet/media/sources/__init__.py", "new_path": "pyglet/media/sources/__init__.py", "diff": "@@ -3,6 +3,4 @@\n # Collect public interface\n from .loader import load, have_ffmpeg\n from .base import AudioFormat, VideoFormat, AudioData, SourceInfo\n-from .base import Source, StreamingSource, StaticSource, SourceGroup\n-\n-# from . import ffmpeg\n+from .base import Source, StreamingSource, StaticSource, SourceGroup\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "pyglet/media/sources/ffmpeg_lib/libavcodec.py", "new_path": "pyglet/media/sources/ffmpeg_lib/libavcodec.py", "diff": "@@ -38,13 +38,19 @@ from ctypes import (c_int, c_uint16, c_int32, c_int64, c_uint32, c_uint64,\n c_uint8, c_uint, c_double, c_float, c_ubyte, c_size_t, c_char, c_char_p, \r\n c_void_p, addressof, byref, cast, POINTER, CFUNCTYPE, Structure, Union, \r\n create_string_buffer, memmove)\r\n-from sys import platform\r\n \r\n+import pyglet\r\n import pyglet.lib\r\n from . import libavutil\r\n \r\n-if platform == 'win32':\r\n- avcodec = pyglet.lib.load_library('avcodec-57')\r\n+if pyglet.compat_platform == 'win32':\r\n+ for libname in pyglet.options['ffmpeg_libs_win']:\r\n+ if libname.startswith('avcodec'):\r\n+ avcodec = pyglet.lib.load_library(libname)\r\n+ break\r\n+ else:\r\n+ # As a last resort, try to load the dll with default name.\r\n+ avcodec = pyglet.lib.load_library('avcodec')\r\n else:\r\n avcodec = pyglet.lib.load_library('avcodec')\r\n \r\n" }, { "change_type": "MODIFY", "old_path": "pyglet/media/sources/ffmpeg_lib/libavformat.py", "new_path": "pyglet/media/sources/ffmpeg_lib/libavformat.py", "diff": "@@ -38,14 +38,20 @@ from ctypes import (c_int, c_uint16, c_int32, c_int64, c_uint32, c_uint64,\n c_uint8, c_uint, c_double, c_float, c_ubyte, c_size_t, c_char, c_char_p, \r\n c_void_p, addressof, byref, cast, POINTER, CFUNCTYPE, Structure, Union, \r\n create_string_buffer, memmove)\r\n-from sys import platform\r\n \r\n+import pyglet\r\n import pyglet.lib\r\n from . import libavcodec\r\n from . 
import libavutil\r\n \r\n-if platform == 'win32':\r\n- avformat = pyglet.lib.load_library('avformat-57')\r\n+if pyglet.compat_platform == 'win32':\r\n+ for libname in pyglet.options['ffmpeg_libs_win']:\r\n+ if libname.startswith('avformat'):\r\n+ avformat = pyglet.lib.load_library(libname)\r\n+ break\r\n+ else:\r\n+ # As a last resort, try to load the dll with default name.\r\n+ avformat = pyglet.lib.load_library('avformat')\r\n else:\r\n avformat = pyglet.lib.load_library('avformat')\r\n \r\n" }, { "change_type": "MODIFY", "old_path": "pyglet/media/sources/ffmpeg_lib/libavutil.py", "new_path": "pyglet/media/sources/ffmpeg_lib/libavutil.py", "diff": "@@ -38,12 +38,18 @@ from ctypes import (c_int, c_uint16, c_int32, c_int64, c_uint32, c_uint64,\n c_uint8, c_uint, c_double, c_float, c_ubyte, c_size_t, c_char, c_char_p, \r\n c_void_p, addressof, byref, cast, POINTER, CFUNCTYPE, Structure, Union, \r\n create_string_buffer, memmove)\r\n-from sys import platform\r\n \r\n+import pyglet\r\n import pyglet.lib\r\n \r\n-if platform == 'win32':\r\n- avutil = pyglet.lib.load_library('avutil-55')\r\n+if pyglet.compat_platform == 'win32':\r\n+ for libname in pyglet.options['ffmpeg_libs_win']:\r\n+ if libname.startswith('avutil'):\r\n+ avutil = pyglet.lib.load_library(libname)\r\n+ break\r\n+ else:\r\n+ # As a last resort, try to load the dll with default name.\r\n+ avutil = pyglet.lib.load_library('avutil')\r\n else:\r\n avutil = pyglet.lib.load_library('avutil')\r\n \r\n" }, { "change_type": "MODIFY", "old_path": "pyglet/media/sources/ffmpeg_lib/libswresample.py", "new_path": "pyglet/media/sources/ffmpeg_lib/libswresample.py", "diff": "@@ -38,12 +38,18 @@ from ctypes import (c_int, c_uint16, c_int32, c_int64, c_uint32, c_uint64,\n c_uint8, c_uint, c_double, c_float, c_ubyte, c_size_t, c_char, c_char_p, \r\n c_void_p, addressof, byref, cast, POINTER, CFUNCTYPE, Structure, Union, \r\n create_string_buffer, memmove)\r\n-from sys import platform\r\n \r\n+import pyglet\r\n import pyglet.lib\r\n \r\n-if platform == 'win32':\r\n- swresample = pyglet.lib.load_library('swresample-2')\r\n+if pyglet.compat_platform == 'win32':\r\n+ for libname in pyglet.options['ffmpeg_libs_win']:\r\n+ if libname.startswith('swresample'):\r\n+ swresample = pyglet.lib.load_library(libname)\r\n+ break\r\n+ else:\r\n+ # As a last resort, try to load the dll with default name.\r\n+ swresample = pyglet.lib.load_library('swresample')\r\n else:\r\n swresample = pyglet.lib.load_library('swresample')\r\n \r\n" }, { "change_type": "MODIFY", "old_path": "pyglet/media/sources/ffmpeg_lib/libswscale.py", "new_path": "pyglet/media/sources/ffmpeg_lib/libswscale.py", "diff": "@@ -38,12 +38,18 @@ from ctypes import (c_int, c_uint16, c_int32, c_int64, c_uint32, c_uint64,\n c_uint8, c_uint, c_double, c_float, c_ubyte, c_size_t, c_char, c_char_p, \r\n c_void_p, addressof, byref, cast, POINTER, CFUNCTYPE, Structure, Union, \r\n create_string_buffer, memmove)\r\n-from sys import platform\r\n \r\n+import pyglet\r\n import pyglet.lib\r\n \r\n-if platform == 'win32':\r\n- swscale = pyglet.lib.load_library('swscale-4')\r\n+if pyglet.compat_platform == 'win32':\r\n+ for libname in pyglet.options['ffmpeg_libs_win']:\r\n+ if libname.startswith('swscale'):\r\n+ swscale = pyglet.lib.load_library(libname)\r\n+ break\r\n+ else:\r\n+ # As a last resort, try to load the dll with default name.\r\n+ swscale = pyglet.lib.load_library('swscale')\r\n else:\r\n swscale = pyglet.lib.load_library('swscale')\r\n \r\n" } ]
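A usage sketch for the new option (the version suffixes below are the defaults shown in the diff; real values must match the FFmpeg dlls shipped with the application, and the option has to be set before any media module is imported):

    import pyglet

    # On Windows, state the exact dll filenames, version suffix included
    pyglet.options['ffmpeg_libs_win'] = ('avcodec-57', 'avformat-57', 'avutil-55',
                                         'swresample-2', 'swscale-4')

    # Import media only after the option is set so the right dlls are loaded
    from pyglet import media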
bb15b5af1f7af13c8deecadf7bef5a78b8fc366c
pyglet/pyglet
04.09.2017 09:01:49
BSD 3-Clause New or Revised License
Refactor FFmpegSource Use two queues, one for video packets and one for audio packets. Whenever the queues have space, more packets are read from the stream. This work will make it easier to remove the audio thread in favor of a scheduled call that refills the audio player.
[ { "change_type": "MODIFY", "old_path": "pyglet/media/sources/ffmpeg.py", "new_path": "pyglet/media/sources/ffmpeg.py", "diff": "@@ -109,6 +109,15 @@ class VideoPacket(object):\n self.id = self._next_id\n self.__class__._next_id += 1\n \n+\n+class AudioPacket(object):\n+ def __init__(self, packet):\n+ self.timestamp = timestamp_from_ffmpeg(packet.timestamp)\n+ self.data = (ctypes.c_uint8 * packet.size)()\n+ self.size = packet.size\n+ ctypes.memmove(self.data, packet.data, self.size)\n+\n+\n class FFmpegSource(StreamingSource):\n def __init__(self, filename, file=None):\n if file is not None:\n@@ -182,16 +191,19 @@ class FFmpegSource(StreamingSource):\n \n # Timestamp of last video packet added to decoder queue.\n self._video_timestamp = 0\n- self._buffered_audio_data = deque()\n+\n+ self.audioq = deque()\n+ self._max_len_audioq = 50 # Need to figure out a correct amount\n if self.audio_format:\n self._audio_buffer = \\\n (ctypes.c_uint8 * av.ffmpeg_get_audio_buffer_size())()\n \n if self.video_format:\n- self._video_packets = deque()\n- # self._decode_thread = WorkerThread()\n- # self._decode_thread.start()\n- self._condition = threading.Condition()\n+ self.videoq = deque()\n+ self._max_len_videoq = 25 # Need to figure out a correct amount\n+ # Flag to determine if the _fillq method was already scheduled\n+ self._fillq_scheduled = False\n+ self._fillq()\n \n def __del__(self):\n if _debug:\n@@ -215,31 +227,110 @@ class FFmpegSource(StreamingSource):\n print('FFmpeg seek', timestamp)\n \n av.ffmpeg_seek_file(self._file, timestamp_to_ffmpeg(timestamp))\n-\n- self._audio_packet_size = 0\n del self._events[:]\n- self._buffered_audio_data.clear()\n+ self._clear_video_audio_queues()\n+ self._fillq()\n+ # Consume video and audio packets until we arrive at the correct\n+ # timestamp location\n+ while True:\n+ if self.audioq[0].timestamp < self.videoq[0].timestamp:\n+ if self.audioq[0].timestamp <= timestamp < self.audioq[1].timestamp:\n+ break\n+ else:\n+ self._get_audio_packet()\n+ else:\n+ if self.videoq[0].timestamp <= timestamp < self.videoq[1].timestamp:\n+ break\n+ else:\n+ self.get_next_video_frame()\n+ if len(self.audioq) == 1 or len(self.videoq) == 1:\n+ # No more packets to read.\n+ # The queues are only left with 1 packet each because we have\n+ # reached then end of the stream.\n+ break\n \n+ def _append_audio_data(self, audio_data):\n+ self.audioq.append(audio_data)\n+ assert len(self.audioq) <= self._max_len_audioq\n \n- if self.video_format:\n- self._video_timestamp = 0\n- with self._condition:\n- for packet in self._video_packets:\n- packet.image = None\n- self._condition.notify()\n- self._video_packets.clear()\n- # self._decode_thread.clear_jobs()\n+ def _append_video_packet(self, video_packet):\n+ self.videoq.append(video_packet)\n+ assert len(self.videoq) <= self._max_len_audioq\n+\n+ def _get_audio_packet(self):\n+ \"\"\"Take an audio packet from the queue.\n+\n+ This function will schedule its `_fillq` function to fill up\n+ the queues if space is available. Multiple calls to this method will\n+ only result in one scheduled call to `_fillq`.\n+ \"\"\"\n+ audio_data = self.audioq.popleft()\n+ low_lvl = self._check_low_level()\n+ if not low_lvl and not self._fillq_scheduled:\n+ pyglet.clock.schedule_once(lambda dt:self._fillq(), 0)\n+ self._fillq_scheduled = True\n+ return audio_data\n+\n+ def _get_video_packet(self):\n+ \"\"\"Take an video packet from the queue.\n+\n+ This function will schedule its `_fillq` function to fill up\n+ the queues if space is available. 
Multiple calls to this method will\n+ only result in one scheduled call to `_fillq`.\n+ \"\"\"\n+ video_packet = self.videoq.popleft()\n+ low_lvl = self._check_low_level()\n+ if not low_lvl and not self._fillq_scheduled:\n+ pyglet.clock.schedule_once(lambda dt:self._fillq(), 0)\n+ self._fillq_scheduled = True\n+ return video_packet\n+\n+ def _clear_video_audio_queues(self):\n+ \"Empty both audio and video queues.\"\n+ self.audioq.clear()\n+ self.videoq.clear()\n+\n+ def _fillq(self):\n+ \"Fill up both Audio and Video queues if space is available in both\"\n+ # We clear our flag.\n+ self._fillq_scheduled = False\n+ while (len(self.audioq) < self._max_len_audioq and\n+ len(self.videoq) < self._max_len_videoq):\n+ if self._get_packet():\n+ self._process_packet()\n+ else:\n+ break\n+ # Should maybe record that end of stream is reached in an\n+ # instance member.\n+\n+ def _check_low_level(self):\n+ \"\"\"Check if both audio and video queues are getting very low.\n+\n+ If one of them has less than 2 elements, we fill the queue immediately\n+ with new packets. We don't wait for a scheduled call because we need\n+ them immediately.\n+\n+ This would normally happens only during seek operations where we\n+ consume many packets to find the correct timestamp.\n+ \"\"\"\n+ if len(self.audioq) < 2 or len(self.videoq) < 2:\n+ assert len(self.audioq) < self._max_len_audioq\n+ assert len(self.videoq) < self._max_len_audioq\n+ self._fillq()\n+ return True\n+ return False\n \n def _get_packet(self):\n # Read a packet into self._packet. Returns True if OK, False if no\n # more packets are in stream.\n return av.ffmpeg_read(self._file, self._packet) == FFMPEG_RESULT_OK\n \n- def _process_packet(self, compensation_time=0.0):\n- # Returns (packet_type, packet), where packet_type = 'video' or\n- # 'audio'; and packet is VideoPacket or AudioData. In either case,\n- # packet is buffered or queued for decoding; no further action is\n- # necessary. Returns (None, None) if packet was neither type.\n+ def _process_packet(self):\n+ \"\"\"Process the packet that has been just read.\n+\n+ Determines whether it's a video or audio packet and queue it in the\n+ appropriate queue.\n+ \"\"\"\n if self._packet.stream_index == self._video_stream_index:\n if self._packet.timestamp < 0:\n # XXX TODO\n@@ -247,7 +338,11 @@ class FFmpegSource(StreamingSource):\n # some containers (OGG?). See\n # http://www.dranger.com/ffmpeg/tutorial05.html\n # For now we just drop these frames.\n- return None, None\n+ # New note: not sure this is a valid comment. 
B frames will\n+ # have a correct pts and are re-ordered by the decoder.\n+ # Wonder if this is ever happening...\n+ # TODO: check if we ever get a negative timestamp\n+ return\n \n video_packet = VideoPacket(self._packet)\n \n@@ -257,48 +352,30 @@ class FFmpegSource(StreamingSource):\n \n self._video_timestamp = max(self._video_timestamp,\n video_packet.timestamp)\n- self._video_packets.append(video_packet)\n- return 'video', video_packet\n+ self._append_video_packet(video_packet)\n+ return video_packet\n \n elif self._packet.stream_index == self._audio_stream_index:\n- audio_data = self._decode_audio_packet(compensation_time)\n- if audio_data:\n- if _debug:\n- print('Got an audio packet at', audio_data.timestamp)\n- self._buffered_audio_data.append(audio_data)\n- return 'audio', audio_data\n-\n- return None, None\n-\n+ audio_packet = AudioPacket(self._packet)\n+ self._append_audio_data(audio_packet)\n+ return audio_packet\n+ \n def get_audio_data(self, bytes, compensation_time=0.0):\n try:\n- audio_data = self._buffered_audio_data.popleft()\n- audio_data_timeend = audio_data.timestamp + audio_data.duration\n+ audio_packet = self._get_audio_packet()\n except IndexError:\n audio_data = None\n audio_data_timeend = self._video_timestamp + 1\n+ else:\n+ audio_data = self._decode_audio_packet(audio_packet, compensation_time)\n+ audio_data_timeend = audio_data.timestamp + audio_data.duration\n \n if _debug:\n print('get_audio_data')\n \n- # Keep reading packets until we have an audio packet and all the\n- # associated video packets have been enqueued on the decoder thread.\n- while not audio_data or (\n- self._video_stream and self._video_timestamp < audio_data_timeend):\n- if not self._get_packet():\n- break\n-\n- packet_type, packet = self._process_packet(compensation_time)\n-\n- if not audio_data and packet_type == 'audio':\n- audio_data = self._buffered_audio_data.popleft()\n- if _debug:\n- print('Got requested audio packet at', audio_data.timestamp)\n- audio_data_timeend = audio_data.timestamp + audio_data.duration\n-\n if not audio_data:\n if _debug:\n- print('get_audio_data returning None')\n+ print('No more audio data. get_audio_data returning None')\n return None\n \n while self._events and self._events[0].timestamp <= audio_data_timeend:\n@@ -313,8 +390,7 @@ class FFmpegSource(StreamingSource):\n print('remaining events are', self._events)\n return audio_data\n \n- def _decode_audio_packet(self, compensation_time):\n- packet = self._packet\n+ def _decode_audio_packet(self, packet, compensation_time):\n size_out = ctypes.c_int(len(self._audio_buffer))\n \n while True:\n@@ -327,7 +403,6 @@ class FFmpegSource(StreamingSource):\n self._audio_buffer, size_out,\n compensation_time)\n except FFmpegException:\n- self._audio_packet_size = 0\n break\n \n audio_packet_ptr.value += used\n@@ -335,20 +410,13 @@ class FFmpegSource(StreamingSource):\n \n if size_out.value <= 0:\n break\n-\n- # XXX how did this ever work? replaced with copy below\n- # buffer = ctypes.string_at(self._audio_buffer, size_out)\n-\n- # XXX to actually copy the data.. 
but it never used to crash, so\n- # maybe I'm missing something\n \n buffer = ctypes.create_string_buffer(size_out.value)\n ctypes.memmove(buffer, self._audio_buffer, len(buffer))\n buffer = buffer.raw\n \n duration = float(len(buffer)) / self.audio_format.bytes_per_second\n- self._audio_packet_timestamp = \\\n- timestamp = timestamp_from_ffmpeg(packet.timestamp)\n+ timestamp = packet.timestamp\n return AudioData(buffer, len(buffer), timestamp, duration, []) \n \n def _decode_video_packet(self, packet):\n@@ -384,51 +452,28 @@ class FFmpegSource(StreamingSource):\n # ps = pstats.Stats(pr).sort_stats(\"cumulative\")\n # ps.print_stats()\n \n- def _ensure_video_packets(self):\n- \"\"\"Process packets until a video packet has been queued (and begun\n- decoding). Return False if EOS.\n- \"\"\"\n- if not self._video_packets:\n- if _debug:\n- print('No video packets...')\n- # Read ahead until we have another video packet but quit reading\n- # after 15 frames, in case there is no more video packets\n- for i in range(15):\n- if not self._get_packet():\n- return False\n- packet_type, _ = self._process_packet()\n- if packet_type and packet_type == 'video':\n- break\n- if packet_type is None or packet_type == 'audio':\n- return False\n-\n- if _debug:\n- print('Queued packet', _)\n- return True\n-\n def get_next_video_timestamp(self):\n if not self.video_format:\n return\n \n- if self._ensure_video_packets():\n- if _debug:\n- print('Next video timestamp is', self._video_packets[0].timestamp)\n- return self._video_packets[0].timestamp\n+ if self.videoq:\n+ ts = self.videoq[0].timestamp\n+ else:\n+ ts = None\n+ if _debug:\n+ print('Next video timestamp is', ts)\n+ return ts\n \n def get_next_video_frame(self):\n if not self.video_format:\n return\n \n- if self._ensure_video_packets():\n- packet = self._video_packets.popleft()\n- if _debug:\n- print('Waiting for', packet)\n-\n- self._decode_video_packet(packet)\n+ video_packet = self._get_video_packet()\n+ self._decode_video_packet(video_packet)\n \n- if _debug:\n- print('Returning', packet)\n- return packet.image\n+ if _debug:\n+ print('Returning', video_packet)\n+ return video_packet.image\n \n av.ffmpeg_init()\n if pyglet.options['debug_media']:\n" } ]
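A generic sketch of the two-queue scheme described above (plain Python, not the pyglet code): demuxed packets go into bounded deques, and a consumer that finds the queues running low triggers an immediate refill:

    from collections import deque

    MAX_AUDIO, MAX_VIDEO = 50, 25
    audioq, videoq = deque(), deque()

    def fillq(read_packet):
        # read_packet() returns ('audio', pkt) or ('video', pkt), or None at EOF
        while len(audioq) < MAX_AUDIO and len(videoq) < MAX_VIDEO:
            item = read_packet()
            if item is None:
                break
            kind, pkt = item
            (audioq if kind == 'audio' else videoq).append(pkt)

    def get_audio_packet(read_packet):
        pkt = audioq.popleft()
        if len(audioq) < 2 or len(videoq) < 2:   # running low: refill right away
            fillq(read_packet)
        return pkt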
fc45a383d14b85a54104e2bffe24035f2caa103c
pyglet/pyglet
06.09.2017 19:13:10
BSD 3-Clause New or Revised License
Remove thread in DirectSound Instead, we use pyglet.clock.schedule_interval_soft to regularly refill the DirectSound buffer with new audio data.
[ { "change_type": "MODIFY", "old_path": "pyglet/media/drivers/directsound/adaptation.py", "new_path": "pyglet/media/drivers/directsound/adaptation.py", "diff": "@@ -35,8 +35,8 @@ from __future__ import absolute_import, print_function\n \r\n import ctypes\r\n import math\r\n-import threading\r\n \r\n+import pyglet\r\n from . import interface\r\n from pyglet.debug import debug_print\r\n from pyglet.media.events import MediaEvent\r\n@@ -84,13 +84,6 @@ class DirectSoundAudioPlayer(AbstractAudioPlayer):\n self.driver = driver\r\n self._ds_driver = ds_driver\r\n \r\n- # Locking strategy:\r\n- # All DirectSound calls should be locked. All instance vars relating\r\n- # to buffering/filling/time/events should be locked (used by both\r\n- # application and worker thread). Other instance vars (consts and\r\n- # 3d vars) do not need to be locked.\r\n- self._lock = threading.RLock()\r\n-\r\n # Desired play state (may be actually paused due to underrun -- not\r\n # implemented yet).\r\n self._playing = False\r\n@@ -133,69 +126,58 @@ class DirectSoundAudioPlayer(AbstractAudioPlayer):\n \r\n self.refill(self._buffer_size)\r\n \r\n- def __del__(self):\r\n- try:\r\n- self.delete()\r\n- except:\r\n- pass\r\n-\r\n- def delete(self):\r\n- if self.driver and self.driver.worker:\r\n- self.driver.worker.remove(self)\r\n-\r\n- with self._lock:\r\n- self._ds_buffer = None\r\n-\r\n def play(self):\r\n assert _debug('DirectSound play')\r\n- self.driver.worker.add(self)\r\n+ pyglet.clock.schedule_interval_soft(self._check_refill, 0.1)\r\n \r\n- with self._lock:\r\n- if not self._playing:\r\n- self._get_audiodata() # prebuffer if needed\r\n- self._playing = True\r\n- self._ds_buffer.play()\r\n+ if not self._playing:\r\n+ self._get_audiodata() # prebuffer if needed\r\n+ self._playing = True\r\n+ self._ds_buffer.play()\r\n \r\n assert _debug('return DirectSound play')\r\n \r\n def stop(self):\r\n assert _debug('DirectSound stop')\r\n- if self.driver and self.driver.worker:\r\n- self.driver.worker.remove(self)\r\n+ # if self.driver and self.driver.worker:\r\n+ # self.driver.worker.remove(self)\r\n+ pyglet.clock.unschedule(self._check_refill)\r\n \r\n- with self._lock:\r\n- if self._playing:\r\n- self._playing = False\r\n- self._ds_buffer.stop()\r\n+ if self._playing:\r\n+ self._playing = False\r\n+ self._ds_buffer.stop()\r\n \r\n assert _debug('return DirectSound stop')\r\n \r\n def clear(self):\r\n assert _debug('DirectSound clear')\r\n- with self._lock:\r\n- self._ds_buffer.current_position = 0\r\n- self._play_cursor_ring = self._write_cursor_ring = 0\r\n- self._play_cursor = self._write_cursor\r\n- self._eos_cursor = None\r\n- self._audiodata_buffer = None\r\n- del self._events[:]\r\n- del self._timestamps[:]\r\n+ self._ds_buffer.current_position = 0\r\n+ self._play_cursor_ring = self._write_cursor_ring = 0\r\n+ self._play_cursor = self._write_cursor\r\n+ self._eos_cursor = None\r\n+ self._audiodata_buffer = None\r\n+ del self._events[:]\r\n+ del self._timestamps[:]\r\n+\r\n+ def _check_refill(self, dt): # Need a better name!\r\n+ write_size = self.get_write_size()\r\n+ if write_size > self.min_buffer_size:\r\n+ self.refill(write_size)\r\n \r\n def refill(self, write_size):\r\n- with self._lock:\r\n- while write_size > 0:\r\n- assert _debug('refill, write_size =', write_size)\r\n- audio_data = self._get_audiodata()\r\n-\r\n- if audio_data is not None:\r\n- assert _debug('write', audio_data.length)\r\n- length = min(write_size, audio_data.length)\r\n- self.write(audio_data, length)\r\n- write_size -= length\r\n- 
else:\r\n- assert _debug('write silence')\r\n- self.write(None, write_size)\r\n- write_size = 0\r\n+ while write_size > 0:\r\n+ assert _debug('refill, write_size =', write_size)\r\n+ audio_data = self._get_audiodata()\r\n+\r\n+ if audio_data is not None:\r\n+ assert _debug('write', audio_data.length)\r\n+ length = min(write_size, audio_data.length)\r\n+ self.write(audio_data, length)\r\n+ write_size -= length\r\n+ else:\r\n+ assert _debug('write silence')\r\n+ self.write(None, write_size)\r\n+ write_size = 0\r\n \r\n def _has_underrun(self):\r\n return (self._eos_cursor is not None\r\n@@ -259,35 +241,32 @@ class DirectSoundAudioPlayer(AbstractAudioPlayer):\n (ts_cursor, audio_data.timestamp + audio_data.duration))\r\n \r\n def update_play_cursor(self):\r\n- with self._lock:\r\n- play_cursor_ring = self._ds_buffer.current_position.play_cursor\r\n- if play_cursor_ring < self._play_cursor_ring:\r\n- # Wrapped around\r\n- self._play_cursor += self._buffer_size - self._play_cursor_ring\r\n- self._play_cursor_ring = 0\r\n- self._play_cursor += play_cursor_ring - self._play_cursor_ring\r\n- self._play_cursor_ring = play_cursor_ring\r\n+ play_cursor_ring = self._ds_buffer.current_position.play_cursor\r\n+ if play_cursor_ring < self._play_cursor_ring:\r\n+ # Wrapped around\r\n+ self._play_cursor += self._buffer_size - self._play_cursor_ring\r\n+ self._play_cursor_ring = 0\r\n+ self._play_cursor += play_cursor_ring - self._play_cursor_ring\r\n+ self._play_cursor_ring = play_cursor_ring\r\n \r\n self._dispatch_pending_events()\r\n self._cleanup_timestamps()\r\n self._check_underrun()\r\n \r\n def _dispatch_pending_events(self):\r\n- with self._lock:\r\n- pending_events = []\r\n- while self._events and self._events[0][0] <= self._play_cursor:\r\n- _, event = self._events.pop(0)\r\n- pending_events.append(event)\r\n- assert _debug('Dispatching pending events: {}'.format(pending_events))\r\n- assert _debug('Remaining events: {}'.format(self._events))\r\n+ pending_events = []\r\n+ while self._events and self._events[0][0] <= self._play_cursor:\r\n+ _, event = self._events.pop(0)\r\n+ pending_events.append(event)\r\n+ assert _debug('Dispatching pending events: {}'.format(pending_events))\r\n+ assert _debug('Remaining events: {}'.format(self._events))\r\n \r\n for event in pending_events:\r\n event._sync_dispatch_to_player(self.player)\r\n \r\n def _cleanup_timestamps(self):\r\n- with self._lock:\r\n- while self._timestamps and self._timestamps[0][0] < self._play_cursor:\r\n- del self._timestamps[0]\r\n+ while self._timestamps and self._timestamps[0][0] < self._play_cursor:\r\n+ del self._timestamps[0]\r\n \r\n def _check_underrun(self):\r\n if self._playing and self._has_underrun():\r\n@@ -299,9 +278,8 @@ class DirectSoundAudioPlayer(AbstractAudioPlayer):\n def get_write_size(self):\r\n self.update_play_cursor()\r\n \r\n- with self._lock:\r\n- play_cursor = self._play_cursor\r\n- write_cursor = self._write_cursor\r\n+ play_cursor = self._play_cursor\r\n+ write_cursor = self._write_cursor\r\n \r\n return self._buffer_size - max(write_cursor - play_cursor, 0)\r\n \r\n@@ -310,91 +288,82 @@ class DirectSoundAudioPlayer(AbstractAudioPlayer):\n if length == 0:\r\n return 0\r\n \r\n- with self._lock:\r\n- write_ptr = self._ds_buffer.lock(self._write_cursor_ring, length)\r\n- assert 0 < length <= self._buffer_size\r\n- assert length == write_ptr.audio_length_1.value + write_ptr.audio_length_2.value\r\n-\r\n- if audio_data:\r\n- ctypes.memmove(write_ptr.audio_ptr_1, audio_data.data, 
write_ptr.audio_length_1.value)\r\n- audio_data.consume(write_ptr.audio_length_1.value, self.source_group.audio_format)\r\n- if write_ptr.audio_length_2.value > 0:\r\n- ctypes.memmove(write_ptr.audio_ptr_2, audio_data.data, write_ptr.audio_length_2.value)\r\n- audio_data.consume(write_ptr.audio_length_2.value, self.source_group.audio_format)\r\n+ write_ptr = self._ds_buffer.lock(self._write_cursor_ring, length)\r\n+ assert 0 < length <= self._buffer_size\r\n+ assert length == write_ptr.audio_length_1.value + write_ptr.audio_length_2.value\r\n+\r\n+ if audio_data:\r\n+ ctypes.memmove(write_ptr.audio_ptr_1, audio_data.data, write_ptr.audio_length_1.value)\r\n+ audio_data.consume(write_ptr.audio_length_1.value, self.source_group.audio_format)\r\n+ if write_ptr.audio_length_2.value > 0:\r\n+ ctypes.memmove(write_ptr.audio_ptr_2, audio_data.data, write_ptr.audio_length_2.value)\r\n+ audio_data.consume(write_ptr.audio_length_2.value, self.source_group.audio_format)\r\n+ else:\r\n+ if self.source_group.audio_format.sample_size == 8:\r\n+ c = 0x80\r\n else:\r\n- if self.source_group.audio_format.sample_size == 8:\r\n- c = 0x80\r\n- else:\r\n- c = 0\r\n- ctypes.memset(write_ptr.audio_ptr_1, c, write_ptr.audio_length_1.value)\r\n- if write_ptr.audio_length_2.value > 0:\r\n- ctypes.memset(write_ptr.audio_ptr_2, c, write_ptr.audio_length_2.value)\r\n- self._ds_buffer.unlock(write_ptr)\r\n-\r\n- self._write_cursor += length\r\n- self._write_cursor_ring += length\r\n- self._write_cursor_ring %= self._buffer_size\r\n+ c = 0\r\n+ ctypes.memset(write_ptr.audio_ptr_1, c, write_ptr.audio_length_1.value)\r\n+ if write_ptr.audio_length_2.value > 0:\r\n+ ctypes.memset(write_ptr.audio_ptr_2, c, write_ptr.audio_length_2.value)\r\n+ self._ds_buffer.unlock(write_ptr)\r\n+\r\n+ self._write_cursor += length\r\n+ self._write_cursor_ring += length\r\n+ self._write_cursor_ring %= self._buffer_size\r\n \r\n def seek(self, timestamp):\r\n self.audio_diff_avg_count = 0\r\n self.audio_diff_cum = 0.0\r\n- with self._lock:\r\n- while True:\r\n- audio_data = self._get_audiodata()\r\n- assert _debug(\"Seeking audio timestamp {:.2f} sec. \"\r\n- \"Got audio packet starting at {:.2f} sec\".format(\r\n- timestamp, audio_data.timestamp))\r\n- if timestamp <= (audio_data.timestamp + audio_data.duration):\r\n- break\r\n- \r\n- self._audiodata_buffer = None\r\n- del self._events[:]\r\n- del self._timestamps[:]\r\n+ while True:\r\n+ audio_data = self._get_audiodata()\r\n+ assert _debug(\"Seeking audio timestamp {:.2f} sec. 
\"\r\n+ \"Got audio packet starting at {:.2f} sec\".format(\r\n+ timestamp, audio_data.timestamp))\r\n+ if timestamp <= (audio_data.timestamp + audio_data.duration):\r\n+ break\r\n+ \r\n+ self._audiodata_buffer = None\r\n+ del self._events[:]\r\n+ del self._timestamps[:]\r\n \r\n- if audio_data is not None:\r\n- assert _debug('write', audio_data.length)\r\n- self.write(audio_data, audio_data.length)\r\n+ if audio_data is not None:\r\n+ assert _debug('write', audio_data.length)\r\n+ self.write(audio_data, audio_data.length)\r\n \r\n def get_time(self):\r\n self.update_play_cursor()\r\n- with self._lock:\r\n- if self._timestamps:\r\n- cursor, ts = self._timestamps[0]\r\n- result = ts + (self._play_cursor - cursor) / \\\r\n- float(self.source_group.audio_format.bytes_per_second)\r\n- else:\r\n- result = None\r\n+ if self._timestamps:\r\n+ cursor, ts = self._timestamps[0]\r\n+ result = ts + (self._play_cursor - cursor) / \\\r\n+ float(self.source_group.audio_format.bytes_per_second)\r\n+ else:\r\n+ result = None\r\n \r\n return result\r\n \r\n def set_volume(self, volume):\r\n- with self._lock:\r\n- self._ds_buffer.volume = _gain2db(volume)\r\n+ self._ds_buffer.volume = _gain2db(volume)\r\n \r\n def set_position(self, position):\r\n if self._ds_buffer.is3d:\r\n- with self._lock:\r\n- self._ds_buffer.position = _convert_coordinates(position)\r\n+ self._ds_buffer.position = _convert_coordinates(position)\r\n \r\n def set_min_distance(self, min_distance):\r\n if self._ds_buffer.is3d:\r\n- with self._lock:\r\n- self._ds_buffer.min_distance = min_distance\r\n+ self._ds_buffer.min_distance = min_distance\r\n \r\n def set_max_distance(self, max_distance):\r\n if self._ds_buffer.is3d:\r\n- with self._lock:\r\n- self._ds_buffer.max_distance = max_distance\r\n+ self._ds_buffer.max_distance = max_distance\r\n \r\n def set_pitch(self, pitch):\r\n frequency = int(pitch * self.source_group.audio_format.sample_rate)\r\n- with self._lock:\r\n- self._ds_buffer.frequency = frequency\r\n+ self._ds_buffer.frequency = frequency\r\n \r\n def set_cone_orientation(self, cone_orientation):\r\n if self._ds_buffer.is3d:\r\n- with self._lock:\r\n- self._ds_buffer.cone_orientation = _convert_coordinates(cone_orientation)\r\n+ self._ds_buffer.cone_orientation = _convert_coordinates(cone_orientation)\r\n \r\n def set_cone_inner_angle(self, cone_inner_angle):\r\n if self._ds_buffer.is3d:\r\n@@ -409,14 +378,12 @@ class DirectSoundAudioPlayer(AbstractAudioPlayer):\n def _set_cone_angles(self):\r\n inner = min(self._cone_inner_angle, self._cone_outer_angle)\r\n outer = max(self._cone_inner_angle, self._cone_outer_angle)\r\n- with self._lock:\r\n- self._ds_buffer.set_cone_angles(inner, outer)\r\n+ self._ds_buffer.set_cone_angles(inner, outer)\r\n \r\n def set_cone_outer_gain(self, cone_outer_gain):\r\n if self._ds_buffer.is3d:\r\n volume = _gain2db(cone_outer_gain)\r\n- with self._lock:\r\n- self._ds_buffer.cone_outside_volume = volume\r\n+ self._ds_buffer.cone_outside_volume = volume\r\n \r\n def prefill_audio(self):\r\n write_size = self.get_write_size()\r\n" } ]
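The change above replaces the DirectSound worker thread with pyglet's own clock. Below is a minimal, hedged sketch of that scheduling pattern, assuming only that pyglet is installed; ToyBufferPlayer, its buffer sizes, and the printouts are invented stand-ins for the real DirectSoundAudioPlayer internals, not the adaptation code itself.

import pyglet


class ToyBufferPlayer(object):
    """Invented stand-in for DirectSoundAudioPlayer's refill scheduling."""

    _min_buffer_size = 4096   # hypothetical refill threshold, in bytes
    _buffer_size = 16384      # hypothetical ring-buffer size, in bytes

    def get_write_size(self):
        # The real player derives this from its play/write cursors.
        return self._buffer_size // 2

    def refill(self, write_size):
        print('refilling {} bytes'.format(write_size))

    def _check_refill(self, dt):
        # Runs roughly every 0.1 s on pyglet's clock instead of in a thread.
        write_size = self.get_write_size()
        if write_size > self._min_buffer_size:
            self.refill(write_size)

    def play(self):
        pyglet.clock.schedule_interval_soft(self._check_refill, 0.1)

    def stop(self):
        pyglet.clock.unschedule(self._check_refill)


if __name__ == '__main__':
    player = ToyBufferPlayer()
    player.play()
    pyglet.clock.schedule_once(lambda dt: pyglet.app.exit(), 0.5)
    pyglet.app.run()
    player.stop()

schedule_interval_soft is used rather than schedule_interval so that many players refilling at the same interval tend to be spread across frames instead of all firing in the same tick.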
07d12a3cf97fa031831d2377695ed4c130fe5701
pyglet/pyglet
13.07.2018 10:13:56
BSD 3-Clause New or Revised License
Fix Player when playing an audio source with a silent audio driver. When the Source has only audio and no video and the audio driver is silent, the Player never finished playing. Now it correctly dispatches the "on_eos" event after the source duration. A small sketch of this fallback follows the diff below.
[ { "change_type": "MODIFY", "old_path": "pyglet/media/player.py", "new_path": "pyglet/media/player.py", "diff": "@@ -184,7 +184,7 @@ class Player(pyglet.event.EventDispatcher):\n source = iter(source)\n except TypeError:\n raise TypeError(\"source must be either a Source or an iterable.\"\n- \" Received type {0}\".format(type(source)))\n+ \" Received type {0}\".format(type(source)))\n self._playlists.append(source)\n \n if self.source is None:\n@@ -225,6 +225,12 @@ class Player(pyglet.event.EventDispatcher):\n # Negative number means audio runs ahead.\n # self._mclock._systime += -0.3\n self._mclock.play()\n+ if self._audio_player is None and source.video_format is None:\n+ pyglet.clock.schedule_once(\n+ lambda dt: self.dispatch_event(\"on_eos\"),\n+ source.duration,\n+ )\n+\n else:\n if self._audio_player:\n self._audio_player.stop()\n@@ -364,7 +370,6 @@ class Player(pyglet.event.EventDispatcher):\n audio_driver = get_audio_driver()\n if audio_driver is None:\n # Failed to find a valid audio driver\n- self.source.audio_format = None\n return\n \n self._audio_player = audio_driver.create_audio_player(source, self)\n@@ -379,7 +384,7 @@ class Player(pyglet.event.EventDispatcher):\n @property\n def source(self):\n \"\"\"Source: Read-only. The current :class:`Source`, or ``None``.\"\"\"\n- return self._source \n+ return self._source\n \n @property\n def time(self):\n@@ -637,6 +642,7 @@ Player.register_event_type('on_player_next_source')\n def _one_item_playlist(source):\n yield source\n \n+\n class PlayerGroup(object):\n \"\"\"Group of players that can be played and paused simultaneously.\n \n" }, { "change_type": "MODIFY", "old_path": "tests/integration/media/mock_player.py", "new_path": "tests/integration/media/mock_player.py", "diff": "@@ -1,5 +1,6 @@\n from __future__ import absolute_import, print_function\n import pyglet\n+import pytest\n _debug = False\n \n \n@@ -13,8 +14,10 @@ class MockPlayer(object):\n def dispatch_event(self, event_type, *args):\n super(MockPlayer, self).dispatch_event(event_type, *args)\n if _debug:\n- print('{}: event {} received @ {}'.format(self.__class__.__name__,\n- \tevent_type, self.pyclock.time()))\n+ print('{}: event {} received @ {}'.format(\n+ self.__class__.__name__,\n+ event_type, self.pyclock.time()\n+ ))\n self.events.append((event_type, args))\n pyglet.clock.unschedule(self.event_loop.interrupt_event_loop)\n self.event_loop.interrupt_event_loop()\n@@ -25,8 +28,8 @@ class MockPlayer(object):\n while self.pyclock.time() < end_time:\n if _debug:\n print('{}: run for {} sec @ {}'.format(self.__class__.__name__,\n- \tend_time-self.pyclock.time(), self.pyclock.time()))\n- self.event_loop.run_event_loop(duration=end_time-self.pyclock.time())\n+ end_time - self.pyclock.time(), self.pyclock.time()))\n+ self.event_loop.run_event_loop(duration=end_time - self.pyclock.time())\n if not self.events:\n continue\n event_type, args = self.events.pop()\n@@ -45,10 +48,10 @@ class MockPlayer(object):\n event_type, args = self.wait_for_event(timeout, *expected_events)\n if _debug:\n print('{}: got event {} @ {}'.format(self.__class__.__name__,\n- \tevent_type, self.pyclock.time()))\n+ event_type, self.pyclock.time()))\n if event_type is None and self.pyclock.time() >= end_time:\n- pytest.fail('Timeout before all events have been received. Still waiting for: '\n- + ','.join(expected_events))\n+ pytest.fail('Timeout before all events have been received. 
'\n+ 'Still waiting for: ' + ','.join(expected_events))\n elif event_type is not None:\n if event_type in expected_events:\n expected_events.remove(event_type)\n@@ -59,6 +62,6 @@ class MockPlayer(object):\n now = self.pyclock.time()\n end_time = now + timeout\n while now - end_time < -0.005:\n- duration = max(.01, end_time-now)\n+ duration = max(.01, end_time - now)\n self.event_loop.run_event_loop(duration=duration)\n- now = self.pyclock.time()\n\\ No newline at end of file\n+ now = self.pyclock.time()\n" }, { "change_type": "MODIFY", "old_path": "tests/integration/media/test_player.py", "new_path": "tests/integration/media/test_player.py", "diff": "@@ -2,11 +2,9 @@ from __future__ import print_function\n from future import standard_library\n standard_library.install_aliases()\n \n-import gc\n import pytest\n from tests import mock\n import time\n-import unittest\n \n import pyglet\n _debug = False\n@@ -26,6 +24,7 @@ class PlayerTest(MockPlayer, Player):\n def player(event_loop):\n return PlayerTest(event_loop)\n \n+\n class SilentTestSource(Silence):\n def __init__(self, duration, sample_rate=44800, sample_size=16):\n super(Silence, self).__init__(duration, sample_rate, sample_size)\n@@ -41,14 +40,16 @@ class SilentTestSource(Silence):\n return self.bytes_read == self._max_offset\n \n \n-\n def test_player_play(player):\n source = SilentTestSource(.1)\n player.queue(source)\n \n player.play()\n- player.wait_for_all_events(1., \n- 'on_eos', 'on_player_eos')\n+ player.wait_for_all_events(\n+ 1.,\n+ 'on_eos',\n+ 'on_player_eos'\n+ )\n assert source.has_fully_played(), 'Source not fully played'\n \n \n@@ -58,17 +59,35 @@ def test_player_play_multiple(player):\n player.queue(source)\n \n player.play()\n- player.wait_for_all_events(1., \n- 'on_eos', 'on_player_next_source', 'on_eos', 'on_player_eos')\n+ player.wait_for_all_events(\n+ 1.,\n+ 'on_eos',\n+ 'on_player_next_source',\n+ 'on_eos',\n+ 'on_player_eos'\n+ )\n for source in sources:\n assert source.has_fully_played(), 'Source not fully played'\n \n \n def test_multiple_fire_and_forget_players():\n \"\"\"\n- Test an issue where the driver crashed when starting multiple players, but not keeping a\n- reference to these players.\n+ Test an issue where the driver crashed when starting multiple players, but not\n+ keeping a reference to these players.\n \"\"\"\n for _ in range(10):\n Silence(1).play()\n time.sleep(1)\n+\n+\n+def test_player_silent_audio_driver(player):\n+ with mock.patch('pyglet.media.player.get_audio_driver') as get_audio_driver_mock:\n+ get_audio_driver_mock.return_value = None\n+ source = SilentTestSource(.1)\n+ player.queue(source)\n+ player.play()\n+\n+ player.wait_for_all_events(\n+ 1.,\n+ 'on_eos',\n+ 'on_player_eos')\n" }, { "change_type": "MODIFY", "old_path": "tests/unit/media/test_player.py", "new_path": "tests/unit/media/test_player.py", "diff": "@@ -1,20 +1,13 @@\n from __future__ import division\n from builtins import range\n-import ctypes\n from tests import mock\n-import os\n import random\n-from collections import deque\n-from itertools import product\n from tests.base.future_test import FutureTestCase\n \n-import pytest\n-\n-import pyglet\n from pyglet.media.player import Player, PlayerGroup\n-from pyglet.media.codecs.base import *\n+from pyglet.media.codecs.base import AudioFormat, VideoFormat, Source\n \n-#pyglet.options['debug_media'] = True\n+# pyglet.options['debug_media'] = True\n \n \n class PlayerTestCase(FutureTestCase):\n@@ -65,12 +58,13 @@ class PlayerTestCase(FutureTestCase):\n return 
mock_source\n \n def set_video_data_for_mock_source(self, mock_source, timestamp_data_pairs):\n- \"\"\"Make the given mock source return video data. Video data is given in pairs of timestamp\n- and data to return.\"\"\"\n+ \"\"\"Make the given mock source return video data. Video data is given in pairs of\n+ timestamp and data to return.\"\"\"\n def _get_frame():\n if timestamp_data_pairs:\n current_frame = timestamp_data_pairs.pop(0)\n return current_frame[1]\n+\n def _get_timestamp():\n if timestamp_data_pairs:\n return timestamp_data_pairs[0][0]\n@@ -233,8 +227,9 @@ class PlayerTestCase(FutureTestCase):\n self.assert_now_playing(mock_source3)\n \n def test_queue_multiple_audio_sources_same_format_and_play_and_skip(self):\n- \"\"\"When multiple audio sources with the same format are queued, they are played using the\n- same driver player. Skipping to the next source is just advancing the source group.\n+ \"\"\"When multiple audio sources with the same format are queued, they are played\n+ using the same driver player. Skipping to the next source is just advancing the\n+ source group.\n \"\"\"\n mock_source1 = self.create_mock_source(self.audio_format_1, None)\n mock_source2 = self.create_mock_source(self.audio_format_1, None)\n@@ -262,7 +257,8 @@ class PlayerTestCase(FutureTestCase):\n self.assert_now_playing(mock_source3)\n \n def test_on_eos(self):\n- \"\"\"The player receives on_eos for every source, but does not need to do anything.\"\"\"\n+ \"\"\"The player receives on_eos for every source, but does not need to do anything.\n+ \"\"\"\n mock_source1 = self.create_mock_source(self.audio_format_1, None)\n mock_source2 = self.create_mock_source(self.audio_format_1, None)\n mock_source3 = self.create_mock_source(self.audio_format_1, None)\n@@ -296,7 +292,7 @@ class PlayerTestCase(FutureTestCase):\n self.assert_not_playing(None)\n \n def test_eos_events(self):\n- \"\"\"Test receiving various eos events: on source eos, \n+ \"\"\"Test receiving various eos events: on source eos,\n on playlist exhausted and on player eos and on player next source.\n \"\"\"\n on_eos_mock = mock.MagicMock(return_value=None)\n@@ -311,7 +307,7 @@ class PlayerTestCase(FutureTestCase):\n on_player_eos_mock.reset_mock()\n on_player_next_source_mock.reset_mock()\n \n- def assert_eos_events_received(on_eos=False, on_player_eos=False, \n+ def assert_eos_events_received(on_eos=False, on_player_eos=False,\n on_player_next_source=False):\n self.assertEqual(on_eos_mock.called, on_eos)\n self.assertEqual(on_player_eos_mock.called, on_player_eos)\n@@ -362,8 +358,10 @@ class PlayerTestCase(FutureTestCase):\n \n self.reset_mocks()\n self.player.play()\n- self.assertAlmostEqual(self.player.time, 0.5, places=2,\n- msg='While playing, player should return time from driver player')\n+ self.assertAlmostEqual(\n+ self.player.time, 0.5, places=2,\n+ msg='While playing, player should return time from driver player'\n+ )\n self.assert_driver_player_started()\n self.assert_no_new_driver_player_created()\n self.assert_now_playing(mock_source)\n@@ -415,8 +413,8 @@ class PlayerTestCase(FutureTestCase):\n self.player.delete()\n \n def test_set_player_properties_before_playing(self):\n- \"\"\"When setting player properties before a driver specific player is \n- created, these settings should be propagated after creating the \n+ \"\"\"When setting player properties before a driver specific player is\n+ created, these settings should be propagated after creating the\n player.\n \"\"\"\n mock_source1 = 
self.create_mock_source(self.audio_format_1, None)\n@@ -460,7 +458,7 @@ class PlayerTestCase(FutureTestCase):\n assert_properties_set()\n \n def test_set_player_properties_while_playing(self):\n- \"\"\"When setting player properties while playing, the properties should \n+ \"\"\"When setting player properties while playing, the properties should\n be propagated to the driver specific player right away.\"\"\"\n mock_source1 = self.create_mock_source(self.audio_format_1, None)\n mock_source2 = self.create_mock_source(self.audio_format_2, None)\n@@ -548,7 +546,7 @@ class PlayerTestCase(FutureTestCase):\n self.assert_driver_player_cleared()\n \n def test_video_queue_and_play(self):\n- \"\"\"Sources can also include video. Instead of using a player to \n+ \"\"\"Sources can also include video. Instead of using a player to\n continuously play the video, a texture is updated based on the\n video packet timestamp.\"\"\"\n mock_source = self.create_mock_source(self.audio_format_1, self.video_format_1)\n@@ -571,7 +569,7 @@ class PlayerTestCase(FutureTestCase):\n self.assertIs(self.player.texture, self.mock_texture)\n \n def test_video_seek(self):\n- \"\"\"Sources with video can also be seeked. It's the Source \n+ \"\"\"Sources with video can also be seeked. It's the Source\n responsibility to present the Player with audio and video at the\n correct time.\"\"\"\n mock_source = self.create_mock_source(self.audio_format_1, self.video_format_1)\n@@ -599,7 +597,7 @@ class PlayerTestCase(FutureTestCase):\n self.assert_texture_updated('e')\n \n def test_video_frame_rate(self):\n- \"\"\"Videos texture are scheduled according to the video packet \n+ \"\"\"Videos texture are scheduled according to the video packet\n timestamp.\"\"\"\n mock_source1 = self.create_mock_source(self.audio_format_1, self.video_format_1)\n mock_source2 = self.create_mock_source(self.audio_format_1, self.video_format_2)\n@@ -685,6 +683,13 @@ class PlayerTestCase(FutureTestCase):\n self.assert_update_texture_scheduled()\n self.assert_no_new_driver_player_created()\n \n+ def test_audio_source_with_silent_driver(self):\n+ \"\"\"An audio source with a silent driver.\"\"\"\n+ mock_source = self.create_mock_source(self.audio_format_3, None)\n+ self.mock_get_audio_driver.return_value = None\n+ self.player.queue(mock_source)\n+ self.player.play()\n+\n \n class PlayerGroupTestCase(FutureTestCase):\n def create_mock_player(self, has_audio=True):\n@@ -701,40 +706,48 @@ class PlayerGroupTestCase(FutureTestCase):\n player.play.assert_called_once_with()\n \n def assert_audio_players_started(self, *players):\n- # Find the one player that was used to start the group, the rest should not be used\n+ # Find the one player that was used to start the group,\n+ # the rest should not be used\n call_args = None\n audio_players = []\n for player in players:\n audio_player = player._audio_player\n audio_players.append(audio_player)\n if call_args is not None:\n- self.assertFalse(audio_player._play_group.called, msg='Only one player should be used to start the group')\n+ self.assertFalse(audio_player._play_group.called,\n+ msg='Only one player should be used to start the group')\n elif audio_player._play_group.called:\n call_args = audio_player._play_group.call_args\n \n- self.assertIsNotNone(call_args, msg='No player was used to start all audio players.')\n+ self.assertIsNotNone(call_args,\n+ msg='No player was used to start all audio players.')\n started_players = call_args[0][0]\n- self.assertCountEqual(started_players, audio_players, msg='Not all players 
with audio players were started')\n+ self.assertCountEqual(started_players, audio_players,\n+ msg='Not all players with audio players were started')\n \n def assert_players_stopped(self, *players):\n for player in players:\n player.pause.assert_called_once_with()\n \n def assert_audio_players_stopped(self, *players):\n- # Find the one player that was used to start the group, the rest should not be used\n+ # Find the one player that was used to start the group,\n+ # the rest should not be used\n call_args = None\n audio_players = []\n for player in players:\n audio_player = player._audio_player\n audio_players.append(audio_player)\n if call_args is not None:\n- self.assertFalse(audio_player._stop_group.called, msg='Only one player should be used to stop the group')\n+ self.assertFalse(audio_player._stop_group.called,\n+ msg='Only one player should be used to stop the group')\n elif audio_player._stop_group.called:\n call_args = audio_player._stop_group.call_args\n \n- self.assertIsNotNone(call_args, msg='No player was used to stop all audio players.')\n+ self.assertIsNotNone(call_args,\n+ msg='No player was used to stop all audio players.')\n stopped_players = call_args[0][0]\n- self.assertCountEqual(stopped_players, audio_players, msg='Not all players with audio players were stopped')\n+ self.assertCountEqual(stopped_players, audio_players,\n+ msg='Not all players with audio players were stopped')\n \n def reset_mocks(self, *mocks):\n for m in mocks:\n@@ -788,4 +801,3 @@ class PlayerGroupTestCase(FutureTestCase):\n group.pause()\n self.assert_audio_players_stopped(*players_with_audio)\n self.assert_players_stopped(*players)\n-\n" } ]
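A small, hedged sketch of the end-of-stream fallback added to player.py above, assuming only pyglet; SilentSource and TinyPlayer are invented stand-ins rather than the real Source/Player classes.

import pyglet


class SilentSource(object):
    """Invented stand-in: an audio-only source that nothing will ever play."""
    duration = 0.5       # seconds
    video_format = None  # no video track


class TinyPlayer(pyglet.event.EventDispatcher):
    def __init__(self):
        self._audio_player = None  # stays None when the audio driver is silent

    def play(self, source):
        # With neither an audio player nor video, nothing would ever signal
        # completion, so schedule "on_eos" ourselves after the duration.
        if self._audio_player is None and source.video_format is None:
            pyglet.clock.schedule_once(
                lambda dt: self.dispatch_event('on_eos'), source.duration)

    def on_eos(self):
        print('end of stream reached')
        pyglet.app.exit()


TinyPlayer.register_event_type('on_eos')

if __name__ == '__main__':
    player = TinyPlayer()
    player.play(SilentSource())
    pyglet.app.run()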
75e4e08636d5dfe3cb8e6796ad116af01e2c0f4a
probcomp/bayeslite
06.12.2017 19:36:49
Apache License 2.0
Change schema for population. MODEL cols AS stattype has become SET STATTYPE OF cols TO stattype. Reasoning: the word MODEL is confusing here; we are setting the statistical data type, so we should name the command just that. A short BQL example of the new spelling follows the diff below.
[ { "change_type": "MODIFY", "old_path": "src/backend.py", "new_path": "src/backend.py", "diff": "@@ -33,7 +33,7 @@ that for illustration::\n Then you can model a table and query the probable implications of the data in\n the table::\n \n- bdb.execute('create population p for t with schema(guess stattypes for (*))')\n+ bdb.execute('create population p for t with schema(guess stattypes of (*))')\n bdb.execute('create generator p_cc for t using cgpm;')\n bdb.execute('initialize 10 models for t_cc')\n bdb.execute('analyze t_cc for 10 iterations')\n" }, { "change_type": "MODIFY", "old_path": "src/grammar.y", "new_path": "src/grammar.y", "diff": "@@ -99,9 +99,11 @@ pop_schema(many) ::= pop_schema(schema) T_SEMI pop_clause(cl).\n \n pop_clause(empty) ::= .\n pop_clause(column) ::= column_name(col) stattype(st).\n-pop_clause(model) ::= K_MODEL pop_columns(cols) K_AS stattype(st).\n+pop_clause(stattype) ::= K_SET K_STATTYPES|K_STATTYPE\n+ K_OF pop_columns(cols)\n+ K_TO stattype(st).\n pop_clause(ignore) ::= K_IGNORE pop_columns(cols).\n-pop_clause(guess) ::= K_GUESS stattypes_for_opt pop_columns_guess(cols).\n+pop_clause(guess) ::= K_GUESS stattypes_of_opt pop_columns_guess(cols).\n \n stattype_opt(none) ::= .\n stattype_opt(one) ::= stattype(st).\n@@ -114,9 +116,8 @@ pop_columns_guess(list) ::= pop_columns(cols).\n pop_columns(one) ::= column_name(c).\n pop_columns(many) ::= pop_columns(cols) T_COMMA column_name(c).\n \n-stattypes_for_opt ::= .\n-stattypes_for_opt ::= K_STATTYPES K_FOR.\n-\n+stattypes_of_opt ::= .\n+stattypes_of_opt ::= K_STATTYPE|K_STATTYPES K_OF.\n \n /* XXX Temporary generators? */\n command(creategen) ::= K_CREATE K_GENERATOR\n" }, { "change_type": "MODIFY", "old_path": "src/parse.py", "new_path": "src/parse.py", "diff": "@@ -201,7 +201,7 @@ class BQLSemantics(object):\n \n def p_pop_clause_empty(self): return None\n def p_pop_clause_column(self, col, st): return ast.PopModelVars([col], st)\n- def p_pop_clause_model(self, cols, st): return ast.PopModelVars(cols, st)\n+ def p_pop_clause_stattype(self, cols, st): return ast.PopModelVars(cols, st)\n def p_pop_clause_ignore(self, cols): return ast.PopIgnoreVars(cols)\n def p_pop_clause_guess(self, cols): return ast.PopGuessVars(cols)\n \n" }, { "change_type": "MODIFY", "old_path": "tests/test_bql.py", "new_path": "tests/test_bql.py", "diff": "@@ -107,7 +107,7 @@ def test_trivial_population():\n # XXX if (not) exists\n bdb.execute('''\n create population p for t (\n- guess stattypes for (*);\n+ guess stattypes of (*);\n age numerical\n )\n ''')\n@@ -120,7 +120,7 @@ def test_population_invalid_numerical():\n with pytest.raises(BQLError):\n bdb.execute('''\n create population p for t (\n- guess stattypes for (*);\n+ guess stattypes of (*);\n gender numerical\n )\n ''')\n@@ -131,7 +131,7 @@ def test_population_invalid_numerical_alterpop_addvar():\n bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)\n bdb.execute('''\n create population p for t (\n- guess stattypes for (*);\n+ guess stattypes of (*);\n ignore gender\n )\n ''')\n@@ -145,7 +145,7 @@ def test_population_invalid_numerical_alterpop_stattype():\n bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)\n bdb.execute('''\n create population p for t (\n- guess stattypes for (*);\n+ guess stattypes of (*);\n gender nominal\n )\n ''')\n@@ -263,8 +263,8 @@ def test_conditional_probability(seed):\n bdb.execute('''\n create population p1 for t1 (\n ignore id, label;\n- model age as numerical;\n- model weight as numerical\n+ set stattype of age to 
numerical;\n+ set stattype of weight to numerical\n )\n ''')\n bdb.execute('''\n" }, { "change_type": "MODIFY", "old_path": "tests/test_cgpm.py", "new_path": "tests/test_cgpm.py", "diff": "@@ -72,8 +72,9 @@ def cgpm_smoke_bdb():\n \n bdb.execute('''\n CREATE POPULATION p FOR t WITH SCHEMA(\n- MODEL output, input AS NUMERICAL;\n- MODEL cat AS CATEGORICAL\n+ output NUMERICAL;\n+ input NUMERICAL;\n+ cat NOMINAL;\n )\n ''')\n \n@@ -121,7 +122,7 @@ def test_cgpm_no_empty_categories():\n bayesdb_nullify(bdb, 'f', '')\n bdb.execute('''\n CREATE POPULATION q FOR f WITH SCHEMA (\n- MODEL a, b, c AS NOMINAL\n+ SET STATTYPES OF a, b, c TO NOMINAL\n );\n ''')\n bdb.execute('CREATE GENERATOR h IF NOT EXISTS FOR q USING cgpm;')\n@@ -360,12 +361,12 @@ def test_cgpm_kepler():\n with cgpm_dummy_satellites_bdb() as bdb:\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee AS NUMERICAL;\n- MODEL class_of_orbit AS CATEGORICAL;\n- MODEL country_of_operator AS CATEGORICAL;\n- MODEL launch_mass AS NUMERICAL;\n- MODEL perigee AS NUMERICAL;\n- MODEL period AS NUMERICAL\n+ apogee NUMERICAL;\n+ class_of_orbit CATEGORICAL;\n+ country_of_operator CATEGORICAL;\n+ launch_mass NUMERICAL;\n+ perigee NUMERICAL;\n+ period NUMERICAL\n )\n ''')\n bdb.execute('''\n@@ -490,28 +491,28 @@ def test_unknown_stattype():\n # No such statistical type at the moment.\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee, perigee, launch_mass, period\n- AS NUMERICAL;\n+ SET STATTYPES OF apogee, perigee, launch_mass, period\n+ TO NUMERICAL;\n \n- MODEL class_of_orbit, country_of_operator\n- AS NOMINAL;\n+ SET STATTYPE OF class_of_orbit, country_of_operator\n+ TO NOMINAL;\n \n- MODEL relaunches\n- AS QUAGGA\n+ SET STATTYPE OF relaunches\n+ TO QUAGGA\n )\n ''')\n # Invent the statistical type.\n bdb.sql_execute('INSERT INTO bayesdb_stattype VALUES (?)', ('quagga',))\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee, perigee, launch_mass, period\n- AS NUMERICAL;\n+ SET STATTYPES OF apogee, perigee, launch_mass, period\n+ TO NUMERICAL;\n \n- MODEL class_of_orbit, country_of_operator\n- AS NOMINAL;\n+ SET STATTYPES OF class_of_orbit, country_of_operator\n+ TO NOMINAL;\n \n- MODEL relaunches\n- AS QUAGGA\n+ SET STATTYPES OF relaunches\n+ TO QUAGGA\n )\n ''')\n registry = {\n@@ -547,12 +548,12 @@ def test_bad_analyze_vars():\n with cgpm_dummy_satellites_bdb() as bdb:\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee AS NUMERICAL;\n- MODEL class_of_orbit AS CATEGORICAL;\n- MODEL country_of_operator AS CATEGORICAL;\n- MODEL launch_mass AS NUMERICAL;\n- MODEL perigee AS NUMERICAL;\n- MODEL period AS NUMERICAL\n+ SET STATTYPE OF apogee TO NUMERICAL;\n+ SET STATTYPE OF class_of_orbit TO CATEGORICAL;\n+ SET STATTYPE OF country_of_operator TO CATEGORICAL;\n+ SET STATTYPE OF launch_mass TO NUMERICAL;\n+ SET STATTYPE OF perigee TO NUMERICAL;\n+ SET STATTYPE OF period TO NUMERICAL\n )\n ''')\n registry = {\n@@ -587,15 +588,15 @@ def test_output_stattypes():\n with pytest.raises(BQLError):\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee, launch_mass AS NUMERICAL;\n- MODEL country_of_operator AS CATEGORICAL\n+ SET STATTYPES OF apogee, launch_mass TO NUMERICAL;\n+ SET STATTYPES OF country_of_operator TO CATEGORICAL\n )\n ''')\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n IGNORE class_of_orbit, 
perigee, period;\n- MODEL apogee, launch_mass AS NUMERICAL;\n- MODEL country_of_operator AS CATEGORICAL\n+ SET STATTYPES OF apogee, launch_mass TO NUMERICAL;\n+ SET STATTYPES OF country_of_operator TO CATEGORICAL\n )\n ''')\n registry = {\n@@ -706,7 +707,7 @@ def test_initialize_with_all_nulls():\n # Fail when a is numerical and modeled by crosscat.\n bdb.execute('''\n CREATE POPULATION p FOR t WITH SCHEMA(\n- MODEL a, b, c AS NUMERICAL\n+ SET STATTYPES OF a, b, c TO NUMERICAL\n )\n ''')\n bdb.execute('''\n@@ -720,8 +721,8 @@ def test_initialize_with_all_nulls():\n # Fail when a is nominal and modeled by crosscat.\n bdb.execute('''\n CREATE POPULATION p2 FOR t WITH SCHEMA(\n- MODEL a AS NOMINAL;\n- MODEL b, c AS NUMERICAL\n+ SET STATTYPES OF a TO NOMINAL;\n+ SET STATTYPES OF b, c TO NUMERICAL\n )\n ''')\n bdb.execute('CREATE GENERATOR m2 FOR p2;')\n@@ -732,7 +733,7 @@ def test_initialize_with_all_nulls():\n bdb.execute('''\n CREATE POPULATION p3 FOR t WITH SCHEMA(\n IGNORE a;\n- MODEL b, c AS NUMERICAL\n+ SET STATTYPES OF b, c TO NUMERICAL\n )\n ''')\n bdb.execute('CREATE GENERATOR m3 FOR p3;')\n@@ -834,12 +835,12 @@ def test_predictive_relevance():\n bayesdb_register_backend(bdb, CGPM_Backend(cgpm_registry=dict()))\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA (\n- MODEL apogee AS NUMERICAL;\n- MODEL class_of_orbit AS CATEGORICAL;\n- MODEL country_of_operator AS CATEGORICAL;\n- MODEL launch_mass AS NUMERICAL;\n- MODEL perigee AS NUMERICAL;\n- MODEL period AS NUMERICAL\n+ apogee NUMERICAL;\n+ class_of_orbit CATEGORICAL;\n+ country_of_operator CATEGORICAL;\n+ launch_mass NUMERICAL;\n+ perigee NUMERICAL;\n+ period NUMERICAL\n )\n ''')\n bdb.execute('CREATE GENERATOR m FOR satellites;')\n@@ -1003,7 +1004,7 @@ def test_add_drop_models():\n bdb, CGPM_Backend(dict(), multiprocess=0))\n bdb.execute('''\n CREATE POPULATION p FOR satellites_ucs WITH SCHEMA(\n- GUESS STATTYPES FOR (*);\n+ GUESS STATTYPES OF (*);\n )\n ''')\n bdb.execute('CREATE GENERATOR m FOR p (SUBSAMPLE 10);')\n@@ -1139,12 +1140,12 @@ def test_using_modelnos():\n with cgpm_dummy_satellites_bdb() as bdb:\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee AS NUMERICAL;\n- MODEL class_of_orbit AS CATEGORICAL;\n- MODEL country_of_operator AS CATEGORICAL;\n- MODEL launch_mass AS NUMERICAL;\n- MODEL perigee AS NUMERICAL;\n- MODEL period AS NUMERICAL\n+ SET STATTYPE OF apogee TO NUMERICAL;\n+ SET STATTYPE OF class_of_orbit TO CATEGORICAL;\n+ SET STATTYPE OF country_of_operator TO CATEGORICAL;\n+ SET STATTYPE OF launch_mass TO NUMERICAL;\n+ SET STATTYPE OF perigee TO NUMERICAL;\n+ SET STATTYPE OF period TO NUMERICAL\n )\n ''')\n bayesdb_register_backend(bdb, CGPM_Backend(dict(), multiprocess=0))\n" }, { "change_type": "MODIFY", "old_path": "tests/test_cgpm_alter.py", "new_path": "tests/test_cgpm_alter.py", "diff": "@@ -32,12 +32,12 @@ def cgpm_dummy_satellites_pop_bdb():\n with cgpm_dummy_satellites_bdb() as bdb:\n bdb.execute('''\n create population satellites for satellites_ucs with schema(\n- model apogee as numerical;\n- model class_of_orbit as categorical;\n- model country_of_operator as categorical;\n- model launch_mass as numerical;\n- model perigee as numerical;\n- model period as numerical\n+ apogee numerical;\n+ class_of_orbit categorical;\n+ country_of_operator categorical;\n+ launch_mass numerical;\n+ perigee numerical;\n+ period numerical\n )\n ''')\n backend = CGPM_Backend(dict(), multiprocess=0)\n" }, { "change_type": "MODIFY", "old_path": 
"tests/test_cgpm_analysis.py", "new_path": "tests/test_cgpm_analysis.py", "diff": "@@ -34,12 +34,11 @@ def test_analysis_subproblems_basic():\n with cgpm_dummy_satellites_bdb() as bdb:\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee AS NUMERICAL;\n- MODEL class_of_orbit AS CATEGORICAL;\n- MODEL country_of_operator AS CATEGORICAL;\n- MODEL launch_mass AS NUMERICAL;\n- MODEL perigee AS NUMERICAL;\n- MODEL period AS NUMERICAL\n+ SET STATTYPE OF class_of_orbit TO CATEGORICAL;\n+ SET STATTYPE OF country_of_operator TO CATEGORICAL;\n+ SET STATTYPE OF launch_mass TO NUMERICAL;\n+ SET STATTYPE OF perigee TO NUMERICAL;\n+ SET STATTYPE OF period TO NUMERICAL\n )\n ''')\n bayesdb_register_backend(bdb, CGPM_Backend(dict(), multiprocess=0))\n" }, { "change_type": "MODIFY", "old_path": "tests/test_cmi.py", "new_path": "tests/test_cmi.py", "diff": "@@ -41,8 +41,8 @@ def smoke_bdb():\n \n bdb.execute('''\n CREATE POPULATION p FOR t WITH SCHEMA (\n- MODEL a, b, c, d AS NUMERICAL;\n- MODEL e AS NOMINAL\n+ SET STATTYPES OF a, b, c, d TO NUMERICAL;\n+ SET STATTYPES OF e TO NOMINAL\n )\n ''')\n \n" }, { "change_type": "MODIFY", "old_path": "tests/test_condprob.py", "new_path": "tests/test_condprob.py", "diff": "@@ -27,7 +27,8 @@ def test_conditional_probability_simple_inferences():\n bdb.sql_execute('insert into t values (?, ?)', row)\n bdb.execute('''\n create population p for t (\n- model foo, bar as categorical\n+ foo categorical;\n+ bar categorical;\n )\n ''')\n bdb.execute('create generator p_cc for p using cgpm;')\n" }, { "change_type": "MODIFY", "old_path": "tests/test_core.py", "new_path": "tests/test_core.py", "diff": "@@ -539,7 +539,7 @@ def test_bayesdb_population_add_variable():\n bdb.sql_execute('create table t (a real, b ignore, c real)')\n bdb.execute('''\n create population p for t with schema(\n- model a, c as numerical;\n+ set stattypes of a, c to numerical;\n b ignore;\n );\n ''')\n" }, { "change_type": "MODIFY", "old_path": "tests/test_infer_hypothetical.py", "new_path": "tests/test_infer_hypothetical.py", "diff": "@@ -42,7 +42,7 @@ def bdb():\n bdb.sql_execute('INSERT INTO t (a, b) VALUES (1,0)')\n \n # Create the population and generator on the existing rows.\n- bdb.execute('CREATE POPULATION p FOR t (MODEL a, b AS NOMINAL)')\n+ bdb.execute('CREATE POPULATION p FOR t (SET STATTYPES OF a, b TO NOMINAL)')\n bdb.execute('CREATE GENERATOR m FOR p;')\n bdb.execute('INITIALIZE 1 MODELS FOR m;')\n bdb.execute('ANALYZE m FOR 1000 ITERATION (OPTIMIZED);')\n" }, { "change_type": "MODIFY", "old_path": "tests/test_nig_normal.py", "new_path": "tests/test_nig_normal.py", "diff": "@@ -47,7 +47,10 @@ def test_nig_normal_latent_numbering():\n for x in xrange(100):\n bdb.sql_execute('insert into t(x, y) values(?, ?)', (x, x*x - 100))\n bdb.execute('''\n- create population p for t(id ignore; model x,y as numerical)\n+ create population p for t(\n+ id ignore;\n+ set stattypes of x,y to numerical;\n+ )\n ''')\n assert core.bayesdb_has_population(bdb, 'p')\n pid = core.bayesdb_get_population(bdb, 'p')\n" }, { "change_type": "MODIFY", "old_path": "tests/test_parse.py", "new_path": "tests/test_parse.py", "diff": "@@ -738,13 +738,23 @@ def test_trivial_precedence_error():\n def test_trivial_commands():\n assert parse_bql_string('''\n create population satellites for satellites_ucs (\n- MODEL country_of_operator, orbit_type AS categorical;\n- MODEL launch_mass AS numerical;\n- MODEL perigee AS numerical;\n- MODEL apogee, period AS numerical\n+ guess(*);\n )\n ''') 
== \\\n [ast.CreatePop(False, 'satellites', 'satellites_ucs', [\n+ ast.PopGuessVars('*'),\n+ ])]\n+ assert parse_bql_string('''\n+ create population satellites for satellites_ucs (\n+ guess stattypes of launch_site, \"contracto=r\";\n+ set stattype of country_of_operator, orbit_type to categorical;\n+ set stattype of launch_mass to numerical;\n+ set stattype of perigee to numerical;\n+ set stattype of apogee, period to numerical;\n+ )\n+ ''') == \\\n+ [ast.CreatePop(False, 'satellites', 'satellites_ucs', [\n+ ast.PopGuessVars(['launch_site', 'contracto=r']),\n ast.PopModelVars(\n ['country_of_operator', 'orbit_type'], 'categorical'),\n ast.PopModelVars(['launch_mass'], 'numerical'),\n@@ -753,8 +763,8 @@ def test_trivial_commands():\n ])]\n assert parse_bql_string('''\n create population satellites for satellites_ucs (\n- MODEL country_of_operator, orbit_type AS categorical;;\n- MODEL apogee, period AS numerical;;\n+ set stattype of country_of_operator, orbit_type to categorical;;\n+ set stattype of apogee, period to numerical;;\n )\n ''') == \\\n [ast.CreatePop(False, 'satellites', 'satellites_ucs', [\n@@ -762,6 +772,24 @@ def test_trivial_commands():\n ['country_of_operator', 'orbit_type'], 'categorical'),\n ast.PopModelVars(['apogee', 'period'], 'numerical'),\n ])]\n+ assert parse_bql_string('''\n+ create population satellites for satellites_ucs (\n+ country_of_operator categorical;\n+ orbit_type categorical;\n+ launch_mass numerical;\n+ perigee numerical;\n+ apogee numerical;\n+ period numerical;\n+ )\n+ ''') == \\\n+ [ast.CreatePop(False, 'satellites', 'satellites_ucs', [\n+ ast.PopModelVars(['country_of_operator'], 'categorical'),\n+ ast.PopModelVars(['orbit_type'], 'categorical'),\n+ ast.PopModelVars(['launch_mass'], 'numerical'),\n+ ast.PopModelVars(['perigee'], 'numerical'),\n+ ast.PopModelVars(['apogee'], 'numerical'),\n+ ast.PopModelVars(['period'], 'numerical'),\n+ ])]\n assert parse_bql_string('drop population satellites') == \\\n [ast.DropPop(False, 'satellites')]\n assert parse_bql_string('create generator t_cc for t using cgpm'\n" }, { "change_type": "MODIFY", "old_path": "tests/test_regress.py", "new_path": "tests/test_regress.py", "diff": "@@ -29,12 +29,12 @@ def test_regress_bonanza__ci_integration():\n bdb, CGPM_Backend(dict(), multiprocess=0))\n bdb.execute('''\n CREATE POPULATION satellites FOR satellites_ucs WITH SCHEMA(\n- MODEL apogee AS NUMERICAL;\n- MODEL class_of_orbit AS NOMINAL;\n- MODEL country_of_operator AS NOMINAL;\n- MODEL launch_mass AS NUMERICAL;\n- MODEL perigee AS NUMERICAL;\n- MODEL period AS NUMERICAL\n+ apogee NUMERICAL;\n+ class_of_orbit NOMINAL;\n+ country_of_operator NOMINAL;\n+ launch_mass NUMERICAL;\n+ perigee NUMERICAL;\n+ period NUMERICAL;\n )\n ''')\n bdb.execute('''\n" }, { "change_type": "MODIFY", "old_path": "tests/test_simulate.py", "new_path": "tests/test_simulate.py", "diff": "@@ -87,7 +87,7 @@ def test_simulate_given_rowid():\n bdb.execute('''\n CREATE POPULATION t_p FOR t WITH SCHEMA {\n IGNORE x;\n- MODEL y AS NUMERICAL;\n+ y NUMERICAL;\n }\n ''')\n bdb.execute('''\n@@ -153,7 +153,7 @@ def test_simulate_given_rowid_multivariate():\n 'INSERT INTO t (x, y, z, w) VALUES (?, ?, ?, ?)', row)\n bdb.execute('''\n CREATE POPULATION t_p FOR t WITH SCHEMA {\n- MODEL y, z, w AS NUMERICAL;\n+ SET STATTYPES OF y, z, w TO NUMERICAL;\n IGNORE x\n }\n ''')\n@@ -219,7 +219,7 @@ def test_simulate_given_rowid_unincorporated():\n 'INSERT INTO t (x, y, z, w) VALUES (?, ?, ?, ?)', row)\n bdb.execute('''\n CREATE POPULATION t_p FOR t WITH SCHEMA {\n- 
MODEL y, z, w AS NUMERICAL;\n+ SET STATTYPES OF y, z, w TO NUMERICAL;\n IGNORE x\n }\n ''')\n" } ]
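For reference, a hedged end-to-end example of the new spelling, assuming bayeslite is installed; the table, columns, and rows are invented.

import bayeslite

# In-memory BayesDB instance (pathname=None keeps it off disk).
bdb = bayeslite.bayesdb_open(pathname=None)
bdb.sql_execute('CREATE TABLE people (age REAL, gender TEXT)')
bdb.sql_execute("INSERT INTO people VALUES (34, 'F')")
bdb.sql_execute("INSERT INTO people VALUES (51, 'M')")

# Old spelling:  MODEL age AS NUMERICAL; MODEL gender AS NOMINAL
# New spelling:
bdb.execute('''
    CREATE POPULATION p FOR people WITH SCHEMA (
        SET STATTYPE OF age TO NUMERICAL;
        SET STATTYPE OF gender TO NOMINAL
    )
''')
bdb.close()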
ae51d04afc10433ae87c6172492bf4183e69e643
probcomp/bayeslite
05.03.2018 10:17:55
Apache License 2.0
Make loom process constraints for conditional mutual information. Before, constraints (i.e. conditions) were ignored for mutual information with loom. A sketch of the marginalization strategy follows the diff below.
[ { "change_type": "MODIFY", "old_path": "src/backends/loom_backend.py", "new_path": "src/backends/loom_backend.py", "diff": "@@ -31,6 +31,7 @@ from StringIO import StringIO\n from collections import Counter\n from collections import OrderedDict\n from datetime import datetime\n+import numpy as np\n \n import loom.tasks\n \n@@ -571,11 +572,127 @@ class LoomBackend(BayesDB_Backend):\n ''', (generator_id, modelno, kind_id, rowid))\n return cursor_value(cursor)\n \n+ def _get_constraint_row(\n+ self,\n+ constraints,\n+ bdb,\n+ generator_id,\n+ population_id,\n+ server\n+ ):\n+ \"\"\"For a given tuple of constraints, return the conditioning row in loom\n+ style.\"\"\"\n+ if constraints:\n+ row_constraints = {\n+ bayesdb_variable_name(bdb, generator_id, None, colno) : value\n+ for colno, value in constraints\n+ }\n+ # XXX: I am sure that there is a more pythonic way to do this.\n+ csv_headers = row_constraints.keys()\n+ csv_values = row_constraints.values()\n+\n+ # Copy-pasta from simulate_joint.\n+ csv_headers_str = [str(a).lower() for a in csv_headers]\n+ csv_values_str = [str(a) for a in csv_values]\n+ lower_to_upper = {str(a).lower(): str(a) for a in csv_headers_str}\n+ # XXX: again, copy-pasta. Isn't this just csv_headers?\n+ header = [\n+ lower_to_upper[column_name]\n+ for column_name in csv_headers_str\n+ ]\n+ conditioning_row_loom_format = server.encode_row(\n+ csv_values_str,\n+ header\n+ )\n+ else:\n+ conditioning_row_loom_format = None\n+ return conditioning_row_loom_format\n+\n+ def _marginalize_constraints(self, constraints):\n+ \"\"\"Parse constraints, decide which are targets for marginalization.\"\"\"\n+ targets = []\n+ fixed_constraints = []\n+ for constraint in constraints:\n+ if constraint[1] is None:\n+ targets.append(constraint[0])\n+ else:\n+ fixed_constraints.append(constraint)\n+ return targets, fixed_constraints\n+\n+ def _simulate_constraints(\n+ self,\n+ bdb,\n+ generator_id,\n+ modelnos,\n+ constraints,\n+ num_samples\n+ ):\n+ \"\"\"Sample values for constraints that need marginalization.\"\"\"\n+ rowid = None # For CMI, rowid is always None.\n+ # Detect which constraints come with fixed values, and which needs to\n+ # targeted for marginalization.\n+ targets, fixed_constraints = self._marginalize_constraints(constraints)\n+ # Call simulate to jointly sample the constraints that need\n+ # marginilation.\n+ samples = self.simulate_joint(\n+ bdb,\n+ generator_id,\n+ modelnos,\n+ rowid,\n+ targets,\n+ num_samples=num_samples,\n+ constraints=fixed_constraints\n+ )\n+ # Return sampled constraint values and fixed constrained values.\n+ return [\n+ zip(targets, sample) + fixed_constraints\n+ for sample in samples\n+ ]\n+\n+ def _get_constraint_rows(\n+ self,\n+ constraints,\n+ bdb,\n+ generator_id,\n+ population_id,\n+ modelnos,\n+ server,\n+ inner_numsamples\n+ ):\n+ \"\"\"Return constraints in loom's format for cases where we need to\n+ marginialize out.\"\"\"\n+ # Simulate n constraint rows.\n+ simulated_constraints = self._simulate_constraints(\n+ bdb,\n+ generator_id,\n+ modelnos,\n+ constraints,\n+ inner_numsamples\n+ )\n+ # Generate the format loom requires.\n+ all_constraint_rows = [\n+ self._get_constraint_row(\n+ simulated_constraint,\n+ bdb,\n+ generator_id,\n+ population_id,\n+ server\n+ )\n+ for simulated_constraint in simulated_constraints\n+ ]\n+ print all_constraint_rows\n+ return all_constraint_rows\n+\n+\n+ def _marginize_cmi(self, constraints):\n+ \"\"\"Check if we need to marginalize over constraint values.\"\"\"\n+ if not constraints:\n+ return False\n+ 
return None in [constraint[1] for constraint in constraints]\n+\n def column_mutual_information(self, bdb, generator_id, modelnos, colnos0,\n colnos1, constraints, numsamples):\n- # XXX Why are the constraints being ignored? If Loom does not support\n- # conditioning, then implement constraints using the simple Monte Carlo\n- # estimator.\n+ \"\"\"Compute conditional mutual information.\"\"\"\n population_id = bayesdb_generator_population(bdb, generator_id)\n colnames0 = [\n str(bayesdb_variable_name(bdb, population_id, None, colno))\n@@ -588,13 +705,42 @@ class LoomBackend(BayesDB_Backend):\n server = self._get_preql_server(bdb, generator_id)\n target_set = server._cols_to_mask(server.encode_set(colnames0))\n query_set = server._cols_to_mask(server.encode_set(colnames1))\n- mi = server._query_server.mutual_information(\n- target_set,\n- query_set,\n- entropys=None,\n- sample_count=loom.preql.SAMPLE_COUNT\n- )\n- return mi\n+ # Check if we have to marginalize the condition:\n+ if self._marginize_cmi(constraints):\n+ inner_numsamples = numsamples\n+ conditioning_rows_loom_format = self._get_constraint_rows(\n+ constraints,\n+ bdb,\n+ generator_id,\n+ population_id,\n+ modelnos,\n+ server,\n+ inner_numsamples\n+ )\n+\n+ else: # Otherwise, no marginalization is needed.\n+ conditioning_rows_loom_format = [\n+ self._get_constraint_row(\n+ constraints,\n+ bdb,\n+ generator_id,\n+ population_id,\n+ server\n+ )\n+ ]\n+ mi_estimates = [\n+ server._query_server.mutual_information(\n+ target_set,\n+ query_set,\n+ entropys=None,\n+ sample_count=loom.preql.SAMPLE_COUNT, # XXX: wrong but quick;\n+ # the default for sample_count is 1000.\n+ conditioning_row=conditioning_row_loom_format,\n+ ).mean\n+ for conditioning_row_loom_format in conditioning_rows_loom_format\n+ ]\n+ # Output requires and iterable.\n+ return [np.mean(mi_estimates)]\n \n def row_similarity(self, bdb, generator_id, modelnos, rowid, target_rowid,\n colnos):\n" } ]
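The new helpers above marginalize over unbound constraints by simulating values for them and averaging the per-sample MI estimates. Here is a hedged, self-contained sketch of that strategy in plain Python; simulate_constraints and mi_given are toy stand-ins for the backend's simulate_joint and loom's mutual_information calls.

import random


def needs_marginalization(constraints):
    # Mirrors _marginize_cmi: marginalize iff some constraint value is None.
    return any(value is None for _, value in constraints)


def split_constraints(constraints):
    # Mirrors _marginalize_constraints: unbound columns become sampling targets.
    targets = [col for col, value in constraints if value is None]
    fixed = [(col, value) for col, value in constraints if value is not None]
    return targets, fixed


def simulate_constraints(targets, fixed, num_samples):
    # Toy stand-in for simulate_joint: sample values for the unbound columns.
    return [[(col, random.gauss(0.0, 1.0)) for col in targets] + fixed
            for _ in range(num_samples)]


def mi_given(conditioning_row):
    # Toy stand-in for loom's conditional mutual_information estimate.
    return random.uniform(0.0, 1.0)


def conditional_mi(constraints, num_samples=100):
    if needs_marginalization(constraints):
        targets, fixed = split_constraints(constraints)
        rows = simulate_constraints(targets, fixed, num_samples)
    else:
        rows = [constraints]
    # Average the per-row estimates, as the backend does with np.mean.
    return sum(mi_given(row) for row in rows) / len(rows)


print(conditional_mi([('apogee', None), ('period', 1000.0)]))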
07f29e770a1ef9f3b07a4809e9e7e81469438903
sys-bio/tellurium
01.12.2021 13:20:04
Apache License 2.0
Clear previously loaded models in Antimony when getting a new one. Repeated calls to 'loada' were causing steady increases in memory use because the Antimony library was storing every model. This commit clears them out each time, since a tellurium user doesn't care about retrieving old models. Also clean up a couple of warnings. A minimal sketch of the pattern follows the diff below.
[ { "change_type": "MODIFY", "old_path": "tellurium/tellurium.py", "new_path": "tellurium/tellurium.py", "diff": "@@ -12,7 +12,6 @@ model export, plotting or the Jarnac compatibility layer.\n \n from __future__ import print_function, division, absolute_import\n \n-import sys\n import os\n import random\n import warnings\n@@ -185,6 +184,7 @@ try:\n import sbol\n except ImportError as e:\n sbol = None\n+ roadrunner.Logger.log(roadrunner.Logger.LOG_WARNING, str(e))\n warnings.warn(\"'pySBOL' could not be imported, cannot import/export SBOL files\", ImportWarning, stacklevel=2)\n \n try:\n@@ -405,7 +405,7 @@ def distributed_sensitivity_analysis(sc,senitivity_analysis_model,calculation=No\n sa_model.simulation = user_defined_simulator()\n \n if(sa_model.sbml):\n- model_roadrunner = te.loadAntimonyModel(te.sbmlToAntimony(sa_model.model))\n+ model_roadrunner = te.loadSBMLModel(sa_model.model)\n else:\n model_roadrunner = te.loadAntimonyModel(sa_model.model)\n \n@@ -464,7 +464,7 @@ def distributed_sensitivity_analysis(sc,senitivity_analysis_model,calculation=No\n \n samples = perform_sampling(np.meshgrid(*params))\n samples = zip([senitivity_analysis_model]*len(samples),samples)\n- if(calculation is \"avg\"):\n+ if(calculation == \"avg\"):\n group_rdd = sc.parallelize(samples,len(samples)).map(spark_sensitivity_analysis).\\\n flatMap(lambda x: x[1].items()).groupByKey()\n \n@@ -589,6 +589,7 @@ def antimonyToSBML(ant):\n :return: SBML\n :rtype: str\n \"\"\"\n+ antimony.clearPreviousLoads()\n try:\n isfile = os.path.isfile(ant)\n except ValueError:\n@@ -611,6 +612,7 @@ def antimonyToCellML(ant):\n :return: CellML\n :rtype: str\n \"\"\"\n+ antimony.clearPreviousLoads()\n if os.path.isfile(ant):\n code = antimony.loadAntimonyFile(ant)\n else:\n@@ -628,6 +630,7 @@ def sbmlToAntimony(sbml):\n :return: Antimony\n :rtype: str\n \"\"\"\n+ antimony.clearPreviousLoads()\n isfile = False\n try:\n isfile = os.path.isfile(sbml)\n@@ -651,6 +654,7 @@ def sbmlToCellML(sbml):\n \"\"\"\n if not hasattr(antimony, \"loadCellMLString\"):\n raise NotImplementedError(\"CellML support was not compiled into Antimony, so conversion is not available.\")\n+ antimony.clearPreviousLoads()\n if os.path.isfile(sbml):\n code = antimony.loadSBMLFile(sbml)\n else:\n@@ -668,6 +672,7 @@ def cellmlToAntimony(cellml):\n \"\"\"\n if not hasattr(antimony, \"loadCellMLString\"):\n raise NotImplementedError(\"CellML support was not compiled into Antimony, so conversion is not available.\")\n+ antimony.clearPreviousLoads()\n if os.path.isfile(cellml):\n code = antimony.loadCellMLFile(cellml)\n else:\n@@ -686,6 +691,7 @@ def cellmlToSBML(cellml):\n \"\"\"\n if not hasattr(antimony, \"loadCellMLString\"):\n raise NotImplementedError(\"CellML support was not compiled into Antimony, so conversion is not available.\")\n+ antimony.clearPreviousLoads()\n if os.path.isfile(cellml):\n code = antimony.loadCellMLFile(cellml)\n else:\n@@ -751,7 +757,7 @@ def extractFileFromCombineArchive(archive_path, entry_location):\n if not archive.initializeFromArchive(archive_path):\n raise RuntimeError('Failed to initialize archive')\n try:\n- entry = archive.getEntryByLocation(entry_location)\n+ archive.getEntryByLocation(entry_location)\n except:\n raise RuntimeError('Could not find entry {}'.format(entry_location))\n return archive.extractEntryToString(entry_location)\n" } ]
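A minimal, hedged sketch of the pattern this commit adds around every conversion, assuming the antimony Python bindings are installed; the toy model string is invented.

import antimony

TOY_MODEL = '''
model example()
  S1 -> S2; k1*S1;
  k1 = 0.1; S1 = 10
end
'''

for _ in range(3):
    # Without this call, libantimony keeps every previously loaded model in
    # memory, so repeated loads steadily grow the process footprint.
    antimony.clearPreviousLoads()
    if antimony.loadAntimonyString(TOY_MODEL) < 0:
        raise RuntimeError(antimony.getLastError())
    sbml = antimony.getSBMLString(antimony.getMainModuleName())
    print(len(sbml), 'bytes of SBML generated')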
bb6072b6cb5ea2fa239357708c47e7d62cc8bae0
sys-bio/tellurium
01.07.2022 12:57:59
Apache License 2.0
Drop the phrasedml and sbml2matlab requirement. phrasedml and sbml2matlab don't exist for Python 3.10 yet, so we need to adjust the code to fail gracefully when trying to import them.
[ { "change_type": "MODIFY", "old_path": "spyder_mod/Spyder 5.1.5/site-packages/spyder/config/main.py", "new_path": "spyder_mod/Spyder 5.1.5/site-packages/spyder/config/main.py", "diff": "@@ -147,7 +147,7 @@ DEFAULTS = [\n 'pylab/inline/width': 6,\n 'pylab/inline/height': 4,\n 'pylab/inline/bbox_inches': True,\n- 'startup/run_lines': 'import antimony; import sbml2matlab; import rrplugins; import numpy; import scipy; import matplotlib; import roadrunner; import tellurium as te',\n+ 'startup/run_lines': 'import antimony; import rrplugins; import numpy; import scipy; import matplotlib; import roadrunner; import tellurium as te',\n 'startup/use_run_file': False,\n 'startup/run_file': '',\n 'greedy_completer': False,\n" }, { "change_type": "MODIFY", "old_path": "tellurium/teconverters/convert_omex.py", "new_path": "tellurium/teconverters/convert_omex.py", "diff": "@@ -4,7 +4,6 @@ Class for working with omex files.\n from __future__ import print_function, division, absolute_import\n import os\n import re\n-import shutil\n import tempfile\n import json\n import getpass\n@@ -16,8 +15,10 @@ try:\n except ImportError:\n import tecombine as libcombine\n \n-\n-from .convert_phrasedml import phrasedmlImporter\n+try:\n+ from .convert_phrasedml import phrasedmlImporter\n+except:\n+ pass\n from .convert_antimony import antimonyConverter\n \n class OmexFormatDetector:\n@@ -146,8 +147,11 @@ class Omex(object):\n \n :return:\n \"\"\"\n- import phrasedml\n- phrasedml.clearReferencedSBML()\n+ try:\n+ import phrasedml\n+ phrasedml.clearReferencedSBML()\n+ except:\n+ pass\n \n workingDir = tempfile.mkdtemp(suffix=\"_sedml\")\n self.writeFiles(workingDir)\n@@ -163,8 +167,11 @@ class Omex(object):\n \"\"\" Export Omex instance as combine archive.\n \n :param outfile: A path to the output file\"\"\"\n- import phrasedml\n- phrasedml.clearReferencedSBML()\n+ try:\n+ import phrasedml\n+ phrasedml.clearReferencedSBML()\n+ except:\n+ pass\n \n archive = libcombine.CombineArchive()\n description = libcombine.OmexDescription()\n@@ -232,9 +239,7 @@ class inlineOmexImporter:\n if not os.path.isfile(path):\n raise IOError('No such file: {}'.format(path))\n \n- d = None\n if not os.access(os.getcwd(), os.W_OK):\n- d = os.getcwd()\n os.chdir(tempfile.gettempdir())\n \n omex = libcombine.CombineArchive()\n@@ -242,8 +247,6 @@ class inlineOmexImporter:\n raise IOError('Could not read COMBINE archive.')\n importer = inlineOmexImporter(omex)\n \n- # if d is not None:\n- # os.chdir(d)\n return importer\n \n def __init__(self, omex):\n@@ -445,13 +448,14 @@ class inlineOmexImporter:\n for entry in self.sedml_entries:\n sedml_str = self.omex.extractEntryToString(entry.getLocation()).replace('BIOMD0000000012,xml',\n 'BIOMD0000000012.xml')\n+ phrasedml_output = \"\"\n try:\n phrasedml_output = phrasedmlImporter.fromContent(\n sedml_str,\n self.makeSBMLResourceMap(self.fixSep(os.path.dirname(entry.getLocation())))\n ).toPhrasedml().rstrip().replace('compartment', 'compartment_')\n except Exception as e:\n- errmsg = 'Could not read embedded SED-ML file {}.'.format(entry.getLocation())\n+ errmsg = 'Could not read embedded SED-ML file or could not convert to phraSED-ML: {}.\\n{}'.format(entry.getLocation(), e.what())\n try:\n try:\n import libsedml\n" }, { "change_type": "MODIFY", "old_path": "tellurium/teconverters/convert_phrasedml.py", "new_path": "tellurium/teconverters/convert_phrasedml.py", "diff": "@@ -2,114 +2,119 @@ from __future__ import print_function, division, absolute_import\n \n import os\n import re\n-import phrasedml\n 
try:\n import tesedml as libsedml\n except ImportError:\n import libsedml\n \n+#Only load this class if phrasedml exists\n+try:\n+ import phrasedml\n \n-class phrasedmlImporter(object):\n-\n- def __init__(self, sbml_map={}):\n- \"\"\" Constructor. \"\"\"\n- self.sedml_str = None\n- self.sedml_path = None\n- self.sbml_map = sbml_map\n-\n-\n- @classmethod\n- def fromContent(cls, sedml_str, sbml_map={}):\n-\n- # FIXME: bad hack for https://github.com/fbergmann/libSEDML/issues/47\n- # test for JWS quirks\n- if 'xmlns=\"http://sed-ml.org/sed-ml/level1/version3\"' in sedml_str:\n- # import xml.etree.ElementTree as ElementTree\n- # root = ElementTree.fromstring(sedml_str)\n- # for p in root.findall('{http://sed-ml.org/sed-ml/level1/version3}plot2D'):\n- # if not 'logX' in p.attrib or not 'logY' in p.attrib:\n- # logX = False\n- # logY = False\n- # for l in p.findall('{http://sed-ml.org/sed-ml/level1/version3}listOfCurves'):\n- # for c in l.findall('{http://sed-ml.org/sed-ml/level1/version3}curve'):\n- # if 'logX' in c.attrib and c.attrib['logX'].lower() == 'true':\n- # logX = True\n- # if 'logY' in c.attrib and c.attrib['logY'].lower() == 'true':\n- # logY = True\n- # p.set('logX', logX)\n- # p.set('logY', logY)\n- # sedml_str = (ElementTree.tostring(root, encoding='utf8', method='xml')).decode('utf8')\n- while True:\n- p = sedml_str.find('plot2D')\n- if p < 0:\n- break\n- b = sedml_str.find('>', p)\n- if b < 0:\n- break\n- l = sedml_str.find('logX', p)\n- if l < 0 or b < l:\n- sedml_str = sedml_str[:p] + 'plot2D logX=\"false\" logY=\"false\" ' + sedml_str[p+len('plot2D'):]\n+ class phrasedmlImporter(object):\n+ \n+ def __init__(self, sbml_map={}):\n+ \"\"\" Constructor. \"\"\"\n+ self.sedml_str = None\n+ self.sedml_path = None\n+ self.sbml_map = sbml_map\n+ \n+ \n+ @classmethod\n+ def fromContent(cls, sedml_str, sbml_map={}):\n+ \n+ # FIXME: bad hack for https://github.com/fbergmann/libSEDML/issues/47\n+ # test for JWS quirks\n+ if 'xmlns=\"http://sed-ml.org/sed-ml/level1/version3\"' in sedml_str:\n+ # import xml.etree.ElementTree as ElementTree\n+ # root = ElementTree.fromstring(sedml_str)\n+ # for p in root.findall('{http://sed-ml.org/sed-ml/level1/version3}plot2D'):\n+ # if not 'logX' in p.attrib or not 'logY' in p.attrib:\n+ # logX = False\n+ # logY = False\n+ # for l in p.findall('{http://sed-ml.org/sed-ml/level1/version3}listOfCurves'):\n+ # for c in l.findall('{http://sed-ml.org/sed-ml/level1/version3}curve'):\n+ # if 'logX' in c.attrib and c.attrib['logX'].lower() == 'true':\n+ # logX = True\n+ # if 'logY' in c.attrib and c.attrib['logY'].lower() == 'true':\n+ # logY = True\n+ # p.set('logX', logX)\n+ # p.set('logY', logY)\n+ # sedml_str = (ElementTree.tostring(root, encoding='utf8', method='xml')).decode('utf8')\n+ while True:\n+ p = sedml_str.find('plot2D')\n+ if p < 0:\n+ break\n+ b = sedml_str.find('>', p)\n+ if b < 0:\n+ break\n+ l = sedml_str.find('logX', p)\n+ if l < 0 or b < l:\n+ sedml_str = sedml_str[:p] + 'plot2D logX=\"false\" logY=\"false\" ' + sedml_str[p+len('plot2D'):]\n+ else:\n+ break\n+ print(sedml_str)\n+ \n+ \n+ importer = phrasedmlImporter(sbml_map)\n+ importer.sedml_str = sedml_str\n+ # test for errors\n+ result = importer.toPhrasedml()\n+ if result is None:\n+ # get errors from libsedml\n+ doc = libsedml.SedReader().readSedMLFromString(sedml_str)\n+ if doc.getNumErrors():\n+ max_len = 100\n+ message = doc.getError(doc.getNumErrors()-1).getMessage()\n+ message = message[:max_len] + '...' 
if len(message) > max_len else message\n+ raise RuntimeError('Errors reading SED-ML: {}'.format(message))\n else:\n- break\n- print(sedml_str)\n-\n-\n- importer = phrasedmlImporter(sbml_map)\n- importer.sedml_str = sedml_str\n- # test for errors\n- result = importer.toPhrasedml()\n- if result is None:\n- # get errors from libsedml\n- doc = libsedml.SedReader().readSedMLFromString(sedml_str)\n- if doc.getNumErrors():\n- max_len = 100\n- message = doc.getError(doc.getNumErrors()-1).getMessage()\n- message = message[:max_len] + '...' if len(message) > max_len else message\n- raise RuntimeError('Errors reading SED-ML: {}'.format(message))\n- else:\n- raise RuntimeError('Unable to read SED-ML.')\n- return importer\n+ raise RuntimeError('Unable to read SED-ML.')\n+ return importer\n+ \n+ \n+ def isInRootDir(self, file):\n+ d = os.path.split(file)[0]\n+ return d == '' or d == '.'\n+ \n+ def removeFileExt(self, filename):\n+ return os.path.splitext(filename)[0]\n+ \n+ def formatResource(self, filename):\n+ \"\"\" Normalizes and also strips xml extension.\"\"\"\n+ return self.removeFileExt(os.path.normpath(filename))\n+ \n+ def fixModelRefs(self, phrasedml_str):\n+ ''' Changes all references of type myModel.xml to myModel.'''\n+ model_ref = re.compile(r'^.*\\s*model\\s*\"([^\"]*)\"\\s*$')\n+ out_str = ''\n+ for line in phrasedml_str.splitlines():\n+ match = model_ref.match(line)\n+ if match:\n+ filename = match.group(1)\n+ if self.isInRootDir(filename):\n+ line = line.replace(filename,self.formatResource(filename))\n+ out_str += line+'\\n'\n+ return out_str\n+ \n+ def toPhrasedml(self):\n+ # assign sbml resources\n+ # print('toPhrasedml sbml resources:')\n+ phrasedml.clearReferencedSBML()\n+ for sbml_resource in self.sbml_map:\n+ # print(' {} -> {}'.format(sbml_resource, self.sbml_map[sbml_resource][:30]))\n+ phrasedml.setReferencedSBML(sbml_resource, self.sbml_map[sbml_resource])\n+ # convert to phrasedml\n+ if self.sedml_str:\n+ result = phrasedml.convertString(self.sedml_str)\n+ if result is None:\n+ raise RuntimeError(phrasedml.getLastError())\n+ return self.fixModelRefs(phrasedml.getLastPhraSEDML())\n+ elif self.sedml_path:\n+ result = phrasedml.convertFile(self.sedml_str)\n+ if result is None:\n+ raise RuntimeError(phrasedml.getLastError())\n+ return self.fixModelRefs(phrasedml.getLastPhraSEDML())\n \n-\n- def isInRootDir(self, file):\n- d = os.path.split(file)[0]\n- return d == '' or d == '.'\n-\n- def removeFileExt(self, filename):\n- return os.path.splitext(filename)[0]\n-\n- def formatResource(self, filename):\n- \"\"\" Normalizes and also strips xml extension.\"\"\"\n- return self.removeFileExt(os.path.normpath(filename))\n-\n- def fixModelRefs(self, phrasedml_str):\n- ''' Changes all references of type myModel.xml to myModel.'''\n- model_ref = re.compile(r'^.*\\s*model\\s*\"([^\"]*)\"\\s*$')\n- out_str = ''\n- for line in phrasedml_str.splitlines():\n- match = model_ref.match(line)\n- if match:\n- filename = match.group(1)\n- if self.isInRootDir(filename):\n- line = line.replace(filename,self.formatResource(filename))\n- out_str += line+'\\n'\n- return out_str\n-\n- def toPhrasedml(self):\n- # assign sbml resources\n- # print('toPhrasedml sbml resources:')\n- phrasedml.clearReferencedSBML()\n- for sbml_resource in self.sbml_map:\n- # print(' {} -> {}'.format(sbml_resource, self.sbml_map[sbml_resource][:30]))\n- phrasedml.setReferencedSBML(sbml_resource, self.sbml_map[sbml_resource])\n- # convert to phrasedml\n- if self.sedml_str:\n- result = phrasedml.convertString(self.sedml_str)\n- if 
result is None:\n- raise RuntimeError(phrasedml.getLastError())\n- return self.fixModelRefs(phrasedml.getLastPhraSEDML())\n- elif self.sedml_path:\n- result = phrasedml.convertFile(self.sedml_str)\n- if result is None:\n- raise RuntimeError(phrasedml.getLastError())\n- return self.fixModelRefs(phrasedml.getLastPhraSEDML())\n+except:\n+ pass\n\\ No newline at end of file\n" } ]
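The tellurium commit above turns `phrasedml` into an optional dependency by wrapping both the import and the dependent class definition in a try/except, so environments without the package can still load the module. A minimal sketch of that guard pattern follows; the class name `PhrasedmlConverter` is illustrative (the real class is `phrasedmlImporter`), and only phrasedml calls that appear in the diff are used.

```python
# Optional-dependency guard: the class exists only if phrasedml imports cleanly.
try:
    import phrasedml

    class PhrasedmlConverter(object):
        """Available only when the optional phrasedml package is installed."""

        def to_phrasedml(self, sedml_str):
            phrasedml.clearReferencedSBML()
            result = phrasedml.convertString(sedml_str)
            if result is None:
                raise RuntimeError(phrasedml.getLastError())
            return phrasedml.getLastPhraSEDML()

except ImportError:
    # The patch itself uses a bare `except: pass`; a sentinel lets callers check.
    PhrasedmlConverter = None
```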
eb5e9b4d3efd933b73c87419b185a8a019ccc8be
peercoin/peercoin
06.11.2020 15:34:41
MIT License
Fix wallet_send.py wallet setup to work with descriptors Fixes the wallet setup so this test works with descriptor wallets. Also enables explicit descriptor and legacy wallet testing in the test runner.
[ { "change_type": "MODIFY", "old_path": "test/functional/test_runner.py", "new_path": "test/functional/test_runner.py", "diff": "@@ -252,7 +252,8 @@ BASE_SCRIPTS = [\n 'rpc_estimatefee.py',\n 'rpc_getblockstats.py',\n 'wallet_create_tx.py --legacy-wallet',\n- 'wallet_send.py',\n+ 'wallet_send.py --legacy-wallet',\n+ 'wallet_send.py --descriptors',\n 'wallet_create_tx.py --descriptors',\n 'p2p_fingerprint.py',\n 'feature_uacomment.py',\n" }, { "change_type": "MODIFY", "old_path": "test/functional/wallet_send.py", "new_path": "test/functional/wallet_send.py", "diff": "@@ -8,6 +8,7 @@ from decimal import Decimal, getcontext\n from itertools import product\n \n from test_framework.authproxy import JSONRPCException\n+from test_framework.descriptors import descsum_create\n from test_framework.test_framework import BitcoinTestFramework\n from test_framework.util import (\n assert_equal,\n@@ -168,49 +169,91 @@ class WalletSendTest(BitcoinTestFramework):\n self.nodes[1].createwallet(wallet_name=\"w1\")\n w1 = self.nodes[1].get_wallet_rpc(\"w1\")\n # w2 contains the private keys for w3\n- self.nodes[1].createwallet(wallet_name=\"w2\")\n+ self.nodes[1].createwallet(wallet_name=\"w2\", blank=True)\n w2 = self.nodes[1].get_wallet_rpc(\"w2\")\n+ xpriv = \"tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v\"\n+ xpub = \"tpubD6NzVbkrYhZ4YkEfMbRJkQyZe7wTkbTNRECozCtJPtdLRn6cT1QKb8yHjwAPcAr26eHBFYs5iLiFFnCbwPRsncCKUKCfubHDMGKzMVcN1Jg\"\n+ if self.options.descriptors:\n+ w2.importdescriptors([{\n+ \"desc\": descsum_create(\"wpkh(\" + xpriv + \"/0/0/*)\"),\n+ \"timestamp\": \"now\",\n+ \"range\": [0, 100],\n+ \"active\": True\n+ },{\n+ \"desc\": descsum_create(\"wpkh(\" + xpriv + \"/0/1/*)\"),\n+ \"timestamp\": \"now\",\n+ \"range\": [0, 100],\n+ \"active\": True,\n+ \"internal\": True\n+ }])\n+ else:\n+ w2.sethdseed(True)\n+\n # w3 is a watch-only wallet, based on w2\n self.nodes[1].createwallet(wallet_name=\"w3\", disable_private_keys=True)\n w3 = self.nodes[1].get_wallet_rpc(\"w3\")\n- for _ in range(3):\n- a2_receive = w2.getnewaddress()\n- a2_change = w2.getrawchangeaddress() # doesn't actually use change derivation\n- res = w3.importmulti([{\n- \"desc\": w2.getaddressinfo(a2_receive)[\"desc\"],\n+ if self.options.descriptors:\n+ # Match the privkeys in w2 for descriptors\n+ res = w3.importdescriptors([{\n+ \"desc\": descsum_create(\"wpkh(\" + xpub + \"/0/0/*)\"),\n \"timestamp\": \"now\",\n+ \"range\": [0, 100],\n \"keypool\": True,\n+ \"active\": True,\n \"watchonly\": True\n },{\n- \"desc\": w2.getaddressinfo(a2_change)[\"desc\"],\n+ \"desc\": descsum_create(\"wpkh(\" + xpub + \"/0/1/*)\"),\n \"timestamp\": \"now\",\n+ \"range\": [0, 100],\n \"keypool\": True,\n+ \"active\": True,\n \"internal\": True,\n \"watchonly\": True\n }])\n assert_equal(res, [{\"success\": True}, {\"success\": True}])\n \n- w0.sendtoaddress(a2_receive, 10) # fund w3\n- self.nodes[0].generate(1)\n- self.sync_blocks()\n-\n- # w4 has private keys enabled, but only contains watch-only keys (from w2)\n- self.nodes[1].createwallet(wallet_name=\"w4\", disable_private_keys=False)\n- w4 = self.nodes[1].get_wallet_rpc(\"w4\")\n for _ in range(3):\n a2_receive = w2.getnewaddress()\n- res = w4.importmulti([{\n- \"desc\": w2.getaddressinfo(a2_receive)[\"desc\"],\n- \"timestamp\": \"now\",\n- \"keypool\": False,\n- \"watchonly\": True\n- }])\n- assert_equal(res, [{\"success\": True}])\n+ if not self.options.descriptors:\n+ # Because legacy wallets use exclusively hardened 
derivation, we can't do a ranged import like we do for descriptors\n+ a2_change = w2.getrawchangeaddress() # doesn't actually use change derivation\n+ res = w3.importmulti([{\n+ \"desc\": w2.getaddressinfo(a2_receive)[\"desc\"],\n+ \"timestamp\": \"now\",\n+ \"keypool\": True,\n+ \"watchonly\": True\n+ },{\n+ \"desc\": w2.getaddressinfo(a2_change)[\"desc\"],\n+ \"timestamp\": \"now\",\n+ \"keypool\": True,\n+ \"internal\": True,\n+ \"watchonly\": True\n+ }])\n+ assert_equal(res, [{\"success\": True}, {\"success\": True}])\n \n- w0.sendtoaddress(a2_receive, 10) # fund w4\n+ w0.sendtoaddress(a2_receive, 10) # fund w3\n self.nodes[0].generate(1)\n self.sync_blocks()\n \n+ if not self.options.descriptors:\n+ # w4 has private keys enabled, but only contains watch-only keys (from w2)\n+ # This is legacy wallet behavior only as descriptor wallets don't allow watchonly and non-watchonly things in the same wallet.\n+ self.nodes[1].createwallet(wallet_name=\"w4\", disable_private_keys=False)\n+ w4 = self.nodes[1].get_wallet_rpc(\"w4\")\n+ for _ in range(3):\n+ a2_receive = w2.getnewaddress()\n+ res = w4.importmulti([{\n+ \"desc\": w2.getaddressinfo(a2_receive)[\"desc\"],\n+ \"timestamp\": \"now\",\n+ \"keypool\": False,\n+ \"watchonly\": True\n+ }])\n+ assert_equal(res, [{\"success\": True}])\n+\n+ w0.sendtoaddress(a2_receive, 10) # fund w4\n+ self.nodes[0].generate(1)\n+ self.sync_blocks()\n+\n self.log.info(\"Send to address...\")\n self.test_send(from_wallet=w0, to_wallet=w1, amount=1)\n self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True)\n@@ -241,11 +284,15 @@ class WalletSendTest(BitcoinTestFramework):\n res = w2.walletprocesspsbt(res[\"psbt\"])\n assert res[\"complete\"]\n \n- self.log.info(\"Create PSBT from wallet w4 with watch-only keys, sign with w2...\")\n- self.test_send(from_wallet=w4, to_wallet=w1, amount=1, expect_error=(-4, \"Insufficient funds\"))\n- res = self.test_send(from_wallet=w4, to_wallet=w1, amount=1, include_watching=True, add_to_wallet=False)\n- res = w2.walletprocesspsbt(res[\"psbt\"])\n- assert res[\"complete\"]\n+ if not self.options.descriptors:\n+ # Descriptor wallets do not allow mixed watch-only and non-watch-only things in the same wallet.\n+ # This is specifically testing that w4 ignores its own private keys and creates a psbt with send\n+ # which is not something that needs to be tested in descriptor wallets.\n+ self.log.info(\"Create PSBT from wallet w4 with watch-only keys, sign with w2...\")\n+ self.test_send(from_wallet=w4, to_wallet=w1, amount=1, expect_error=(-4, \"Insufficient funds\"))\n+ res = self.test_send(from_wallet=w4, to_wallet=w1, amount=1, include_watching=True, add_to_wallet=False)\n+ res = w2.walletprocesspsbt(res[\"psbt\"])\n+ assert res[\"complete\"]\n \n self.log.info(\"Create OP_RETURN...\")\n self.test_send(from_wallet=w0, to_wallet=w1, amount=1)\n" } ]
f692e8da1a75d3f3f938a21727514204725e52b8
cocotb/cocotb
12.01.2019 15:40:28
BSD 3-Clause New or Revised License
Use context managers to handle profiling This avoids needing to remember to disable the profiler before every return statement, and reads more cleanly than using a `try` / `finally`.
[ { "change_type": "MODIFY", "old_path": "cocotb/scheduler.py", "new_path": "cocotb/scheduler.py", "diff": "@@ -71,6 +71,17 @@ from cocotb.triggers import (Trigger, GPITrigger, Timer, ReadOnly, PythonTrigger\n from cocotb.log import SimLog\n from cocotb.result import (TestComplete, TestError, ReturnValue, raise_error,\n create_error, ExternalException)\n+from cocotb.utils import nullcontext\n+\n+\n+class profiling_context(object):\n+ \"\"\" Context manager that profiles its contents \"\"\"\n+ def __enter__(self):\n+ _profile.enable()\n+\n+ def __exit__(self, *excinfo):\n+ _profile.disable()\n+\n \n class external_state(object):\n INIT = 0\n@@ -280,26 +291,26 @@ class Scheduler(object):\n if _profiling:\n ps = pstats.Stats(_profile).sort_stats('cumulative')\n ps.dump_stats(\"test_profile.pstat\")\n- _profile.enable()\n-\n- self._mode = Scheduler._MODE_NORMAL\n- if trigger is not None:\n- trigger.unprime()\n+ ctx = profiling_context()\n+ else:\n+ ctx = nullcontext()\n \n- # Issue previous test result, if there is one\n- if self._test_result is not None:\n- if _debug:\n- self.log.debug(\"Issue test result to regression object\")\n- cocotb.regression.handle_result(self._test_result)\n- self._test_result = None\n- if self._entrypoint is not None:\n- test = self._entrypoint\n- self._entrypoint = None\n- self.schedule(test)\n- self.advance()\n+ with ctx:\n+ self._mode = Scheduler._MODE_NORMAL\n+ if trigger is not None:\n+ trigger.unprime()\n \n- if _profiling:\n- _profile.disable()\n+ # Issue previous test result, if there is one\n+ if self._test_result is not None:\n+ if _debug:\n+ self.log.debug(\"Issue test result to regression object\")\n+ cocotb.regression.handle_result(self._test_result)\n+ self._test_result = None\n+ if self._entrypoint is not None:\n+ test = self._entrypoint\n+ self._entrypoint = None\n+ self.schedule(test)\n+ self.advance()\n \n def react(self, trigger, depth=0):\n \"\"\"React called when a trigger fires.\n@@ -308,142 +319,136 @@ class Scheduler(object):\n schedule them.\n \"\"\"\n if _profiling and not depth:\n- _profile.enable()\n-\n- # When a trigger fires it is unprimed internally\n- if _debug:\n- self.log.debug(\"Trigger fired: %s\" % str(trigger))\n- # trigger.unprime()\n+ ctx = profiling_context()\n+ else:\n+ ctx = nullcontext()\n \n- if self._mode == Scheduler._MODE_TERM:\n+ with ctx:\n+ # When a trigger fires it is unprimed internally\n if _debug:\n- self.log.debug(\"Ignoring trigger %s since we're terminating\" %\n- str(trigger))\n- return\n+ self.log.debug(\"Trigger fired: %s\" % str(trigger))\n+ # trigger.unprime()\n \n- if trigger is self._readonly:\n- self._mode = Scheduler._MODE_READONLY\n- # Only GPI triggers affect the simulator scheduling mode\n- elif isinstance(trigger, GPITrigger):\n- self._mode = Scheduler._MODE_NORMAL\n+ if self._mode == Scheduler._MODE_TERM:\n+ if _debug:\n+ self.log.debug(\"Ignoring trigger %s since we're terminating\" %\n+ str(trigger))\n+ return\n \n- # We're the only source of ReadWrite triggers which are only used for\n- # playing back any cached signal updates\n- if trigger is self._readwrite:\n+ if trigger is self._readonly:\n+ self._mode = Scheduler._MODE_READONLY\n+ # Only GPI triggers affect the simulator scheduling mode\n+ elif isinstance(trigger, GPITrigger):\n+ self._mode = Scheduler._MODE_NORMAL\n \n- if _debug:\n- self.log.debug(\"Writing cached signal updates\")\n+ # We're the only source of ReadWrite triggers which are only used for\n+ # playing back any cached signal updates\n+ if trigger is self._readwrite:\n+\n+ 
if _debug:\n+ self.log.debug(\"Writing cached signal updates\")\n \n- while self._writes:\n- handle, value = self._writes.popitem()\n- handle.setimmediatevalue(value)\n+ while self._writes:\n+ handle, value = self._writes.popitem()\n+ handle.setimmediatevalue(value)\n \n- self._readwrite.unprime()\n+ self._readwrite.unprime()\n \n- if _profiling:\n- _profile.disable()\n- return\n+ return\n \n- # Similarly if we've scheduled our next_timestep on way to readwrite\n- if trigger is self._next_timestep:\n+ # Similarly if we've scheduled our next_timestep on way to readwrite\n+ if trigger is self._next_timestep:\n \n- if not self._writes:\n- self.log.error(\n- \"Moved to next timestep without any pending writes!\")\n- else:\n- self.log.debug(\n- \"Priming ReadWrite trigger so we can playback writes\")\n- self._readwrite.prime(self.react)\n+ if not self._writes:\n+ self.log.error(\n+ \"Moved to next timestep without any pending writes!\")\n+ else:\n+ self.log.debug(\n+ \"Priming ReadWrite trigger so we can playback writes\")\n+ self._readwrite.prime(self.react)\n \n- if _profiling:\n- _profile.disable()\n- return\n+ return\n \n- if trigger not in self._trigger2coros:\n-\n- # GPI triggers should only be ever pending if there is an\n- # associated coroutine waiting on that trigger, otherwise it would\n- # have been unprimed already\n- if isinstance(trigger, GPITrigger):\n- self.log.critical(\n- \"No coroutines waiting on trigger that fired: %s\" %\n- str(trigger))\n-\n- trigger.log.info(\"I'm the culprit\")\n- # For Python triggers this isn't actually an error - we might do\n- # event.set() without knowing whether any coroutines are actually\n- # waiting on this event, for example\n- elif _debug:\n- self.log.debug(\n- \"No coroutines waiting on trigger that fired: %s\" %\n- str(trigger))\n-\n- if _profiling:\n- _profile.disable()\n- return\n+ if trigger not in self._trigger2coros:\n \n- # Scheduled coroutines may append to our waiting list so the first\n- # thing to do is pop all entries waiting on this trigger.\n- scheduling = self._trigger2coros.pop(trigger)\n+ # GPI triggers should only be ever pending if there is an\n+ # associated coroutine waiting on that trigger, otherwise it would\n+ # have been unprimed already\n+ if isinstance(trigger, GPITrigger):\n+ self.log.critical(\n+ \"No coroutines waiting on trigger that fired: %s\" %\n+ str(trigger))\n+\n+ trigger.log.info(\"I'm the culprit\")\n+ # For Python triggers this isn't actually an error - we might do\n+ # event.set() without knowing whether any coroutines are actually\n+ # waiting on this event, for example\n+ elif _debug:\n+ self.log.debug(\n+ \"No coroutines waiting on trigger that fired: %s\" %\n+ str(trigger))\n+\n+ return\n+\n+ # Scheduled coroutines may append to our waiting list so the first\n+ # thing to do is pop all entries waiting on this trigger.\n+ scheduling = self._trigger2coros.pop(trigger)\n \n- if _debug:\n- debugstr = \"\\n\\t\".join([coro.__name__ for coro in scheduling])\n- if len(scheduling):\n- debugstr = \"\\n\\t\" + debugstr\n- self.log.debug(\"%d pending coroutines for event %s%s\" %\n- (len(scheduling), str(trigger), debugstr))\n-\n- # This trigger isn't needed any more\n- trigger.unprime()\n-\n- # If the coroutine was waiting on multiple triggers we may be able\n- # to unprime the other triggers that didn't fire\n- scheduling_set = set(scheduling)\n- other_triggers = {\n- t\n- for coro in scheduling\n- for t in self._coro2triggers[coro]\n- } - {trigger}\n-\n- for pending in other_triggers:\n- # every 
coroutine waiting on this trigger is already being woken\n- if scheduling_set.issuperset(self._trigger2coros[pending]):\n- if pending.primed:\n- pending.unprime()\n- del self._trigger2coros[pending]\n-\n- for coro in scheduling:\n- if _debug:\n- self.log.debug(\"Scheduling coroutine %s\" % (coro.__name__))\n- self.schedule(coro, trigger=trigger)\n if _debug:\n- self.log.debug(\"Scheduled coroutine %s\" % (coro.__name__))\n+ debugstr = \"\\n\\t\".join([coro.__name__ for coro in scheduling])\n+ if len(scheduling):\n+ debugstr = \"\\n\\t\" + debugstr\n+ self.log.debug(\"%d pending coroutines for event %s%s\" %\n+ (len(scheduling), str(trigger), debugstr))\n+\n+ # This trigger isn't needed any more\n+ trigger.unprime()\n \n- if not depth:\n- # Schedule may have queued up some events so we'll burn through those\n- while self._pending_events:\n+ # If the coroutine was waiting on multiple triggers we may be able\n+ # to unprime the other triggers that didn't fire\n+ scheduling_set = set(scheduling)\n+ other_triggers = {\n+ t\n+ for coro in scheduling\n+ for t in self._coro2triggers[coro]\n+ } - {trigger}\n+\n+ for pending in other_triggers:\n+ # every coroutine waiting on this trigger is already being woken\n+ if scheduling_set.issuperset(self._trigger2coros[pending]):\n+ if pending.primed:\n+ pending.unprime()\n+ del self._trigger2coros[pending]\n+\n+ for coro in scheduling:\n if _debug:\n- self.log.debug(\"Scheduling pending event %s\" %\n- (str(self._pending_events[0])))\n- self._pending_events.pop(0).set()\n+ self.log.debug(\"Scheduling coroutine %s\" % (coro.__name__))\n+ self.schedule(coro, trigger=trigger)\n+ if _debug:\n+ self.log.debug(\"Scheduled coroutine %s\" % (coro.__name__))\n \n- while self._pending_triggers:\n- if _debug:\n- self.log.debug(\"Scheduling pending trigger %s\" %\n- (str(self._pending_triggers[0])))\n- self.react(self._pending_triggers.pop(0), depth=depth + 1)\n+ if not depth:\n+ # Schedule may have queued up some events so we'll burn through those\n+ while self._pending_events:\n+ if _debug:\n+ self.log.debug(\"Scheduling pending event %s\" %\n+ (str(self._pending_events[0])))\n+ self._pending_events.pop(0).set()\n \n- # We only advance for GPI triggers\n- if not depth and isinstance(trigger, GPITrigger):\n- self.advance()\n+ while self._pending_triggers:\n+ if _debug:\n+ self.log.debug(\"Scheduling pending trigger %s\" %\n+ (str(self._pending_triggers[0])))\n+ self.react(self._pending_triggers.pop(0), depth=depth + 1)\n \n- if _debug:\n- self.log.debug(\"All coroutines scheduled, handing control back\"\n- \" to simulator\")\n+ # We only advance for GPI triggers\n+ if not depth and isinstance(trigger, GPITrigger):\n+ self.advance()\n+\n+ if _debug:\n+ self.log.debug(\"All coroutines scheduled, handing control back\"\n+ \" to simulator\")\n \n- if _profiling:\n- _profile.disable()\n- return\n \n def unschedule(self, coro):\n \"\"\"Unschedule a coroutine. 
Unprime any pending triggers\"\"\"\n" }, { "change_type": "MODIFY", "old_path": "cocotb/utils.py", "new_path": "cocotb/utils.py", "diff": "@@ -491,6 +491,26 @@ class ParametrizedSingleton(type):\n return self\n \n \n+# backport of Python 3.7's contextlib.nullcontext\n+class nullcontext(object):\n+ \"\"\"Context manager that does no additional processing.\n+ Used as a stand-in for a normal context manager, when a particular\n+ block of code is only sometimes used with a normal context manager:\n+ cm = optional_cm if condition else nullcontext()\n+ with cm:\n+ # Perform operation, using optional_cm if condition is True\n+ \"\"\"\n+\n+ def __init__(self, enter_result=None):\n+ self.enter_result = enter_result\n+\n+ def __enter__(self):\n+ return self.enter_result\n+\n+ def __exit__(self, *excinfo):\n+ pass\n+\n+\n if __name__ == \"__main__\":\n import random\n a = \"\"\n" } ]
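The cocotb commit above replaces paired `_profile.enable()` / `_profile.disable()` calls with a context manager, falling back to a backported `nullcontext` when profiling is off, so the profiler is released no matter which branch returns. A self-contained sketch of the same pattern, with the scheduler body reduced to a trivial function and `cProfile` standing in for cocotb's profiler object:

```python
import cProfile

_profile = cProfile.Profile()
_profiling = True  # in cocotb this is driven by an environment variable


class profiling_context(object):
    """Enable the profiler on entry and disable it on exit, even on early returns."""

    def __enter__(self):
        _profile.enable()

    def __exit__(self, *excinfo):
        _profile.disable()


class nullcontext(object):
    """No-op stand-in (backport of Python 3.7's contextlib.nullcontext)."""

    def __init__(self, enter_result=None):
        self.enter_result = enter_result

    def __enter__(self):
        return self.enter_result

    def __exit__(self, *excinfo):
        pass


def react(trigger):
    ctx = profiling_context() if _profiling else nullcontext()
    with ctx:
        if trigger is None:
            return  # no explicit disable needed before this early return
        print("handling", trigger)


react("timer")
react(None)
_profile.print_stats()
```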
f61957b0ce7d0505126acb3ebf0ec1faa6184d52
cocotb/cocotb
21.04.2020 10:37:35
BSD 3-Clause New or Revised License
Add ability to construct RegressionManager with test/hook list The standard constructor that uses discovery has been moved to the `from_discovery` class method.
[ { "change_type": "MODIFY", "old_path": "cocotb/__init__.py", "new_path": "cocotb/__init__.py", "diff": "@@ -176,7 +176,7 @@ def _initialise_testbench(root_name):\n \n # start Regression Manager\n global regression_manager\n- regression_manager = RegressionManager(dut)\n+ regression_manager = RegressionManager.from_discovery(dut)\n regression_manager.execute()\n \n _rlock.release()\n" }, { "change_type": "MODIFY", "old_path": "cocotb/regression.py", "new_path": "cocotb/regression.py", "diff": "@@ -76,18 +76,23 @@ def _my_import(name: str) -> Any:\n return mod\n \n \n+_logger = SimLog(__name__)\n+\n+\n class RegressionManager:\n \"\"\"Encapsulates all regression capability into a single place\"\"\"\n \n- def __init__(self, dut: SimHandle):\n+ def __init__(self, dut: SimHandle, tests: Iterable[Test], hooks: Iterable[Hook]):\n \"\"\"\n Args:\n dut (SimHandle): The root handle to pass into test functions.\n+ tests (Iterable[Test]): tests to run\n+ hooks (Iterable[Hook]): hooks to tun\n \"\"\"\n self._dut = dut\n self._test_task = None\n self._cov = None\n- self.log = SimLog(\"cocotb.regression\")\n+ self.log = _logger\n self.start_time = time.time()\n self.test_results = []\n self.count = 0\n@@ -118,7 +123,7 @@ class RegressionManager:\n # Test Discovery\n ####################\n self._queue = []\n- for test in self.discover_tests():\n+ for test in tests:\n self.log.info(\"Found test {}.{}\".format(test.__module__, test.__qualname__))\n self._queue.append(test)\n self.ntests = len(self._queue)\n@@ -130,12 +135,31 @@ class RegressionManager:\n \n # Process Hooks\n ###################\n- for hook in self.discover_hooks():\n+ for hook in hooks:\n self.log.info(\"Found hook {}.{}\".format(hook.__module__, hook.__qualname__))\n self._init_hook(hook)\n \n- def discover_tests(self) -> Iterable[Test]:\n+ @classmethod\n+ def from_discovery(cls, dut: SimHandle):\n+ \"\"\"\n+ Obtains the test and hook lists by discovery.\n+\n+ See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.\n+\n+ Args:\n+ dut (SimHandle): The root handle to pass into test functions.\n+ \"\"\"\n+ tests = cls._discover_tests()\n+ hooks = cls._discover_hooks()\n+ return cls(dut, tests, hooks)\n \n+ @staticmethod\n+ def _discover_tests() -> Iterable[Test]:\n+ \"\"\"\n+ Discovers tests in files automatically.\n+\n+ See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.\n+ \"\"\"\n module_str = os.getenv('MODULE')\n test_str = os.getenv('TESTCASE')\n \n@@ -146,14 +170,14 @@ class RegressionManager:\n \n for module_name in modules:\n try:\n- self.log.debug(\"Python Path: \" + \",\".join(sys.path))\n- self.log.debug(\"PWD: \" + os.getcwd())\n+ _logger.debug(\"Python Path: \" + \",\".join(sys.path))\n+ _logger.debug(\"PWD: \" + os.getcwd())\n module = _my_import(module_name)\n except Exception as E:\n- self.log.critical(\"Failed to import module %s: %s\", module_name, E)\n- self.log.info(\"MODULE variable was \\\"%s\\\"\", \".\".join(modules))\n- self.log.info(\"Traceback: \")\n- self.log.info(traceback.format_exc())\n+ _logger.critical(\"Failed to import module %s: %s\", module_name, E)\n+ _logger.info(\"MODULE variable was \\\"%s\\\"\", \".\".join(modules))\n+ _logger.info(\"Traceback: \")\n+ _logger.info(traceback.format_exc())\n raise\n \n if test_str:\n@@ -163,13 +187,13 @@ class RegressionManager:\n try:\n test = getattr(module, test_name)\n except AttributeError:\n- self.log.error(\"Requested test %s wasn't found in module %s\", test_name, module_name)\n+ 
_logger.error(\"Requested test %s wasn't found in module %s\", test_name, module_name)\n err = AttributeError(\"Test %s doesn't exist in %s\" % (test_name, module_name))\n raise err from None # discard nested traceback\n \n if not hasattr(test, \"im_test\"):\n- self.log.error(\"Requested %s from module %s isn't a cocotb.test decorated coroutine\",\n- test_name, module_name)\n+ _logger.error(\"Requested %s from module %s isn't a cocotb.test decorated coroutine\",\n+ test_name, module_name)\n raise ImportError(\"Failed to find requested test %s\" % test_name)\n yield test\n \n@@ -181,13 +205,18 @@ class RegressionManager:\n if hasattr(thing, \"im_test\"):\n yield thing\n \n- def discover_hooks(self) -> Iterable[Hook]:\n+ @staticmethod\n+ def _discover_hooks() -> Iterable[Hook]:\n+ \"\"\"\n+ Discovers hooks automatically.\n \n+ See :envvar:`COCOTB_HOOKS` for details on how hooks are discovered.\n+ \"\"\"\n hooks_str = os.getenv('COCOTB_HOOKS', '')\n hooks = [s.strip() for s in hooks_str.split(',') if s.strip()]\n \n for module_name in hooks:\n- self.log.info(\"Loading hook from module '\" + module_name + \"'\")\n+ _logger.info(\"Loading hook from module '\" + module_name + \"'\")\n module = _my_import(module_name)\n \n for thing in vars(module).values():\n@@ -619,7 +648,7 @@ class TestFactory:\n self.args = args\n self.kwargs_constant = kwargs\n self.kwargs = {}\n- self.log = SimLog(\"cocotb.regression\")\n+ self.log = _logger\n \n def add_option(self, name, optionlist):\n \"\"\"Add a named option to the test.\n" } ]
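The cocotb commit above keeps `__init__` as a plain constructor over explicit test/hook iterables and moves environment-based discovery into a `from_discovery` alternate constructor backed by static helpers. A stripped-down sketch of that shape (the discovery bodies are stubbed out; the real ones read `MODULE`, `TESTCASE` and `COCOTB_HOOKS`):

```python
class RegressionManager(object):
    def __init__(self, dut, tests, hooks):
        # The constructor no longer knows anything about discovery.
        self.dut = dut
        self.tests = list(tests)
        self.hooks = list(hooks)

    @classmethod
    def from_discovery(cls, dut):
        # Alternate constructor: gather tests/hooks, then delegate to __init__.
        return cls(dut, cls._discover_tests(), cls._discover_hooks())

    @staticmethod
    def _discover_tests():
        return []  # stub: the real helper imports modules named in MODULE/TESTCASE

    @staticmethod
    def _discover_hooks():
        return []  # stub: the real helper imports modules named in COCOTB_HOOKS


rm_discovered = RegressionManager.from_discovery(dut=None)     # discovery path
rm_explicit = RegressionManager(dut=None, tests=[], hooks=[])  # new direct path
```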
3c35805c128d9ff6e1d987d4d1ace6f55734df6e
cocotb/cocotb
09.04.2020 14:37:36
BSD 3-Clause New or Revised License
Improve __repr__ for RunningTask objects. Displays task name, status, and current coroutine. If the task is pending on a trigger, displays the trigger. If the task is finished, displays the outcome.
[ { "change_type": "MODIFY", "old_path": "cocotb/decorators.py", "new_path": "cocotb/decorators.py", "diff": "@@ -35,7 +35,7 @@ import os\n import cocotb\n from cocotb.log import SimLog\n from cocotb.result import ReturnValue\n-from cocotb.utils import get_sim_time, lazy_property, remove_traceback_frames\n+from cocotb.utils import get_sim_time, lazy_property, remove_traceback_frames, extract_coro_stack\n from cocotb import outcomes\n \n # Sadly the Python standard logging module is very slow so it's better not to\n@@ -87,6 +87,8 @@ class RunningTask:\n triggers to fire.\n \"\"\"\n \n+ _id_count = 0 # used by the scheduler for debug\n+\n def __init__(self, inst):\n \n if inspect.iscoroutine(inst):\n@@ -107,13 +109,16 @@ class RunningTask:\n raise TypeError(\n \"%s isn't a valid coroutine! Did you forget to use the yield keyword?\" % inst)\n self._coro = inst\n- self.__name__ = inst.__name__\n- self.__qualname__ = inst.__qualname__\n self._started = False\n self._callbacks = []\n self._outcome = None\n self._trigger = None\n \n+ self._task_id = self._id_count\n+ RunningTask._id_count += 1\n+ self.__name__ = \"Task %d\" % self._task_id\n+ self.__qualname__ = self.__name__\n+\n @lazy_property\n def log(self):\n # Creating a logger is expensive, only do it if we actually plan to\n@@ -134,7 +139,48 @@ class RunningTask:\n return self\n \n def __str__(self):\n- return str(self.__qualname__)\n+ return \"<{}>\".format(self.__name__)\n+\n+ def _get_coro_stack(self):\n+ \"\"\"Get the coroutine callstack of this Task.\"\"\"\n+ coro_stack = extract_coro_stack(self._coro)\n+\n+ # Remove Trigger.__await__() from the stack, as it's not really useful\n+ if self._natively_awaitable and len(coro_stack):\n+ if coro_stack[-1].name == '__await__':\n+ coro_stack.pop()\n+\n+ return coro_stack\n+\n+ def __repr__(self):\n+ coro_stack = self._get_coro_stack()\n+\n+ if cocotb.scheduler._current_task is self:\n+ fmt = \"<{name} running coro={coro}()>\"\n+ elif self._finished:\n+ fmt = \"<{name} finished coro={coro}() outcome={outcome}>\"\n+ elif self._trigger is not None:\n+ fmt = \"<{name} pending coro={coro}() trigger={trigger}>\"\n+ elif not self._started:\n+ fmt = \"<{name} created coro={coro}()>\"\n+ else:\n+ fmt = \"<{name} adding coro={coro}()>\"\n+\n+ try:\n+ coro_name = coro_stack[-1].name\n+ # coro_stack may be empty if:\n+ # - exhausted generator\n+ # - finished coroutine\n+ except IndexError:\n+ coro_name = self._coro.__name__\n+\n+ repr_string = fmt.format(\n+ name=self.__name__,\n+ coro=coro_name,\n+ trigger=self._trigger,\n+ outcome=self._outcome\n+ )\n+ return repr_string\n \n def _advance(self, outcome):\n \"\"\"Advance to the next yield in this coroutine.\n@@ -243,7 +289,7 @@ class RunningTest(RunningCoroutine):\n def __init__(self, inst, parent):\n self.error_messages = []\n RunningCoroutine.__init__(self, inst, parent)\n- self.log = SimLog(\"cocotb.test.%s\" % self.__qualname__, id(self))\n+ self.log = SimLog(\"cocotb.test.%s\" % inst.__qualname__, id(self))\n self.started = False\n self.start_time = 0\n self.start_sim_time = 0\n@@ -251,11 +297,15 @@ class RunningTest(RunningCoroutine):\n self.expect_error = parent.expect_error\n self.skip = parent.skip\n self.stage = parent.stage\n- self._id = parent._id\n+ self.__name__ = \"Test %s\" % inst.__name__\n+ self.__qualname__ = \"Test %s\" % inst.__qualname__\n \n # make sure not to create a circular reference here\n self.handler = RunningTest.ErrorLogHandler(self.error_messages.append)\n \n+ def __str__(self):\n+ return 
\"<{}>\".format(self.__name__)\n+\n def _advance(self, outcome):\n if not self.started:\n self.log.info(\"Starting test: \\\"%s\\\"\\nDescription: %s\" %\n" }, { "change_type": "MODIFY", "old_path": "cocotb/utils.py", "new_path": "cocotb/utils.py", "diff": "@@ -31,6 +31,7 @@ import ctypes\n import math\n import os\n import sys\n+import traceback\n import weakref\n import functools\n import warnings\n@@ -580,3 +581,39 @@ def remove_traceback_frames(tb_or_exc, frame_names):\n assert tb.tb_frame.f_code.co_name == frame_name\n tb = tb.tb_next\n return tb\n+\n+\n+def walk_coro_stack(coro):\n+ \"\"\"Walk down the coroutine stack, starting at *coro*.\n+\n+ Supports coroutines and generators.\n+ \"\"\"\n+ while coro is not None:\n+ try:\n+ f = getattr(coro, 'cr_frame')\n+ coro = coro.cr_await\n+ except AttributeError:\n+ try:\n+ f = getattr(coro, 'gi_frame')\n+ coro = coro.gi_yieldfrom\n+ except AttributeError:\n+ f = None\n+ coro = None\n+ if f is not None:\n+ yield (f, f.f_lineno)\n+\n+\n+def extract_coro_stack(coro, limit=None):\n+ \"\"\"Create a list of pre-processed entries from the coroutine stack.\n+\n+ This is based on :func:`traceback.extract_tb`.\n+\n+ If *limit* is omitted or ``None``, all entries are extracted.\n+ The list is a :class:`traceback.StackSummary` object, and\n+ each entry in the list is a :class:`traceback.FrameSummary` object\n+ containing attributes ``filename``, ``lineno``, ``name``, and ``line``\n+ representing the information that is usually printed for a stack\n+ trace. The line is a string with leading and trailing\n+ whitespace stripped; if the source is not available it is ``None``.\n+ \"\"\"\n+ return traceback.StackSummary.extract(walk_coro_stack(coro), limit=limit)\n" } ]
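The `__repr__` in the cocotb commit above is built from the task's live coroutine call stack, which the new `cocotb.utils` helpers walk via `cr_frame`/`cr_await` and `gi_frame`/`gi_yieldfrom` before handing the frames to `traceback.StackSummary.extract`. A small standalone sketch of those helpers; the demo uses plain generators so it runs without cocotb:

```python
import traceback


def walk_coro_stack(coro):
    """Yield (frame, lineno) pairs down a suspended coroutine/generator chain."""
    while coro is not None:
        if getattr(coro, 'cr_frame', None) is not None:
            frame, coro = coro.cr_frame, coro.cr_await
        elif getattr(coro, 'gi_frame', None) is not None:
            frame, coro = coro.gi_frame, coro.gi_yieldfrom
        else:
            frame, coro = None, None
        if frame is not None:
            yield frame, frame.f_lineno


def extract_coro_stack(coro, limit=None):
    """Pre-process the stack the same way traceback.extract_tb does."""
    return traceback.StackSummary.extract(walk_coro_stack(coro), limit=limit)


def inner():
    yield "suspended here"


def outer():
    yield from inner()


gen = outer()
next(gen)  # run until inner() suspends at its yield
for frame_summary in extract_coro_stack(gen):
    print(frame_summary.name, frame_summary.lineno)  # prints outer, then inner
```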
1be51548fdbd7e7655d8dfdbc5ae1589ff432194
cocotb/cocotb
06.01.2020 15:13:16
BSD 3-Clause New or Revised License
Convert resolve() to str.translate(). New translation table class _ResolveTable provides resolve functionality and improves performance. This preserves current behavior of COCOTB_RESOLVE_X = RANDOM, where all instances of the character are translated to the same value.
[ { "change_type": "MODIFY", "old_path": "cocotb/binary.py", "new_path": "cocotb/binary.py", "diff": "@@ -29,27 +29,61 @@\n \n import os\n import random\n+import re\n import warnings\n \n+\n+_RESOLVE_TO_0 = \"-lL\"\n+_RESOLVE_TO_1 = \"hH\"\n+_RESOLVE_TO_CHOICE = \"xXzZuUwW\"\n resolve_x_to = os.getenv('COCOTB_RESOLVE_X', \"VALUE_ERROR\")\n \n \n-def resolve(string):\n- for char in BinaryValue._resolve_to_0:\n- string = string.replace(char, \"0\")\n- for char in BinaryValue._resolve_to_1:\n- string = string.replace(char, \"1\")\n- for char in BinaryValue._resolve_to_error:\n- if resolve_x_to == \"VALUE_ERROR\" and char in string:\n- raise ValueError(\"Unable to resolve to binary >%s<\" % string)\n+class _ResolveTable(dict):\n+ \"\"\"Translation table class for resolving binary strings.\n+\n+ For use with :func:`str.translate()`, which indexes into table with Unicode ordinals.\n+ \"\"\"\n+ def __init__(self):\n+ self.update({ord(\"0\"): ord(\"0\"), ord(\"1\"): ord(\"1\")})\n+ self.update({ord(k): ord(\"0\") for k in _RESOLVE_TO_0})\n+ self.update({ord(k): ord(\"1\") for k in _RESOLVE_TO_1})\n+\n+ # Do not resolve if resolve_x_to is not set to one of the supported values\n+ def no_resolve(key):\n+ return key\n+ self.resolve_x = no_resolve\n+\n+ if resolve_x_to == \"VALUE_ERROR\":\n+ def resolve_error(key):\n+ raise ValueError(\"Unresolvable bit in binary string: '{}'\".format(chr(key)))\n+ self.resolve_x = resolve_error\n elif resolve_x_to == \"ZEROS\":\n- string = string.replace(char, \"0\")\n+ self.update({ord(k): ord(\"0\") for k in _RESOLVE_TO_CHOICE})\n elif resolve_x_to == \"ONES\":\n- string = string.replace(char, \"1\")\n+ self.update({ord(k): ord(\"1\") for k in _RESOLVE_TO_CHOICE})\n elif resolve_x_to == \"RANDOM\":\n- bits = \"{0:b}\".format(random.getrandbits(1))\n- string = string.replace(char, bits)\n- return string\n+ def resolve_random(key):\n+ # convert to correct Unicode ordinal:\n+ # ord('0') = 48\n+ # ord('1') = 49\n+ return random.getrandbits(1) + 48\n+ self.resolve_x = resolve_random\n+\n+ self._resolve_to_choice = {ord(c) for c in _RESOLVE_TO_CHOICE}\n+\n+ def __missing__(self, key):\n+ if key in self._resolve_to_choice:\n+ return self.resolve_x(key)\n+ else:\n+ return key\n+\n+\n+_resolve_table = _ResolveTable()\n+\n+\n+def resolve(string):\n+ return string.translate(_resolve_table)\n \n \n def _clog2(val):\n@@ -89,10 +123,7 @@ class BinaryValue:\n b'*'\n \n \"\"\"\n- _resolve_to_0 = \"-lL\" # noqa\n- _resolve_to_1 = \"hH\" # noqa\n- _resolve_to_error = \"xXzZuUwW\" # Resolve to a ValueError() since these usually mean something is wrong\n- _permitted_chars = _resolve_to_0 +_resolve_to_1 + _resolve_to_error + \"01\" # noqa\n+ _permitted_chars = _RESOLVE_TO_0 +_RESOLVE_TO_1 + _RESOLVE_TO_CHOICE + \"01\" # noqa\n \n def __init__(self, value=None, n_bits=None, bigEndian=True,\n binaryRepresentation=BinaryRepresentation.UNSIGNED,\n@@ -197,10 +228,10 @@ class BinaryValue:\n return binstr\n \n def _convert_from_unsigned(self, x):\n- return int(resolve(x), 2)\n+ return int(x.translate(_resolve_table), 2)\n \n def _convert_from_signed_mag(self, x):\n- rv = int(resolve(self._str[1:]), 2)\n+ rv = int(self._str[1:].translate(_resolve_table), 2)\n if self._str[0] == '1':\n rv = rv * -1\n return rv\n@@ -213,7 +244,7 @@ class BinaryValue:\n if x[0] == '1':\n rv = rv * -1\n else:\n- rv = int(resolve(x), 2)\n+ rv = int(x.translate(_resolve_table), 2)\n return rv\n \n def _invert(self, x):\n@@ -312,7 +343,7 @@ class BinaryValue:\n @property\n def signed_integer(self):\n \"\"\"The signed 
integer representation of the underlying vector.\"\"\"\n- ival = int(resolve(self._str), 2)\n+ ival = int(self._str.translate(_resolve_table), 2)\n bits = len(self._str)\n signbit = (1 << (bits - 1))\n if (ival & signbit) == 0:\n@@ -337,7 +368,7 @@ class BinaryValue:\n This is similar to the SystemVerilog Assertion ``$isunknown`` system function\n or the VHDL function ``is_x`` (with an inverted meaning).\n \"\"\"\n- return not any(char in self._str for char in BinaryValue._resolve_to_error)\n+ return not any(char in self._str for char in _RESOLVE_TO_CHOICE)\n \n @property\n def buff(self) -> bytes:\n@@ -351,7 +382,7 @@ class BinaryValue:\n Note that for older versions used with Python 2 these types were\n indistinguishable.\n \"\"\"\n- bits = resolve(self._str)\n+ bits = self._str.translate(_resolve_table)\n \n if len(bits) % 8:\n bits = \"0\" * (8 - len(bits) % 8) + bits\n" } ]
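The cocotb commit above swaps chained `str.replace` calls for a single `str.translate` pass over a `dict` subclass keyed by Unicode ordinals, with `__missing__` deciding what to do with unresolvable bits. A compact sketch of that table idea; the policy handling is simplified and, unlike the original RANDOM mode, each occurrence here is resolved independently:

```python
import random


class ResolveTable(dict):
    """Translation table for str.translate(); keys and values are Unicode ordinals."""

    def __init__(self, policy="ZEROS"):
        self.update({ord(c): ord("0") for c in "-lL"})  # characters that resolve to 0
        self.update({ord(c): ord("1") for c in "hH"})   # characters that resolve to 1
        self._choice = {ord(c) for c in "xXzZuUwW"}     # unresolvable without a policy
        self._policy = policy

    def __missing__(self, key):
        if key not in self._choice:
            return key  # leave 0, 1 and any other character untouched
        if self._policy == "ZEROS":
            return ord("0")
        if self._policy == "ONES":
            return ord("1")
        if self._policy == "RANDOM":
            return random.getrandbits(1) + ord("0")
        raise ValueError("Unresolvable bit in binary string: %r" % chr(key))


table = ResolveTable("ZEROS")
print("1xXz0L1H".translate(table))  # -> 10000011
```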
15dc5aa37dfc240a400fd01584eb711a4802ae06
appscale/gts
04.01.2017 15:24:24
Apache License 2.0
Create a separate set of constants for operations This is to differentiate between transaction table values and entity operations.
[ { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/cassandra_env/cassandra_interface.py", "new_path": "AppDB/appscale/datastore/cassandra_env/cassandra_interface.py", "diff": "@@ -19,6 +19,7 @@ from cassandra.query import ValueSequence\n from .. import dbconstants\n from .. import helper_functions\n from ..dbconstants import AppScaleDBConnectionError\n+from ..dbconstants import Operations\n from ..dbconstants import TxnActions\n from ..dbinterface import AppDBInterface\n from ..unpackaged import APPSCALE_LIB_DIR\n@@ -97,29 +98,29 @@ def deletions_for_entity(entity, composite_indices=()):\n for entry in asc_rows:\n deletions.append({'table': dbconstants.ASC_PROPERTY_TABLE,\n 'key': entry[0],\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n dsc_rows = get_index_kv_from_tuple(\n [(prefix, entity)], reverse=True)\n for entry in dsc_rows:\n deletions.append({'table': dbconstants.DSC_PROPERTY_TABLE,\n 'key': entry[0],\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n for key in get_composite_indexes_rows([entity], composite_indices):\n deletions.append({'table': dbconstants.COMPOSITE_TABLE,\n 'key': key,\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n entity_key = get_entity_key(prefix, entity.key().path())\n deletions.append({'table': dbconstants.APP_ENTITY_TABLE,\n 'key': entity_key,\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n kind_key = get_kind_key(prefix, entity.key().path())\n deletions.append({'table': dbconstants.APP_KIND_TABLE,\n 'key': kind_key,\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n return deletions\n \n@@ -163,14 +164,14 @@ def index_deletions(old_entity, new_entity, composite_indices=()):\n [app_id, namespace, kind, prop.name(), value, entity_key])\n deletions.append({'table': dbconstants.ASC_PROPERTY_TABLE,\n 'key': key,\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n reverse_key = dbconstants.KEY_DELIMITER.join(\n [app_id, namespace, kind, prop.name(),\n helper_functions.reverse_lex(value), entity_key])\n deletions.append({'table': dbconstants.DSC_PROPERTY_TABLE,\n 'key': reverse_key,\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n changed_prop_names = set(changed_props.keys())\n for index in composite_indices:\n@@ -187,7 +188,7 @@ def index_deletions(old_entity, new_entity, composite_indices=()):\n for entry in (old_entries - new_entries):\n deletions.append({'table': dbconstants.COMPOSITE_TABLE,\n 'key': entry,\n- 'operation': TxnActions.DELETE})\n+ 'operation': Operations.DELETE})\n \n return deletions\n \n@@ -218,7 +219,7 @@ def mutations_for_entity(entity, txn, current_value=None,\n dbconstants.APP_ENTITY_SCHEMA[1]: str(txn)}\n mutations.append({'table': dbconstants.APP_ENTITY_TABLE,\n 'key': entity_key,\n- 'operation': TxnActions.PUT,\n+ 'operation': Operations.PUT,\n 'values': entity_value})\n \n reference_value = {'reference': entity_key}\n@@ -226,27 +227,27 @@ def mutations_for_entity(entity, txn, current_value=None,\n kind_key = get_kind_key(prefix, entity.key().path())\n mutations.append({'table': dbconstants.APP_KIND_TABLE,\n 'key': kind_key,\n- 'operation': TxnActions.PUT,\n+ 'operation': Operations.PUT,\n 'values': reference_value})\n \n asc_rows = get_index_kv_from_tuple([(prefix, entity)])\n for entry in asc_rows:\n mutations.append({'table': dbconstants.ASC_PROPERTY_TABLE,\n 'key': entry[0],\n- 'operation': TxnActions.PUT,\n+ 'operation': 
Operations.PUT,\n 'values': reference_value})\n \n dsc_rows = get_index_kv_from_tuple([(prefix, entity)], reverse=True)\n for entry in dsc_rows:\n mutations.append({'table': dbconstants.DSC_PROPERTY_TABLE,\n 'key': entry[0],\n- 'operation': TxnActions.PUT,\n+ 'operation': Operations.PUT,\n 'values': reference_value})\n \n for key in get_composite_indexes_rows([entity], composite_indices):\n mutations.append({'table': dbconstants.COMPOSITE_TABLE,\n 'key': key,\n- 'operation': TxnActions.PUT,\n+ 'operation': Operations.PUT,\n 'values': reference_value})\n \n return mutations\n@@ -479,7 +480,7 @@ class DatastoreProxy(AppDBInterface):\n prepared_statements = {'insert': {}, 'delete': {}}\n for mutation in mutations:\n table = mutation['table']\n- if mutation['operation'] == TxnActions.PUT:\n+ if mutation['operation'] == Operations.PUT:\n if table not in prepared_statements['insert']:\n prepared_statements['insert'][table] = self.prepare_insert(table)\n values = mutation['values']\n@@ -488,7 +489,7 @@ class DatastoreProxy(AppDBInterface):\n prepared_statements['insert'][table],\n (bytearray(mutation['key']), column, bytearray(values[column]))\n )\n- elif mutation['operation'] == TxnActions.DELETE:\n+ elif mutation['operation'] == Operations.DELETE:\n if table not in prepared_statements['delete']:\n prepared_statements['delete'][table] = self.prepare_delete(table)\n batch.add(\n@@ -513,7 +514,7 @@ class DatastoreProxy(AppDBInterface):\n statements_and_params = []\n for mutation in mutations:\n table = mutation['table']\n- if mutation['operation'] == TxnActions.PUT:\n+ if mutation['operation'] == Operations.PUT:\n if table not in prepared_statements['insert']:\n prepared_statements['insert'][table] = self.prepare_insert(table)\n values = mutation['values']\n@@ -522,7 +523,7 @@ class DatastoreProxy(AppDBInterface):\n bytearray(values[column]))\n statements_and_params.append(\n (prepared_statements['insert'][table], params))\n- elif mutation['operation'] == TxnActions.DELETE:\n+ elif mutation['operation'] == Operations.DELETE:\n if table not in prepared_statements['delete']:\n prepared_statements['delete'][table] = self.prepare_delete(table)\n params = (bytearray(mutation['key']),)\n" }, { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/dbconstants.py", "new_path": "AppDB/appscale/datastore/dbconstants.py", "diff": "@@ -162,6 +162,12 @@ class TxnActions(object):\n ENQUEUE_TASK = '3'\n \n \n+class Operations(object):\n+ \"\"\" Possible datastore operations on entities. 
\"\"\"\n+ PUT = 'put'\n+ DELETE = 'delete'\n+\n+\n ###############################\n # Generic Datastore Exceptions\n ###############################\n" }, { "change_type": "MODIFY", "old_path": "AppDB/test/unit/test_datastore_server.py", "new_path": "AppDB/test/unit/test_datastore_server.py", "diff": "@@ -899,9 +899,9 @@ class TestDatastoreServer(unittest.TestCase):\n mutations = mutations_for_entity(entity, txn, new_entity)\n self.assertEqual(len(mutations), 6)\n self.assertEqual(mutations[0]['table'], dbconstants.ASC_PROPERTY_TABLE)\n- self.assertEqual(mutations[0]['operation'], dbconstants.TxnActions.DELETE)\n+ self.assertEqual(mutations[0]['operation'], dbconstants.Operations.DELETE)\n self.assertEqual(mutations[1]['table'], dbconstants.DSC_PROPERTY_TABLE)\n- self.assertEqual(mutations[1]['operation'], dbconstants.TxnActions.DELETE)\n+ self.assertEqual(mutations[1]['operation'], dbconstants.Operations.DELETE)\n self.assertEqual(mutations[2]['table'], dbconstants.APP_ENTITY_TABLE)\n self.assertEqual(mutations[3]['table'], dbconstants.APP_KIND_TABLE)\n self.assertEqual(mutations[4]['table'], dbconstants.ASC_PROPERTY_TABLE)\n@@ -937,11 +937,11 @@ class TestDatastoreServer(unittest.TestCase):\n (composite_index,))\n self.assertEqual(len(mutations), 10)\n self.assertEqual(mutations[0]['table'], dbconstants.ASC_PROPERTY_TABLE)\n- self.assertEqual(mutations[0]['operation'], dbconstants.TxnActions.DELETE)\n+ self.assertEqual(mutations[0]['operation'], dbconstants.Operations.DELETE)\n self.assertEqual(mutations[1]['table'], dbconstants.DSC_PROPERTY_TABLE)\n- self.assertEqual(mutations[1]['operation'], dbconstants.TxnActions.DELETE)\n+ self.assertEqual(mutations[1]['operation'], dbconstants.Operations.DELETE)\n self.assertEqual(mutations[2]['table'], dbconstants.COMPOSITE_TABLE)\n- self.assertEqual(mutations[2]['operation'], dbconstants.TxnActions.DELETE)\n+ self.assertEqual(mutations[2]['operation'], dbconstants.Operations.DELETE)\n self.assertEqual(mutations[3]['table'], dbconstants.APP_ENTITY_TABLE)\n self.assertEqual(mutations[4]['table'], dbconstants.APP_KIND_TABLE)\n self.assertEqual(mutations[5]['table'], dbconstants.ASC_PROPERTY_TABLE)\n" } ]
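The appscale commit above introduces `Operations` purely so entity mutations stop reusing `TxnActions` values. A tiny illustrative dispatcher in the spirit of the batch code in the diff; the table names and keys below are made up:

```python
class Operations(object):
    """Possible datastore operations on entities."""
    PUT = 'put'
    DELETE = 'delete'


def apply_mutation(mutation):
    # Branch on the entity operation rather than on transaction-table actions.
    if mutation['operation'] == Operations.PUT:
        print('insert into {table}: {key} -> {values}'.format(**mutation))
    elif mutation['operation'] == Operations.DELETE:
        print('delete from {table}: {key}'.format(**mutation))


apply_mutation({'table': 'app_entity', 'key': 'greeting-1',
                'operation': Operations.PUT, 'values': {'entity': '...'}})
apply_mutation({'table': 'app_kind', 'key': 'Greeting:1',
                'operation': Operations.DELETE})
```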
2cd36c45d30275f462fe22bf8e068e22f858f009
appscale/gts
17.02.2017 14:42:44
Apache License 2.0
Move shared static functions to utils This allows other modules like the Celery worker to use them.
[ { "change_type": "MODIFY", "old_path": "AppTaskQueue/appscale/taskqueue/distributed_tq.py", "new_path": "AppTaskQueue/appscale/taskqueue/distributed_tq.py", "diff": "@@ -24,7 +24,9 @@ from task import Task\n from tq_config import TaskQueueConfig\n from .unpackaged import APPSCALE_LIB_DIR\n from .unpackaged import APPSCALE_PYTHON_APPSERVER\n-from .utils import logger\n+from .utils import (get_celery_queue_name,\n+ get_queue_function_name,\n+ logger)\n \n sys.path.append(APPSCALE_LIB_DIR)\n import appscale_info\n" }, { "change_type": "MODIFY", "old_path": "AppTaskQueue/appscale/taskqueue/tq_config.py", "new_path": "AppTaskQueue/appscale/taskqueue/tq_config.py", "diff": "@@ -9,7 +9,10 @@ from queue import PullQueue\n from queue import PushQueue\n from unpackaged import APPSCALE_LIB_DIR\n from unpackaged import APPSCALE_PYTHON_APPSERVER\n-from .utils import logger\n+from .utils import (get_celery_annotation_name,\n+ get_celery_queue_name,\n+ get_celery_worker_module_name,\n+ logger)\n \n sys.path.append(APPSCALE_PYTHON_APPSERVER)\n from google.appengine.api import queueinfo\n@@ -283,57 +286,6 @@ queue:\n file_io.delete(worker_file)\n file_io.delete(config_file)\n \n- @staticmethod\n- def get_queue_function_name(queue_name):\n- \"\"\" Returns the function name of a queue which is not the queue name for\n- namespacing and collision reasons.\n-\n- Args:\n- queue_name: The name of a queue.\n- Returns:\n- The string representing the function name.\n- \"\"\"\n- # Remove '-' because that character is not valid for a function name.\n- queue_name = queue_name.replace('-', '_')\n- return \"queue___%s\" % queue_name \n-\n- @staticmethod\n- def get_celery_annotation_name(app_id, queue_name):\n- \"\"\" Returns the annotation name for a celery configuration of a queue\n- for a given application id.\n- \n- Args:\n- app_id: The application ID.\n- queue_name: The application queue name.\n- Returns:\n- A string for the annotation tag.\n- \"\"\" \n- module_name = TaskQueueConfig.get_celery_worker_module_name(app_id)\n- function_name = TaskQueueConfig.get_queue_function_name(queue_name)\n- return \"%s.%s\" % (module_name, function_name)\n-\n- @staticmethod\n- def get_celery_worker_script_path(app_id):\n- \"\"\" Returns the full path of the worker script used for Celery.\n- \n- Args:\n- app_id: The application ID.\n- Returns:\n- A string of the full file name of the worker script.\n- \"\"\"\n- return TaskQueueConfig.CELERY_WORKER_DIR + \"app___\" + app_id + \".py\"\n-\n- @staticmethod\n- def get_celery_worker_module_name(app_id):\n- \"\"\" Returns the python module name of the queue worker script.\n- \n- Args:\n- app_id: The application ID.\n- Returns:\n- A string of the module name.\n- \"\"\"\n- return \"app___\" + app_id \n-\n @staticmethod\n def get_celery_configuration_path(app_id):\n \"\"\" Returns the full path of the configuration used for Celery.\n@@ -360,14 +312,12 @@ queue:\n if not isinstance(queue, PushQueue):\n continue\n \n- celery_name = TaskQueueConfig.get_celery_queue_name(\n- self._app_id, queue.name)\n+ celery_name = get_celery_queue_name(self._app_id, queue.name)\n queue_str = \"Queue('{name}', Exchange('{app}'), routing_key='{key}'),\"\\\n .format(name=celery_name, app=self._app_id, key=celery_name)\n celery_queues.append(queue_str)\n \n- annotation_name = TaskQueueConfig.get_celery_annotation_name(\n- self._app_id, queue.name)\n+ annotation_name = get_celery_annotation_name(self._app_id, queue.name)\n annotation = \"'{name}': {{'rate_limit': '{rate}'}},\".format(\n name=annotation_name, 
rate=queue.rate)\n annotations.append(annotation)\n@@ -410,16 +360,3 @@ CELERYD_PREFETCH_MULTIPLIER = 1\n The primary loadbalancer IP/hostname.\n \"\"\"\n return appscale_info.get_login_ip()\n-\n- @staticmethod\n- def get_celery_queue_name(app_id, queue_name):\n- \"\"\" Gets a usable queue name for celery to prevent collisions where\n- mulitple apps have the same name for a queue.\n- \n- Args:\n- app_id: The application ID.\n- queue_name: String name of the queue.\n- Returns:\n- A string to reference the queue name in celery.\n- \"\"\"\n- return app_id + \"___\" + queue_name\n" }, { "change_type": "MODIFY", "old_path": "AppTaskQueue/appscale/taskqueue/utils.py", "new_path": "AppTaskQueue/appscale/taskqueue/utils.py", "diff": "@@ -10,3 +10,56 @@ from constants import LOG_FORMAT\n logging.basicConfig(format=LOG_FORMAT)\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n+\n+\n+def get_celery_worker_module_name(app_id):\n+ \"\"\" Returns the python module name of the queue worker script.\n+\n+ Args:\n+ app_id: The application ID.\n+ Returns:\n+ A string of the module name.\n+ \"\"\"\n+ return 'app___' + app_id\n+\n+\n+def get_celery_queue_name(app_id, queue_name):\n+ \"\"\" Gets a usable queue name for celery to prevent collisions where\n+ mulitple apps have the same name for a queue.\n+\n+ Args:\n+ app_id: The application ID.\n+ queue_name: String name of the queue.\n+ Returns:\n+ A string to reference the queue name in celery.\n+ \"\"\"\n+ return app_id + \"___\" + queue_name\n+\n+\n+def get_queue_function_name(queue_name):\n+ \"\"\" Returns the function name of a queue which is not the queue name for\n+ namespacing and collision reasons.\n+\n+ Args:\n+ queue_name: The name of a queue.\n+ Returns:\n+ The string representing the function name.\n+ \"\"\"\n+ # Remove '-' because that character is not valid for a function name.\n+ queue_name = queue_name.replace('-', '_')\n+ return \"queue___%s\" % queue_name\n+\n+\n+def get_celery_annotation_name(app_id, queue_name):\n+ \"\"\" Returns the annotation name for a celery configuration of a queue\n+ for a given application id.\n+\n+ Args:\n+ app_id: The application ID.\n+ queue_name: The application queue name.\n+ Returns:\n+ A string for the annotation tag.\n+ \"\"\"\n+ module_name = get_celery_worker_module_name(app_id)\n+ function_name = get_queue_function_name(queue_name)\n+ return \"%s.%s\" % (module_name, function_name)\n" } ]
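The appscale commit above promotes the naming helpers to module-level functions in `appscale.taskqueue.utils` so the Celery worker can import them too. A short usage sketch, assuming that package is importable in your environment; the expected outputs follow directly from the definitions in the diff:

```python
from appscale.taskqueue.utils import (get_celery_annotation_name,
                                      get_celery_queue_name,
                                      get_queue_function_name)

print(get_celery_queue_name('guestbook', 'default'))       # guestbook___default
print(get_queue_function_name('mail-queue'))               # queue___mail_queue
print(get_celery_annotation_name('guestbook', 'default'))  # app___guestbook.queue___default
```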
a3814f96f5c9e27ddf3e00dc6ee4ee6401eea503
appscale/gts
06.07.2017 23:04:09
Apache License 2.0
Add MonitOperator This uses Monit's XML API and groups closely-timed Monit reloads together.
[ { "change_type": "MODIFY", "old_path": "common/appscale/common/constants.py", "new_path": "common/appscale/common/constants.py", "diff": "@@ -13,6 +13,15 @@ class HTTPCodes(object):\n INTERNAL_ERROR = 500\n NOT_IMPLEMENTED = 501\n \n+\n+class MonitStates(object):\n+ MISSING = 'missing'\n+ PENDING = 'pending' # Monit is trying to either start or stop the process.\n+ RUNNING = 'running'\n+ STOPPED = 'stopped' # Monit will likely try to start the process soon.\n+ UNMONITORED = 'unmonitored'\n+\n+\n # AppScale home directory.\n APPSCALE_HOME = os.environ.get(\"APPSCALE_HOME\", \"/root/appscale\")\n \n" }, { "change_type": "MODIFY", "old_path": "common/appscale/common/monit_interface.py", "new_path": "common/appscale/common/monit_interface.py", "diff": "@@ -1,8 +1,18 @@\n import logging\n import subprocess\n import time\n+import urllib\n+from datetime import timedelta\n+from xml.etree import ElementTree\n \n+from tornado import gen\n+from tornado.httpclient import AsyncHTTPClient\n+from tornado.httpclient import HTTPError\n+from tornado.ioloop import IOLoop\n+\n+from . import constants\n from . import misc\n+from .constants import MonitStates\n \n \"\"\" \n This file contains top level functions for starting and stopping \n@@ -46,6 +56,7 @@ def run_with_retry(args):\n \n return False\n \n+\n def start(watch, is_group=True):\n \"\"\" Instructs monit to start the given program, assuming that a configuration\n file has already been written for it.\n@@ -76,6 +87,7 @@ def start(watch, is_group=True):\n run_with_retry([MONIT, 'monitor', watch])\n return run_with_retry([MONIT, 'start', watch])\n \n+\n def stop(watch, is_group=True):\n \"\"\" Shut down the named programs monit is watching, and stop monitoring it.\n \n@@ -101,6 +113,7 @@ def stop(watch, is_group=True):\n \n return run_with_retry(stop_command)\n \n+\n def restart(watch):\n \"\"\" Instructs monit to restart all processes hosting the given watch.\n \n@@ -118,3 +131,129 @@ def restart(watch):\n logging.info(\"Restarting watch {0}\".format(watch))\n return run_with_retry([MONIT, 'restart', '-g', watch])\n \n+\n+def process_status(response, process_name):\n+ \"\"\" Extracts a watch's status from a Monit response.\n+\n+ Args:\n+ response: An XML string\n+ \"\"\"\n+ root = ElementTree.XML(response)\n+ for service in root.iter('service'):\n+ name = service.find('name').text\n+ if name != process_name:\n+ continue\n+\n+ monitored = int(service.find('monitor').text)\n+ status = int(service.find('status').text)\n+ if monitored == 0:\n+ return constants.MonitStates.UNMONITORED\n+ elif monitored == 1:\n+ if status == 0:\n+ return constants.MonitStates.RUNNING\n+ else:\n+ return constants.MonitStates.STOPPED\n+ else:\n+ return constants.MonitStates.PENDING\n+\n+ return constants.MonitStates.MISSING\n+\n+\n+class MonitOperator(object):\n+ \"\"\" Handles Monit operations. \"\"\"\n+\n+ # The location of Monit's XML API.\n+ LOCATION = 'http://localhost:2812'\n+\n+ # The number of seconds to wait between each reload operation.\n+ RELOAD_COOLDOWN = 1\n+\n+ def __init__(self):\n+ \"\"\" Creates a new MonitOperator. There should only be one. \"\"\"\n+ self.reload_future = None\n+ self.client = AsyncHTTPClient()\n+ self.last_reload = time.time()\n+\n+ @gen.coroutine\n+ def reload(self):\n+ \"\"\" Groups closely-timed reload operations. 
\"\"\"\n+ if self.reload_future is None or self.reload_future.done():\n+ self.reload_future = self._reload()\n+\n+ yield self.reload_future\n+\n+ @gen.coroutine\n+ def get_status(self, process_name):\n+ \"\"\" Retrieves the status of a given process.\n+\n+ Args:\n+ process_name: A string specifying a monit watch.\n+ Returns:\n+ A string specifying the current status.\n+ \"\"\"\n+ status_url = '{}/_status?format=xml'.format(self.LOCATION)\n+ response = yield self.client.fetch(status_url)\n+ raise gen.Return(process_status(response.body, process_name))\n+\n+ @gen.coroutine\n+ def send_command(self, process_name, command):\n+ \"\"\" Sends a command to the Monit API.\n+\n+ Args:\n+ process_name: A string specifying a monit watch.\n+ command: A string specifying the command to send.\n+ \"\"\"\n+ process_url = '{}/{}'.format(self.LOCATION, process_name)\n+ payload = urllib.urlencode({'action': command})\n+ while True:\n+ try:\n+ yield self.client.fetch(process_url, method='POST', body=payload)\n+ return\n+ except HTTPError:\n+ yield gen.sleep(.2)\n+\n+ @gen.coroutine\n+ def wait_for_status(self, process_name, acceptable_states):\n+ \"\"\" Waits until a process is in a desired state.\n+\n+ Args:\n+ process_name: A string specifying a monit watch.\n+ acceptable_states: An iterable of strings specifying states.\n+ \"\"\"\n+ while True:\n+ status = yield self.get_status(process_name)\n+ if status in acceptable_states:\n+ raise gen.Return(status)\n+ yield gen.sleep(.2)\n+\n+ @gen.coroutine\n+ def ensure_running(self, process_name):\n+ \"\"\" Waits for a process to finish starting.\n+\n+ Args:\n+ process_name: A string specifying a monit watch.\n+ \"\"\"\n+ while True:\n+ non_missing_states = (\n+ MonitStates.RUNNING, MonitStates.UNMONITORED, MonitStates.PENDING,\n+ MonitStates.STOPPED)\n+ status_future = self.wait_for_status(process_name, non_missing_states)\n+ status = yield gen.with_timeout(timedelta(seconds=5), status_future,\n+ IOLoop.current())\n+\n+ if status == constants.MonitStates.RUNNING:\n+ raise gen.Return()\n+\n+ if status == constants.MonitStates.UNMONITORED:\n+ yield self.send_command(process_name, 'start')\n+\n+ yield gen.sleep(1)\n+\n+ @gen.coroutine\n+ def _reload(self):\n+ \"\"\" Reloads Monit. \"\"\"\n+ time_since_reload = time.time() - self.last_reload\n+ wait_time = max(self.RELOAD_COOLDOWN - time_since_reload, 0)\n+ yield gen.sleep(wait_time)\n+ self.last_reload = time.time()\n+ subprocess.check_call(['monit', 'reload'])\n" } ]
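The `MonitOperator` above coalesces closely-timed reload requests by letting concurrent callers share one in-flight future and enforcing a cooldown before shelling out to `monit reload`. The real class uses Tornado coroutines; the sketch below shows the same coalescing pattern with asyncio, and the class name and print stand-in are illustrative:

```python
import asyncio
import time


class ReloadCoalescer(object):
    """Group closely-timed reload requests so concurrent callers share one reload."""

    RELOAD_COOLDOWN = 1  # seconds between actual reload operations

    def __init__(self):
        self._reload_task = None
        self._last_reload = time.time()

    async def reload(self):
        # Reuse the in-flight reload if one exists; otherwise start a new one.
        if self._reload_task is None or self._reload_task.done():
            self._reload_task = asyncio.ensure_future(self._do_reload())
        await self._reload_task

    async def _do_reload(self):
        wait = max(self.RELOAD_COOLDOWN - (time.time() - self._last_reload), 0)
        await asyncio.sleep(wait)
        self._last_reload = time.time()
        # The real operator runs: subprocess.check_call(['monit', 'reload'])
        print("monit reload")


async def main():
    op = ReloadCoalescer()
    # Three near-simultaneous requests result in a single reload.
    await asyncio.gather(op.reload(), op.reload(), op.reload())


asyncio.run(main())
```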
bceb7f05916e43611303c87a34c9062e275711ba
appscale/gts
19.06.2017 21:10:25
Apache License 2.0
Allow DeploymentConfig to take a KazooClient This makes it easier to reuse the KazooClient for other things.
[ { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/scripts/blobstore.py", "new_path": "AppDB/appscale/datastore/scripts/blobstore.py", "diff": "@@ -31,6 +31,7 @@ from appscale.common.constants import LOG_FORMAT\n from appscale.common.deployment_config import DeploymentConfig\n from appscale.common.deployment_config import ConfigInaccessible\n from appscale.common.unpackaged import APPSCALE_PYTHON_APPSERVER\n+from kazoo.client import KazooClient\n from StringIO import StringIO\n \n sys.path.append(APPSCALE_PYTHON_APPSERVER)\n@@ -423,7 +424,10 @@ def main():\n args = parser.parse_args()\n \n datastore_path = args.datastore_path\n- deployment_config = DeploymentConfig(appscale_info.get_zk_locations_string())\n+ zk_ips = appscale_info.get_zk_node_ips()\n+ zk_client = KazooClient(hosts=','.join(zk_ips))\n+ zk_client.start()\n+ deployment_config = DeploymentConfig(zk_client)\n setup_env()\n \n http_server = tornado.httpserver.HTTPServer(\n" }, { "change_type": "MODIFY", "old_path": "AppManager/app_manager_server.py", "new_path": "AppManager/app_manager_server.py", "diff": "@@ -17,6 +17,7 @@ import urllib\n import urllib2\n from xml.etree import ElementTree\n \n+from kazoo.client import KazooClient\n from M2Crypto import SSL\n from tornado.httpclient import HTTPClient\n from tornado.httpclient import HTTPError\n@@ -847,7 +848,11 @@ def is_config_valid(config):\n ################################\n if __name__ == \"__main__\":\n file_io.set_logging_format()\n- deployment_config = DeploymentConfig(appscale_info.get_zk_locations_string())\n+\n+ zk_ips = appscale_info.get_zk_node_ips()\n+ zk_client = KazooClient(hosts=','.join(zk_ips))\n+ zk_client.start()\n+ deployment_config = DeploymentConfig(zk_client)\n \n INTERNAL_IP = appscale_info.get_private_ip()\n SERVER = SOAPpy.SOAPServer((INTERNAL_IP, constants.APP_MANAGER_PORT))\n" }, { "change_type": "MODIFY", "old_path": "common/appscale/common/deployment_config.py", "new_path": "common/appscale/common/deployment_config.py", "diff": "@@ -2,7 +2,6 @@ import json\n import logging\n import time\n \n-from kazoo.client import KazooClient\n from kazoo.client import KazooException\n from kazoo.client import KazooState\n from kazoo.client import NoNodeError\n@@ -34,19 +33,18 @@ class DeploymentConfig(object):\n # The ZooKeeper node where configuration is stored.\n CONFIG_ROOT = '/appscale/config'\n \n- def __init__(self, hosts):\n+ def __init__(self, zk_client):\n \"\"\" Creates new DeploymentConfig object.\n \n Args:\n- hosts: A list of ZooKeeper hosts.\n+ zk_client: A KazooClient.\n \"\"\"\n self.logger = logging.getLogger(self.__class__.__name__)\n self.update_lock = Lock()\n self.state = ConfigStates.LOADING\n self.config = {}\n- self.conn = KazooClient(hosts=hosts, read_only=True)\n+ self.conn = zk_client\n self.conn.add_listener(self._conn_listener)\n- self.conn.start()\n self.conn.ensure_path(self.CONFIG_ROOT)\n self.conn.ChildrenWatch(self.CONFIG_ROOT, func=self._update_config)\n \n" }, { "change_type": "MODIFY", "old_path": "scripts/setup_cassandra_config_files.py", "new_path": "scripts/setup_cassandra_config_files.py", "diff": "@@ -7,6 +7,8 @@ import os\n import pkgutil\n import sys\n \n+from kazoo.client import KazooClient\n+\n from appscale.common import appscale_info\n from appscale.common.deployment_config import DeploymentConfig\n from appscale.common.deployment_config import InvalidConfig\n@@ -28,7 +30,9 @@ if __name__ == \"__main__\":\n args = parser.parse_args()\n zk_locations = args.zk_locations if args.zk_locations else \\\n 
appscale_info.get_zk_locations_string()\n- deployment_config = DeploymentConfig(zk_locations)\n+ zk_client = KazooClient(hosts=zk_locations)\n+ zk_client.start()\n+ deployment_config = DeploymentConfig(zk_client)\n cassandra_config = deployment_config.get_config('cassandra')\n if 'num_tokens' not in cassandra_config:\n raise InvalidConfig('num_tokens not specified in deployment config.')\n" } ]
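A condensed sketch of the call pattern introduced by the diff above: the caller creates and starts the KazooClient itself, then hands it to DeploymentConfig, leaving the same client free to be reused by other components. Error handling is omitted.

from kazoo.client import KazooClient

from appscale.common import appscale_info
from appscale.common.deployment_config import DeploymentConfig

zk_ips = appscale_info.get_zk_node_ips()
zk_client = KazooClient(hosts=','.join(zk_ips))
zk_client.start()

deployment_config = DeploymentConfig(zk_client)
cassandra_config = deployment_config.get_config('cassandra')

# The same started client can now be shared with any other consumer of
# ZooKeeper in the process.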
f3c9734ee6177ae5897fdce63676d2f8886902d8
appscale/gts
19.07.2017 15:52:08
Apache License 2.0
Reduce unnecessary "exists" watches This allows managers to stop watches if it's clear they are no longer needed.
[ { "change_type": "MODIFY", "old_path": "AdminServer/appscale/admin/push_worker_manager.py", "new_path": "AdminServer/appscale/admin/push_worker_manager.py", "diff": "@@ -5,6 +5,7 @@ import json\n import os\n from datetime import timedelta\n \n+from kazoo.exceptions import ZookeeperError\n from tornado import gen\n from tornado.ioloop import IOLoop\n from tornado.options import options\n@@ -57,11 +58,13 @@ class ProjectPushWorkerManager(object):\n monit_operator: A MonitOperator.\n project_id: A string specifying a project ID.\n \"\"\"\n+ self.zk_client = zk_client\n self.project_id = project_id\n self.monit_operator = monit_operator\n- queues_node = '/appscale/projects/{}/queues'.format(project_id)\n- self.watch = zk_client.DataWatch(queues_node, self._update_worker)\n+ self.queues_node = '/appscale/projects/{}/queues'.format(project_id)\n+ self.watch = zk_client.DataWatch(self.queues_node, self._update_worker)\n self.monit_watch = 'celery-{}'.format(project_id)\n+ self._stopped = False\n \n @gen.coroutine\n def update_worker(self, queue_config):\n@@ -118,6 +121,13 @@ class ProjectPushWorkerManager(object):\n '-Ofair'\n ])\n \n+ def ensure_watch(self):\n+ \"\"\" Restart the watch if it has been cancelled. \"\"\"\n+ if self._stopped:\n+ self._stopped = False\n+ self.watch = self.zk_client.DataWatch(self.queues_node,\n+ self._update_worker)\n+\n @gen.coroutine\n def _wait_for_stable_state(self):\n \"\"\" Waits until the worker's state is not pending. \"\"\"\n@@ -158,11 +168,21 @@ class ProjectPushWorkerManager(object):\n queue_config: A JSON string specifying queue configuration.\n \"\"\"\n main_io_loop = IOLoop.instance()\n- main_io_loop.add_callback(self.update_worker, queue_config)\n \n- def stop(self):\n- \"\"\" Cancels the ZooKeeper watch for the project's queue configuration. 
\"\"\"\n- self.watch._stopped = True\n+ # Prevent further watches if they are no longer needed.\n+ if queue_config is None:\n+ try:\n+ project_exists = self.zk_client.exists(\n+ '/appscale/projects/{}'.format(self.project_id)) is not None\n+ except ZookeeperError:\n+ # If the project has been deleted, an extra \"exists\" watch will remain.\n+ project_exists = True\n+\n+ if not project_exists:\n+ self._stopped = True\n+ return False\n+\n+ main_io_loop.add_callback(self.update_worker, queue_config)\n \n \n class GlobalPushWorkerManager(object):\n@@ -189,7 +209,6 @@ class GlobalPushWorkerManager(object):\n to_stop = [project for project in self.projects\n if project not in new_project_list]\n for project_id in to_stop:\n- self.projects[project_id].stop()\n del self.projects[project_id]\n \n for new_project_id in new_project_list:\n@@ -197,6 +216,9 @@ class GlobalPushWorkerManager(object):\n self.projects[new_project_id] = ProjectPushWorkerManager(\n self.zk_client, self.monit_operator, new_project_id)\n \n+ # Handle changes that happen between watches.\n+ self.projects[new_project_id].ensure_watch()\n+\n def _update_projects(self, new_projects):\n \"\"\" Handles creation and deletion of projects.\n \n" }, { "change_type": "MODIFY", "old_path": "AppTaskQueue/appscale/taskqueue/queue_manager.py", "new_path": "AppTaskQueue/appscale/taskqueue/queue_manager.py", "diff": "@@ -2,6 +2,7 @@\n \n import json\n \n+from kazoo.exceptions import ZookeeperError\n from tornado.ioloop import IOLoop\n \n from appscale.taskqueue.utils import create_celery_for_app\n@@ -21,12 +22,15 @@ class ProjectQueueManager(dict):\n project_id: A string specifying a project ID.\n \"\"\"\n super(ProjectQueueManager, self).__init__()\n+ self.zk_client = zk_client\n self.project_id = project_id\n self.db_access = db_access\n- queues_node = '/appscale/projects/{}/queues'.format(project_id)\n- self.watch = zk_client.DataWatch(queues_node, self._update_queues_watch)\n+ self.queues_node = '/appscale/projects/{}/queues'.format(project_id)\n+ self.watch = zk_client.DataWatch(self.queues_node,\n+ self._update_queues_watch)\n self.celery = None\n self.rates = None\n+ self._stopped = False\n \n def update_queues(self, queue_config):\n \"\"\" Caches new configuration details and cleans up old state.\n@@ -73,9 +77,15 @@ class ProjectQueueManager(dict):\n for queue in push_queues:\n queue.celery = self.celery\n \n+ def ensure_watch(self):\n+ \"\"\" Restart the watch if it has been cancelled. \"\"\"\n+ if self._stopped:\n+ self._stopped = False\n+ self.watch = self.zk_client.DataWatch(self.queues_node,\n+ self._update_queues_watch)\n+\n def stop(self):\n- \"\"\" Removes all cached queue configuration and closes connection. \"\"\"\n- self.watch._stopped = True\n+ \"\"\" Close the Celery connections if they still exist. 
\"\"\"\n if self.celery is not None:\n self.celery.close()\n \n@@ -89,6 +99,20 @@ class ProjectQueueManager(dict):\n queue_config: A JSON string specifying queue configuration.\n \"\"\"\n main_io_loop = IOLoop.instance()\n+\n+ # Prevent further watches if they are no longer needed.\n+ if queue_config is None:\n+ try:\n+ project_exists = self.zk_client.exists(\n+ '/appscale/projects/{}'.format(self.project_id)) is not None\n+ except ZookeeperError:\n+ # If the project has been deleted, an extra \"exists\" watch will remain.\n+ project_exists = True\n+\n+ if not project_exists:\n+ self._stopped = True\n+ return False\n+\n main_io_loop.add_callback(self.update_queues, queue_config)\n \n \n@@ -124,6 +148,9 @@ class GlobalQueueManager(dict):\n self[project_id] = ProjectQueueManager(self.zk_client, self.db_access,\n project_id)\n \n+ # Handle changes that happen between watches.\n+ self[project_id].ensure_watch()\n+\n def _update_projects_watch(self, new_projects):\n \"\"\" Handles creation and deletion of projects.\n \n" }, { "change_type": "MODIFY", "old_path": "common/appscale/common/deployment_config.py", "new_path": "common/appscale/common/deployment_config.py", "diff": "@@ -37,15 +37,20 @@ class DeploymentConfigSection(object):\n section: A string specifying a configuration section name.\n \"\"\"\n self.logger = logging.getLogger(self.__class__.__name__)\n+ self.zk_client = zk_client\n self.section_name = section\n self.data = {}\n+ self._stopped = False\n \n- section_node = '/appscale/config/{}'.format(section)\n- self.watch = zk_client.DataWatch(section_node, self._update_section)\n+ self.section_node = '/appscale/config/{}'.format(section)\n+ self.watch = zk_client.DataWatch(self.section_node, self._update_section)\n \n- def stop(self):\n- \"\"\" Stops the DataWatch on the configuration section. \"\"\"\n- self.watch._stopped = True\n+ def ensure_watch(self):\n+ \"\"\" Restart the watch if it has been cancelled. \"\"\"\n+ if self._stopped:\n+ self._stopped = False\n+ self.watch = self.zk_client.DataWatch(self.section_node,\n+ self._update_section)\n \n def _update_section(self, section_data, _):\n \"\"\" Updates the configuration data when the section node gets updated.\n@@ -53,6 +58,11 @@ class DeploymentConfigSection(object):\n Args:\n section_data: A JSON string specifying configuration data.\n \"\"\"\n+ # If the section no longer exists, stop watching it.\n+ if section_data is None:\n+ self._stopped = True\n+ return False\n+\n try:\n self.data = json.loads(section_data)\n except ValueError:\n@@ -130,15 +140,15 @@ class DeploymentConfig(object):\n to_remove = [section for section in self.config\n if section not in children]\n for section_name in to_remove:\n- self.config[section_name].stop()\n del self.config[section_name]\n \n # Add new configuration sections.\n for child in children:\n- if child in self.config:\n- continue\n+ if child not in self.config:\n+ self.config[child] = DeploymentConfigSection(self.conn, child)\n \n- self.config[child] = DeploymentConfigSection(self.conn, child)\n+ # Handle changes that happen between watches.\n+ self.config[child].ensure_watch()\n \n self.logger.info('Deployment configuration updated')\n self.state = ConfigStates.LOADED\n" } ]
2f910152193af3ef69ce16e062832433c1bf75db
appscale/gts
14.09.2017 17:00:07
Apache License 2.0

Implement the datastore_v4.AllocateIds API This allows clients to ensure that a list of entity IDs is never re-allocated.
[ { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/cassandra_env/entity_id_allocator.py", "new_path": "AppDB/appscale/datastore/cassandra_env/entity_id_allocator.py", "diff": "@@ -48,6 +48,10 @@ class EntityIDAllocator(object):\n else:\n self.max_allowed = _MAX_SEQUENTIAL_COUNTER\n \n+ # Allows the allocator to avoid making unnecessary Cassandra requests when\n+ # setting the minimum counter value.\n+ self._last_reserved_cache = None\n+\n def _ensure_entry(self, retries=5):\n \"\"\" Ensures an entry exists for a reservation.\n \n@@ -90,6 +94,7 @@ class EntityIDAllocator(object):\n self._ensure_entry()\n return self._get_last_reserved()\n \n+ self._last_reserved_cache = result.last_reserved\n return result.last_reserved\n \n def _get_last_op_id(self):\n@@ -138,12 +143,15 @@ class EntityIDAllocator(object):\n if not result.was_applied:\n raise ReservationFailed('Last reserved value changed')\n \n- def allocate_size(self, size, retries=5):\n+ self._last_reserved_cache = new_reserved\n+\n+ def allocate_size(self, size, retries=5, min_counter=None):\n \"\"\" Reserve a block of IDs for this project.\n \n Args:\n size: The number of IDs to reserve.\n retries: The number of times to retry the reservation.\n+ min_counter: The minimum counter value that should be reserved.\n Returns:\n A tuple of integers specifying the start and end ID.\n Raises:\n@@ -158,7 +166,11 @@ class EntityIDAllocator(object):\n except TRANSIENT_CASSANDRA_ERRORS:\n raise AppScaleDBConnectionError('Unable to get last reserved ID')\n \n- new_reserved = last_reserved + size\n+ if min_counter is None:\n+ new_reserved = last_reserved + size\n+ else:\n+ new_reserved = max(last_reserved, min_counter) + size\n+\n if new_reserved > self.max_allowed:\n raise AppScaleBadArg('Exceeded maximum allocated IDs')\n \n@@ -195,7 +207,7 @@ class EntityIDAllocator(object):\n raise AppScaleDBConnectionError('Unable to get last reserved ID')\n \n # Instead of returning an error, the API returns an invalid range.\n- if last_reserved > max_id:\n+ if last_reserved >= max_id:\n return last_reserved + 1, last_reserved\n \n try:\n@@ -207,6 +219,18 @@ class EntityIDAllocator(object):\n end_id = max_id\n return start_id, end_id\n \n+ def set_min_counter(self, counter):\n+ \"\"\" Ensures the counter is at least as large as the given value.\n+\n+ Args:\n+ counter: An integer specifying the minimum counter value.\n+ \"\"\"\n+ if (self._last_reserved_cache is not None and\n+ self._last_reserved_cache >= counter):\n+ return\n+\n+ self.allocate_max(counter)\n+\n \n class ScatteredAllocator(EntityIDAllocator):\n \"\"\" An iterator that generates evenly-distributed entity IDs. 
\"\"\"\n@@ -241,3 +265,26 @@ class ScatteredAllocator(EntityIDAllocator):\n next_id = ToScatteredId(self.start_id)\n self.start_id += 1\n return next_id\n+\n+ def set_min_counter(self, counter):\n+ \"\"\" Ensures the counter is at least as large as the given value.\n+\n+ Args:\n+ counter: An integer specifying the minimum counter value.\n+ \"\"\"\n+ if self.start_id is not None and self.start_id >= counter:\n+ return\n+\n+ if self.end_id is not None and self.end_id > counter:\n+ self.start_id = max(self.start_id, counter)\n+\n+ if self.start_id is None:\n+ if (self._last_reserved_cache is not None and\n+ self._last_reserved_cache >= counter):\n+ return\n+\n+ self.allocate_max(counter)\n+ return\n+\n+ self.start_id, self.end_id = self.allocate_size(DEFAULT_RESERVATION_SIZE,\n+ min_counter=counter)\n" }, { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/datastore_distributed.py", "new_path": "AppDB/appscale/datastore/datastore_distributed.py", "diff": "@@ -44,6 +44,8 @@ from google.appengine.datastore import datastore_pb\n from google.appengine.datastore import datastore_index\n from google.appengine.datastore import entity_pb\n from google.appengine.datastore import sortable_pb_encoder\n+from google.appengine.datastore.datastore_stub_util import IdToCounter\n+from google.appengine.datastore.datastore_stub_util import SEQUENTIAL\n from google.appengine.runtime import apiproxy_errors\n from google.appengine.ext import db\n from google.appengine.ext.db.metadata import Namespace\n@@ -135,6 +137,9 @@ class DatastoreDistributed():\n # Maintain a scattered allocator for each project.\n self.scattered_allocators = {}\n \n+ # Maintain a sequential allocator for each project.\n+ self.sequential_allocators = {}\n+\n def get_limit(self, query):\n \"\"\" Returns the limit that should be used for the given query.\n \n@@ -470,7 +475,11 @@ class DatastoreDistributed():\n Returns:\n A tuple of integers specifying the start and end ID.\n \"\"\"\n- allocator = EntityIDAllocator(self.datastore_batch.session, project)\n+ if project not in self.sequential_allocators:\n+ self.sequential_allocators[project] = EntityIDAllocator(\n+ self.datastore_batch.session, project)\n+\n+ allocator = self.sequential_allocators[project]\n return allocator.allocate_size(size)\n \n def allocate_max(self, project, max_id):\n@@ -482,9 +491,37 @@ class DatastoreDistributed():\n Returns:\n A tuple of integers specifying the start and end ID.\n \"\"\"\n- allocator = EntityIDAllocator(self.datastore_batch.session, project)\n+ if project not in self.sequential_allocators:\n+ self.sequential_allocators[project] = EntityIDAllocator(\n+ self.datastore_batch.session, project)\n+\n+ allocator = self.sequential_allocators[project]\n return allocator.allocate_max(max_id)\n \n+ def reserve_ids(self, project_id, ids):\n+ \"\"\" Ensures the given IDs are not re-allocated.\n+\n+ Args:\n+ project_id: A string specifying the project ID.\n+ ids: An iterable of integers specifying entity IDs.\n+ \"\"\"\n+ if project_id not in self.sequential_allocators:\n+ self.sequential_allocators[project_id] = EntityIDAllocator(\n+ self.datastore_batch.session, project_id)\n+\n+ if project_id not in self.scattered_allocators:\n+ self.scattered_allocators[project_id] = ScatteredAllocator(\n+ self.datastore_batch.session, project_id)\n+\n+ for id_ in ids:\n+ counter, space = IdToCounter(id_)\n+ if space == SEQUENTIAL:\n+ allocator = self.sequential_allocators[project_id]\n+ else:\n+ allocator = self.scattered_allocators[project_id]\n+\n+ 
allocator.set_min_counter(counter)\n+\n def put_entities(self, app, entities, composite_indexes=()):\n \"\"\" Updates indexes of existing entities, inserts new entities and \n indexes for them.\n" }, { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/scripts/datastore.py", "new_path": "AppDB/appscale/datastore/scripts/datastore.py", "diff": "@@ -30,6 +30,7 @@ sys.path.append(APPSCALE_PYTHON_APPSERVER)\n from google.appengine.api import api_base_pb\n from google.appengine.api.taskqueue import taskqueue_service_pb\n from google.appengine.datastore import datastore_pb\n+from google.appengine.datastore import datastore_v4_pb\n from google.appengine.datastore import entity_pb\n from google.appengine.ext.remote_api import remote_api_pb\n \n@@ -226,6 +227,9 @@ class MainHandler(tornado.web.RequestHandler):\n elif method == 'AddActions':\n response, errcode, errdetail = self.add_actions_request(\n app_id, http_request_data)\n+ elif method == 'datastore_v4.AllocateIds':\n+ response, errcode, errdetail = self.v4_allocate_ids_request(\n+ app_id, http_request_data)\n else:\n errcode = datastore_pb.Error.BAD_REQUEST \n errdetail = \"Unknown datastore message\" \n@@ -539,6 +543,27 @@ class MainHandler(tornado.web.RequestHandler):\n response.set_end(end)\n return response.Encode(), 0, \"\"\n \n+ def v4_allocate_ids_request(self, app_id, http_request_data):\n+ \"\"\" Reserves entity IDs so that they will not be re-allocated.\n+\n+ Args:\n+ app_id: Name of the application.\n+ http_request_data: The protocol buffer request from the AppServer.\n+ Returns:\n+ Returns an encoded response.\n+ \"\"\"\n+ request = datastore_v4_pb.AllocateIdsRequest(http_request_data)\n+ response = datastore_v4_pb.AllocateIdsResponse()\n+\n+ if not request.reserve_list():\n+ return (response.Encode(), datastore_v4_pb.Error.BAD_REQUEST,\n+ 'Request must include reserve list')\n+\n+ ids = [key.path_element_list()[-1].id() for key in request.reserve_list()]\n+ datastore_access.reserve_ids(app_id, ids)\n+\n+ return response.Encode(), 0, ''\n+\n def put_request(self, app_id, http_request_data):\n \"\"\" High level function for doing puts.\n \n" } ]
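To illustrate the new reservation behaviour, a small sketch that drives the sequential allocator directly. The local Cassandra contact point and project name are assumptions, and the sketch presumes the AppScale schema already exists; set_min_counter only issues a reservation when the cached last-reserved value is below the requested counter.

from cassandra.cluster import Cluster

from appscale.datastore.cassandra_env.entity_id_allocator import EntityIDAllocator

# Assumption: Cassandra is reachable locally and the AppScale schema exists.
session = Cluster(['127.0.0.1']).connect()

allocator = EntityIDAllocator(session, 'guestbook')

# Make sure IDs up to 5000 are never handed out again.
allocator.set_min_counter(5000)

# Usually a no-op: the cached last-reserved value already covers 4000, so no
# extra Cassandra request should be needed.
allocator.set_min_counter(4000)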
7ed0b36c3d0414f87a82efdbdf615dec7c97b71e
appscale/gts
06.11.2017 11:47:00
Apache License 2.0
Simplify datastore error handling If an error code is defined, the response body does not need to be specified. This prevents encoding issues in cases when a response message has required fields.
[ { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/datastore_distributed.py", "new_path": "AppDB/appscale/datastore/datastore_distributed.py", "diff": "@@ -3327,30 +3327,27 @@ class DatastoreDistributed():\n Returns:\n An encoded protocol buffer commit response.\n \"\"\"\n- commitres_pb = datastore_pb.CommitResponse()\n transaction_pb = datastore_pb.Transaction(http_request_data)\n txn_id = transaction_pb.handle()\n \n try:\n self.apply_txn_changes(app_id, txn_id)\n except dbconstants.TxTimeoutException as timeout:\n- return commitres_pb.Encode(), datastore_pb.Error.TIMEOUT, str(timeout)\n+ return '', datastore_pb.Error.TIMEOUT, str(timeout)\n except dbconstants.AppScaleDBConnectionError:\n self.logger.exception('DB connection error during commit')\n- return (commitres_pb.Encode(), datastore_pb.Error.INTERNAL_ERROR,\n+ return ('', datastore_pb.Error.INTERNAL_ERROR,\n 'Datastore connection error on Commit request.')\n except dbconstants.ConcurrentModificationException as error:\n- return (commitres_pb.Encode(), datastore_pb.Error.CONCURRENT_TRANSACTION,\n- str(error))\n+ return '', datastore_pb.Error.CONCURRENT_TRANSACTION, str(error)\n except dbconstants.TooManyGroupsException as error:\n- return (commitres_pb.Encode(), datastore_pb.Error.BAD_REQUEST,\n- str(error))\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n except entity_lock.LockTimeout as error:\n- return (commitres_pb.Encode(), datastore_pb.Error.TIMEOUT,\n- str(error))\n+ return '', datastore_pb.Error.TIMEOUT, str(error)\n \n self.zookeeper.remove_tx_node(app_id, txn_id)\n- return commitres_pb.Encode(), 0, \"\"\n+ commitres_pb = datastore_pb.CommitResponse()\n+ return commitres_pb.Encode(), 0, ''\n \n def rollback_transaction(self, app_id, http_request_data):\n \"\"\" Handles the rollback phase of a transaction.\n@@ -3366,13 +3363,11 @@ class DatastoreDistributed():\n 'Doing a rollback on transaction {} for {}'.format(txn.handle(), app_id))\n try:\n self.zookeeper.notify_failed_transaction(app_id, txn.handle())\n- return (api_base_pb.VoidProto().Encode(), 0, \"\")\n+ return api_base_pb.VoidProto().Encode(), 0, ''\n except zktransaction.ZKTransactionException as zkte:\n self.logger.exception('Unable to rollback {} for {}'.\n format(txn, app_id))\n- return (api_base_pb.VoidProto().Encode(),\n- datastore_pb.Error.PERMISSION_DENIED, \n- \"Unable to rollback for this transaction: {0}\".format(str(zkte)))\n+ return '', datastore_pb.Error.PERMISSION_DENIED, str(zkte)\n \n def _zk_state_listener(self, state):\n \"\"\" Handles changes to the ZooKeeper connection state.\n" }, { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/scripts/datastore.py", "new_path": "AppDB/appscale/datastore/scripts/datastore.py", "diff": "@@ -191,16 +191,9 @@ class MainHandler(tornado.web.RequestHandler):\n apirequest.ParseFromString(http_request_data)\n apiresponse = remote_api_pb.Response()\n response = None\n- errcode = 0\n- errdetail = \"\"\n- apperror_pb = None\n- if not apirequest.has_method(): \n- errcode = datastore_pb.Error.BAD_REQUEST\n- errdetail = \"Method was not set in request\"\n+ if not apirequest.has_method():\n apirequest.set_method(\"NOT_FOUND\")\n if not apirequest.has_request():\n- errcode = datastore_pb.Error.BAD_REQUEST\n- errdetail = \"Request missing in call\"\n apirequest.set_method(\"NOT_FOUND\")\n apirequest.clear_request()\n method = apirequest.method()\n@@ -295,26 +288,23 @@ class MainHandler(tornado.web.RequestHandler):\n if begin_transaction_req_pb.has_allow_multiple_eg():\n multiple_eg = 
bool(begin_transaction_req_pb.allow_multiple_eg())\n \n- handle = None\n- transaction_pb = datastore_pb.Transaction()\n-\n if READ_ONLY:\n logger.warning('Unable to begin transaction in read-only mode: {}'.\n format(begin_transaction_req_pb))\n- return (transaction_pb.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try:\n handle = datastore_access.setup_transaction(app_id, multiple_eg)\n except (zktransaction.ZKInternalException,\n dbconstants.AppScaleDBConnectionError) as error:\n logger.exception('Unable to begin transaction')\n- return (transaction_pb.Encode(), datastore_pb.Error.INTERNAL_ERROR,\n- str(error))\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n \n+ transaction_pb = datastore_pb.Transaction()\n transaction_pb.set_app(app_id)\n transaction_pb.set_handle(handle)\n- return (transaction_pb.Encode(), 0, \"\")\n+ return transaction_pb.Encode(), 0, ''\n \n def commit_transaction_request(self, app_id, http_request_data):\n \"\"\" Handles the commit phase of a transaction.\n@@ -328,12 +318,11 @@ class MainHandler(tornado.web.RequestHandler):\n global datastore_access\n \n if READ_ONLY:\n- commitres_pb = datastore_pb.CommitResponse()\n transaction_pb = datastore_pb.Transaction(http_request_data)\n logger.warning('Unable to commit in read-only mode: {}'.\n format(transaction_pb))\n- return (commitres_pb.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n return datastore_access.commit_transaction(app_id, http_request_data)\n \n@@ -347,26 +336,22 @@ class MainHandler(tornado.web.RequestHandler):\n An encoded protocol buffer void response.\n \"\"\"\n global datastore_access\n- response = api_base_pb.VoidProto()\n \n if READ_ONLY:\n logger.warning('Unable to rollback in read-only mode: {}'.\n format(http_request_data))\n- return (response.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try:\n return datastore_access.rollback_transaction(app_id, http_request_data)\n- except zktransaction.ZKInternalException:\n+ except zktransaction.ZKInternalException as error:\n logger.exception('ZKInternalException during {} for {}'.\n format(http_request_data, app_id))\n- return (response.Encode(), datastore_pb.Error.INTERNAL_ERROR,\n- \"Internal error with ZooKeeper connection.\")\n- except Exception:\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ except Exception as error:\n logger.exception('Unable to rollback transaction')\n- return(response.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Unable to rollback for this transaction\")\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n \n def run_query(self, http_request_data):\n \"\"\" High level function for running queries.\n@@ -381,31 +366,21 @@ class MainHandler(tornado.web.RequestHandler):\n clone_qr_pb = UnprocessedQueryResult()\n try:\n datastore_access._dynamic_run_query(query, clone_qr_pb)\n- except zktransaction.ZKBadRequest, zkie:\n+ except zktransaction.ZKBadRequest as error:\n logger.exception('Illegal arguments in transaction during {}'.\n format(query))\n- return (clone_qr_pb.Encode(),\n- datastore_pb.Error.BAD_REQUEST, \n- \"Illegal arguments for transaction. 
{0}\".format(str(zkie)))\n- except zktransaction.ZKInternalException:\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n+ except zktransaction.ZKInternalException as error:\n logger.exception('ZKInternalException during {}'.format(query))\n- clone_qr_pb.set_more_results(False)\n- return (clone_qr_pb.Encode(), \n- datastore_pb.Error.INTERNAL_ERROR, \n- \"Internal error with ZooKeeper connection.\")\n- except zktransaction.ZKTransactionException:\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ except zktransaction.ZKTransactionException as error:\n logger.exception('Concurrent transaction during {}'.format(query))\n- clone_qr_pb.set_more_results(False)\n- return (clone_qr_pb.Encode(), \n- datastore_pb.Error.CONCURRENT_TRANSACTION, \n- \"Concurrent transaction exception on put.\")\n- except dbconstants.AppScaleDBConnectionError:\n+ return '', datastore_pb.Error.CONCURRENT_TRANSACTION, str(error)\n+ except dbconstants.AppScaleDBConnectionError as error:\n logger.exception('DB connection error during query')\n- clone_qr_pb.set_more_results(False)\n- return (clone_qr_pb.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on run_query request.\")\n- return clone_qr_pb.Encode(), 0, \"\"\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+\n+ return clone_qr_pb.Encode(), 0, ''\n \n def create_index_request(self, app_id, http_request_data):\n \"\"\" High level function for creating composite indexes.\n@@ -424,19 +399,17 @@ class MainHandler(tornado.web.RequestHandler):\n if READ_ONLY:\n logger.warning('Unable to create in read-only mode: {}'.\n format(request))\n- return (response.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try:\n index_id = datastore_access.create_composite_index(app_id, request)\n response.set_value(index_id)\n- except dbconstants.AppScaleDBConnectionError:\n+ except dbconstants.AppScaleDBConnectionError as error:\n logger.exception('DB connection error during index creation')\n- response.set_value(0)\n- return (response.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on create index request.\")\n- return response.Encode(), 0, \"\"\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+\n+ return response.Encode(), 0, ''\n \n def update_index_request(self, app_id, http_request_data):\n \"\"\" High level function for updating a composite index.\n@@ -455,8 +428,8 @@ class MainHandler(tornado.web.RequestHandler):\n if READ_ONLY:\n logger.warning('Unable to update in read-only mode: {}'.\n format(index))\n- return (response.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n state = index.state()\n if state not in [index.READ_WRITE, index.WRITE_ONLY]:\n@@ -464,14 +437,13 @@ class MainHandler(tornado.web.RequestHandler):\n error_message = 'Unable to update index because state is {}. 
'\\\n 'Index: {}'.format(state_name, index)\n logger.error(error_message)\n- return response.Encode(), datastore_pb.Error.PERMISSION_DENIED,\\\n- error_message\n+ return '', datastore_pb.Error.PERMISSION_DENIED, error_message\n else:\n # Updating index asynchronously so we can return a response quickly.\n threading.Thread(target=datastore_access.update_composite_index,\n args=(app_id, index)).start()\n \n- return response.Encode(), 0, \"\"\n+ return response.Encode(), 0, ''\n \n def delete_index_request(self, app_id, http_request_data):\n \"\"\" Deletes a composite index for a given application.\n@@ -490,17 +462,16 @@ class MainHandler(tornado.web.RequestHandler):\n if READ_ONLY:\n logger.warning('Unable to delete in read-only mode: {}'.\n format(request))\n- return (response.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try: \n datastore_access.delete_composite_index_metadata(app_id, request)\n- except dbconstants.AppScaleDBConnectionError:\n+ except dbconstants.AppScaleDBConnectionError as error:\n logger.exception('DB connection error during index deletion')\n- return (response.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on delete index request.\")\n- return response.Encode(), 0, \"\"\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+\n+ return response.Encode(), 0, ''\n \n def get_indices_request(self, app_id):\n \"\"\" Gets the indices of the given application.\n@@ -516,16 +487,16 @@ class MainHandler(tornado.web.RequestHandler):\n response = datastore_pb.CompositeIndices()\n try:\n indices = datastore_access.datastore_batch.get_indices(app_id)\n- except dbconstants.AppScaleDBConnectionError:\n+ except dbconstants.AppScaleDBConnectionError as error:\n logger.exception('DB connection error while fetching indices for '\n '{}'.format(app_id))\n- return (response.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on get indices request.\")\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+\n for index in indices:\n new_index = response.add_index()\n new_index.ParseFromString(index)\n- return response.Encode(), 0, \"\"\n+\n+ return response.Encode(), 0, ''\n \n def allocate_ids_request(self, app_id, http_request_data):\n \"\"\" High level function for getting unique identifiers for entities.\n@@ -540,33 +511,33 @@ class MainHandler(tornado.web.RequestHandler):\n NotImplementedError: when requesting a max id.\n \"\"\"\n request = datastore_pb.AllocateIdsRequest(http_request_data)\n- response = datastore_pb.AllocateIdsResponse()\n \n if request.has_max() and request.has_size():\n- return (response.Encode(), datastore_pb.Error.BAD_REQUEST,\n+ return ('', datastore_pb.Error.BAD_REQUEST,\n 'Both size and max cannot be set.')\n+\n if not (request.has_max() or request.has_size()):\n- return (response.Encode(), datastore_pb.Error.BAD_REQUEST,\n+ return ('', datastore_pb.Error.BAD_REQUEST,\n 'Either size or max must be set.')\n \n if request.has_size():\n- try:\n- start, end = datastore_access.allocate_size(app_id, request.size())\n- except dbconstants.AppScaleBadArg as error:\n- return response.Encode(), datastore_pb.Error.BAD_REQUEST, str(error)\n- except dbconstants.AppScaleDBConnectionError as error:\n- return response.Encode(), datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ method = datastore_access.allocate_size\n+ args = (app_id, request.size())\n else:\n- try:\n- 
start, end = datastore_access.allocate_max(app_id, request.max())\n- except dbconstants.AppScaleBadArg as error:\n- return response.Encode(), datastore_pb.Error.BAD_REQUEST, str(error)\n- except dbconstants.AppScaleDBConnectionError as error:\n- return response.Encode(), datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ method = datastore_access.allocate_max\n+ args = (app_id, request.max())\n \n+ try:\n+ start, end = method(*args)\n+ except dbconstants.AppScaleBadArg as error:\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n+ except dbconstants.AppScaleDBConnectionError as error:\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+\n+ response = datastore_pb.AllocateIdsResponse()\n response.set_start(start)\n response.set_end(end)\n- return response.Encode(), 0, \"\"\n+ return response.Encode(), 0, ''\n \n @staticmethod\n @gen.coroutine\n@@ -628,34 +599,26 @@ class MainHandler(tornado.web.RequestHandler):\n if READ_ONLY:\n logger.warning('Unable to put in read-only mode: {}'.\n format(putreq_pb))\n- return (putresp_pb.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try:\n datastore_access.dynamic_put(app_id, putreq_pb, putresp_pb)\n- return (putresp_pb.Encode(), 0, \"\")\n- except zktransaction.ZKBadRequest as zkie:\n- logger.exception('Illegal argument during {}'.format(putreq_pb))\n- return (putresp_pb.Encode(),\n- datastore_pb.Error.BAD_REQUEST, \n- \"Illegal arguments for transaction. {0}\".format(str(zkie)))\n+ return putresp_pb.Encode(), 0, ''\n+ except zktransaction.ZKBadRequest as error:\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n except zktransaction.ZKInternalException as error:\n logger.exception('ZKInternalException during put')\n- return (putresp_pb.Encode(), datastore_pb.Error.INTERNAL_ERROR,\n- str(error))\n- except zktransaction.ZKTransactionException:\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ except zktransaction.ZKTransactionException as error:\n logger.exception('Concurrent transaction during {}'.\n format(putreq_pb))\n- return (putresp_pb.Encode(),\n- datastore_pb.Error.CONCURRENT_TRANSACTION, \n- \"Concurrent transaction exception on put.\")\n+ return '', datastore_pb.Error.CONCURRENT_TRANSACTION, str(error)\n except dbconstants.AppScaleDBConnectionError:\n logger.exception('DB connection error during put')\n- return (putresp_pb.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on put.\")\n+ return ('', datastore_pb.Error.INTERNAL_ERROR,\n+ 'Datastore connection error on put.')\n \n- \n def get_request(self, app_id, http_request_data):\n \"\"\" High level function for doing gets.\n \n@@ -670,29 +633,21 @@ class MainHandler(tornado.web.RequestHandler):\n getresp_pb = datastore_pb.GetResponse()\n try:\n datastore_access.dynamic_get(app_id, getreq_pb, getresp_pb)\n- except zktransaction.ZKBadRequest as zkie:\n+ except zktransaction.ZKBadRequest as error:\n logger.exception('Illegal argument during {}'.format(getreq_pb))\n- return (getresp_pb.Encode(),\n- datastore_pb.Error.BAD_REQUEST, \n- \"Illegal arguments for transaction. 
{0}\".format(str(zkie)))\n- except zktransaction.ZKInternalException:\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n+ except zktransaction.ZKInternalException as error:\n logger.exception('ZKInternalException during {}'.format(getreq_pb))\n- return (getresp_pb.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR, \n- \"Internal error with ZooKeeper connection.\")\n- except zktransaction.ZKTransactionException:\n- logger.exception('Concurrent transaction during {}'.\n- format(getreq_pb))\n- return (getresp_pb.Encode(),\n- datastore_pb.Error.CONCURRENT_TRANSACTION, \n- \"Concurrent transaction exception on get.\")\n+ return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ except zktransaction.ZKTransactionException as error:\n+ logger.exception('Concurrent transaction during {}'.format(getreq_pb))\n+ return '', datastore_pb.Error.CONCURRENT_TRANSACTION, str(error)\n except dbconstants.AppScaleDBConnectionError:\n logger.exception('DB connection error during get')\n- return (getresp_pb.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on get.\")\n+ return ('', datastore_pb.Error.INTERNAL_ERROR,\n+ 'Datastore connection error on get.')\n \n- return getresp_pb.Encode(), 0, \"\"\n+ return getresp_pb.Encode(), 0, ''\n \n def delete_request(self, app_id, http_request_data):\n \"\"\" High level function for doing deletes.\n@@ -711,33 +666,27 @@ class MainHandler(tornado.web.RequestHandler):\n if READ_ONLY:\n logger.warning('Unable to delete in read-only mode: {}'.\n format(delreq_pb))\n- return (delresp_pb.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try:\n datastore_access.dynamic_delete(app_id, delreq_pb)\n- return (delresp_pb.Encode(), 0, \"\")\n- except zktransaction.ZKBadRequest as zkie:\n+ return delresp_pb.Encode(), 0, ''\n+ except zktransaction.ZKBadRequest as error:\n logger.exception('Illegal argument during {}'.format(delreq_pb))\n- return (delresp_pb.Encode(),\n- datastore_pb.Error.BAD_REQUEST, \n- \"Illegal arguments for transaction. 
{0}\".format(str(zkie)))\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n except zktransaction.ZKInternalException:\n logger.exception('ZKInternalException during {}'.format(delreq_pb))\n- return (delresp_pb.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR, \n- \"Internal error with ZooKeeper connection.\")\n+ return ('', datastore_pb.Error.INTERNAL_ERROR,\n+ 'Internal error with ZooKeeper connection.')\n except zktransaction.ZKTransactionException:\n- logger.exception('Concurrent transaction during {}'.\n- format(delreq_pb))\n- return (delresp_pb.Encode(),\n- datastore_pb.Error.CONCURRENT_TRANSACTION, \n- \"Concurrent transaction exception on delete.\")\n+ logger.exception('Concurrent transaction during {}'.format(delreq_pb))\n+ return ('', datastore_pb.Error.CONCURRENT_TRANSACTION,\n+ 'Concurrent transaction exception on delete.')\n except dbconstants.AppScaleDBConnectionError:\n logger.exception('DB connection error during delete')\n- return (delresp_pb.Encode(),\n- datastore_pb.Error.INTERNAL_ERROR,\n- \"Datastore connection error on delete.\")\n+ return ('', datastore_pb.Error.INTERNAL_ERROR,\n+ 'Datastore connection error on delete.')\n \n def add_actions_request(self, app_id, http_request_data, service_id,\n version_id):\n@@ -757,27 +706,27 @@ class MainHandler(tornado.web.RequestHandler):\n resp_pb = taskqueue_service_pb.TaskQueueBulkAddResponse()\n \n if service_id is None:\n- return (resp_pb.Encode(), datastore_pb.Error.BAD_REQUEST,\n+ return ('', datastore_pb.Error.BAD_REQUEST,\n 'Module header must be defined')\n \n if version_id is None:\n- return (resp_pb.Encode(), datastore_pb.Error.BAD_REQUEST,\n+ return ('', datastore_pb.Error.BAD_REQUEST,\n 'Version header must be defined')\n \n if READ_ONLY:\n logger.warning('Unable to add transactional tasks in read-only mode')\n- return (resp_pb.Encode(), datastore_pb.Error.CAPABILITY_DISABLED,\n- 'Datastore is in read-only mode.')\n+ return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n+ 'Datastore is in read-only mode.')\n \n try:\n datastore_access.dynamic_add_actions(app_id, req_pb, service_id,\n version_id)\n- return resp_pb.Encode(), 0, \"\"\n+ return resp_pb.Encode(), 0, ''\n except dbconstants.ExcessiveTasks as error:\n- return (resp_pb.Encode(), datastore_pb.Error.BAD_REQUEST, str(error))\n+ return '', datastore_pb.Error.BAD_REQUEST, str(error)\n except dbconstants.AppScaleDBConnectionError:\n logger.exception('DB connection error')\n- return (resp_pb.Encode(), datastore_pb.Error.INTERNAL_ERROR,\n+ return ('', datastore_pb.Error.INTERNAL_ERROR,\n 'Datastore connection error when adding transaction tasks.')\n \n \n" } ]
15e95dc9e579e7123a4cb78d7347b8340f5fbc27
appscale/gts
07.11.2017 10:35:43
Apache License 2.0
Use the EntityLock in the groomer This allows the groomer to make index modifications under a lock without having to create transaction IDs.
[ { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/groomer.py", "new_path": "AppDB/appscale/datastore/groomer.py", "diff": "@@ -20,6 +20,7 @@ from .cassandra_env import cassandra_interface\n from .datastore_distributed import DatastoreDistributed\n from .utils import get_composite_indexes_rows\n from .zkappscale import zktransaction as zk\n+from .zkappscale.entity_lock import EntityLock\n \n sys.path.append(APPSCALE_PYTHON_APPSERVER)\n from google.appengine.api import apiproxy_stub_map\n@@ -235,73 +236,6 @@ class DatastoreGroomer(threading.Thread):\n \n return False\n \n- def acquire_lock_for_key(self, app_id, key, retries, retry_time):\n- \"\"\" Acquires a lock for a given entity key.\n-\n- Args:\n- app_id: The application ID.\n- key: A string containing an entity key.\n- retries: An integer specifying the number of times to retry.\n- retry_time: How many seconds to wait before each retry.\n- Returns:\n- A transaction ID.\n- Raises:\n- ZKTransactionException if unable to acquire a lock from ZooKeeper.\n- \"\"\"\n- root_key = key.split(dbconstants.KIND_SEPARATOR)[0]\n- root_key += dbconstants.KIND_SEPARATOR\n-\n- txn_id = self.zoo_keeper.get_transaction_id(app_id, is_xg=False)\n- try:\n- self.zoo_keeper.acquire_lock(app_id, txn_id, root_key)\n- except zk.ZKTransactionException as zkte:\n- logging.warning('Concurrent transaction exception for app id {} with '\n- 'info {}'.format(app_id, str(zkte)))\n- if retries > 0:\n- logging.info('Trying again to acquire lock info {} with retry #{}'\n- .format(str(zkte), retries))\n- time.sleep(retry_time)\n- return self.acquire_lock_for_key(\n- app_id=app_id,\n- key=key,\n- retries=retries-1,\n- retry_time=retry_time\n- )\n- self.zoo_keeper.notify_failed_transaction(app_id, txn_id)\n- raise zkte\n- return txn_id\n-\n- def release_lock_for_key(self, app_id, key, txn_id, retries, retry_time):\n- \"\"\" Releases a lock for a given entity key.\n-\n- Args:\n- app_id: The application ID.\n- key: A string containing an entity key.\n- txn_id: A transaction ID.\n- retries: An integer specifying the number of times to retry.\n- retry_time: How many seconds to wait before each retry.\n- \"\"\"\n- root_key = key.split(dbconstants.KIND_SEPARATOR)[0]\n- root_key += dbconstants.KIND_SEPARATOR\n-\n- try:\n- self.zoo_keeper.release_lock(app_id, txn_id)\n- except zk.ZKTransactionException as zkte:\n- logging.warning(str(zkte))\n- if retries > 0:\n- logging.info('Trying again to release lock {} with retry #{}'.\n- format(txn_id, retries))\n- time.sleep(retry_time)\n- self.release_lock_for_key(\n- app_id=app_id,\n- key=key,\n- txn_id=txn_id,\n- retries=retries-1,\n- retry_time=retry_time\n- )\n- else:\n- self.zoo_keeper.notify_failed_transaction(app_id, txn_id)\n-\n def fetch_entity_dict_for_references(self, references):\n \"\"\" Fetches a dictionary of valid entities for a list of references.\n \n@@ -337,6 +271,35 @@ class DatastoreGroomer(threading.Thread):\n entities[key] = app_entities[key][dbconstants.APP_ENTITY_SCHEMA[0]]\n return entities\n \n+ def guess_group_from_table_key(self, entity_key):\n+ \"\"\" Construct a group reference based on an entity key.\n+\n+ Args:\n+ entity_key: A string specifying an entity table key.\n+ Returns:\n+ An entity_pb.Reference object specifying the entity group.\n+ \"\"\"\n+ project_id, namespace, path = entity_key.split(dbconstants.KEY_DELIMITER)\n+\n+ group = entity_pb.Reference()\n+ group.set_app(project_id)\n+ if namespace:\n+ group.set_name_space(namespace)\n+\n+ mutable_path = group.mutable_path()\n+ 
first_element = mutable_path.add_element()\n+ kind, id_ = path.split(dbconstants.KIND_SEPARATOR)[0].split(':')\n+ first_element.set_type(kind)\n+\n+ # At this point, there's no way to tell if the ID was originally a name,\n+ # so this is a guess.\n+ try:\n+ first_element.set_id(int(id_))\n+ except ValueError:\n+ first_element.set_name(id_)\n+\n+ return group\n+\n def lock_and_delete_indexes(self, references, direction, entity_key):\n \"\"\" For a list of index entries that have the same entity, lock the entity\n and delete the indexes.\n@@ -355,45 +318,28 @@ class DatastoreGroomer(threading.Thread):\n else:\n table_name = dbconstants.DSC_PROPERTY_TABLE\n \n- app = entity_key.split(self.ds_access._SEPARATOR)[0]\n- try:\n- txn_id = self.acquire_lock_for_key(\n- app_id=app,\n- key=entity_key,\n- retries=self.ds_access.NON_TRANS_LOCK_RETRY_COUNT,\n- retry_time=self.ds_access.LOCK_RETRY_TIME\n- )\n- except zk.ZKTransactionException:\n- self.index_entries_delete_failures += 1\n- return\n-\n- entities = self.fetch_entity_dict_for_references(references)\n-\n- refs_to_delete = []\n- for reference in references:\n- index_elements = reference.keys()[0].split(self.ds_access._SEPARATOR)\n- prop_name = index_elements[self.ds_access.PROP_NAME_IN_SINGLE_PROP_INDEX]\n- if not self.ds_access._DatastoreDistributed__valid_index_entry(\n- reference, entities, direction, prop_name):\n- refs_to_delete.append(reference.keys()[0])\n-\n- logging.debug('Removing {} indexes starting with {}'.\n- format(len(refs_to_delete), [refs_to_delete[0]]))\n- try:\n- self.db_access.batch_delete(table_name, refs_to_delete,\n- column_names=dbconstants.PROPERTY_SCHEMA)\n- self.index_entries_cleaned += len(refs_to_delete)\n- except Exception:\n- logging.exception('Unable to delete indexes')\n- self.index_entries_delete_failures += 1\n-\n- self.release_lock_for_key(\n- app_id=app,\n- key=entity_key,\n- txn_id=txn_id,\n- retries=self.ds_access.NON_TRANS_LOCK_RETRY_COUNT,\n- retry_time=self.ds_access.LOCK_RETRY_TIME\n- )\n+ group_key = self.guess_group_from_table_key(entity_key)\n+ entity_lock = EntityLock(self.zoo_keeper.handle, [group_key])\n+ with entity_lock:\n+ entities = self.fetch_entity_dict_for_references(references)\n+\n+ refs_to_delete = []\n+ for reference in references:\n+ index_elements = reference.keys()[0].split(self.ds_access._SEPARATOR)\n+ prop = index_elements[self.ds_access.PROP_NAME_IN_SINGLE_PROP_INDEX]\n+ if not self.ds_access._DatastoreDistributed__valid_index_entry(\n+ reference, entities, direction, prop):\n+ refs_to_delete.append(reference.keys()[0])\n+\n+ logging.debug('Removing {} indexes starting with {}'.\n+ format(len(refs_to_delete), [refs_to_delete[0]]))\n+ try:\n+ self.db_access.batch_delete(table_name, refs_to_delete,\n+ column_names=dbconstants.PROPERTY_SCHEMA)\n+ self.index_entries_cleaned += len(refs_to_delete)\n+ except Exception:\n+ logging.exception('Unable to delete indexes')\n+ self.index_entries_delete_failures += 1\n \n def lock_and_delete_kind_index(self, reference):\n \"\"\" For a list of index entries that have the same entity, lock the entity\n@@ -408,37 +354,21 @@ class DatastoreGroomer(threading.Thread):\n \"\"\"\n table_name = dbconstants.APP_KIND_TABLE\n entity_key = reference.values()[0].values()[0]\n- app = entity_key.split(self.ds_access._SEPARATOR)[0]\n- try:\n- txn_id = self.acquire_lock_for_key(\n- app_id=app,\n- key=entity_key,\n- retries=self.ds_access.NON_TRANS_LOCK_RETRY_COUNT,\n- retry_time=self.ds_access.LOCK_RETRY_TIME\n- )\n- except zk.ZKTransactionException:\n- 
self.index_entries_delete_failures += 1\n- return\n-\n- entities = self.fetch_entity_dict_for_references([reference])\n- if entity_key not in entities:\n- index_to_delete = reference.keys()[0]\n- logging.debug('Removing {}'.format([index_to_delete]))\n- try:\n- self.db_access.batch_delete(table_name, [index_to_delete],\n- column_names=dbconstants.APP_KIND_SCHEMA)\n- self.index_entries_cleaned += 1\n- except dbconstants.AppScaleDBConnectionError:\n- logging.exception('Unable to delete index.')\n- self.index_entries_delete_failures += 1\n \n- self.release_lock_for_key(\n- app_id=app,\n- key=entity_key,\n- txn_id=txn_id,\n- retries=self.ds_access.NON_TRANS_LOCK_RETRY_COUNT,\n- retry_time=self.ds_access.LOCK_RETRY_TIME\n- )\n+ group_key = self.guess_group_from_table_key(entity_key)\n+ entity_lock = EntityLock(self.zoo_keeper.handle, [group_key])\n+ with entity_lock:\n+ entities = self.fetch_entity_dict_for_references([reference])\n+ if entity_key not in entities:\n+ index_to_delete = reference.keys()[0]\n+ logging.debug('Removing {}'.format([index_to_delete]))\n+ try:\n+ self.db_access.batch_delete(table_name, [index_to_delete],\n+ column_names=dbconstants.APP_KIND_SCHEMA)\n+ self.index_entries_cleaned += 1\n+ except dbconstants.AppScaleDBConnectionError:\n+ logging.exception('Unable to delete index.')\n+ self.index_entries_delete_failures += 1\n \n def clean_up_indexes(self, direction):\n \"\"\" Deletes invalid single property index entries.\n" } ]
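A minimal sketch of the EntityLock usage the groomer now relies on. The ZooKeeper address, project, kind, and ID are placeholders; the lock is acquired for the entity group and released when the with-block exits.

from kazoo.client import KazooClient

from appscale.datastore.zkappscale.entity_lock import EntityLock
from google.appengine.datastore import entity_pb

zk_client = KazooClient(hosts='127.0.0.1:2181')
zk_client.start()

group = entity_pb.Reference()
group.set_app('guestbook')
element = group.mutable_path().add_element()
element.set_type('Greeting')
element.set_id(42)

lock = EntityLock(zk_client, [group])
with lock:
    # Index entries for this entity group can be validated and deleted here
    # without racing against concurrent writers.
    pass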
b08b928d2937caa7ea70ba57839c52316390d9df
appscale/gts
22.11.2017 09:01:56
Apache License 2.0
Allow Python runtime to use an external API server If given an external API port, the Python runtime will use it to make App Identity calls.
[ { "change_type": "MODIFY", "old_path": "AppServer/google/appengine/ext/remote_api/remote_api_stub.py", "new_path": "AppServer/google/appengine/ext/remote_api/remote_api_stub.py", "diff": "@@ -592,7 +592,8 @@ def GetRemoteAppIdFromServer(server, path, remote_token=None):\n def ConfigureRemoteApiFromServer(server, path, app_id, services=None,\n default_auth_domain=None,\n use_remote_datastore=True,\n- use_async_rpc=False):\n+ use_async_rpc=False,\n+ external_server=None):\n \"\"\"Does necessary setup to allow easy remote access to App Engine APIs.\n \n Args:\n@@ -609,6 +610,8 @@ def ConfigureRemoteApiFromServer(server, path, app_id, services=None,\n a single request.\n use_async_rpc: A boolean indicating whether or not to make RPC calls in a\n separate thread.\n+ external_server: An AbstractRpcServer specifying the location of an\n+ external API server.\n \n Raises:\n urllib2.HTTPError: if app_id is not provided and there is an error while\n@@ -636,11 +639,20 @@ def ConfigureRemoteApiFromServer(server, path, app_id, services=None,\n apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore_stub)\n \n if use_async_rpc:\n- stub = RuntimeRemoteStub(server, path)\n+ stub_type = RuntimeRemoteStub\n else:\n- stub = RemoteStub(server, path)\n+ stub_type = RemoteStub\n+\n+ stub = stub_type(server, path)\n+ external_stub = None\n+ if external_server is not None:\n+ external_stub = stub_type(external_server, path)\n+\n for service in services:\n- apiproxy_stub_map.apiproxy.RegisterStub(service, stub)\n+ if service == 'app_identity_service' and external_stub is not None:\n+ apiproxy_stub_map.apiproxy.RegisterStub(service, external_stub)\n+ else:\n+ apiproxy_stub_map.apiproxy.RegisterStub(service, stub)\n \n \n def GetRemoteAppId(servername,\n@@ -691,7 +703,8 @@ def ConfigureRemoteApi(app_id,\n default_auth_domain=None,\n save_cookies=False,\n use_remote_datastore=True,\n- use_async_rpc=False):\n+ use_async_rpc=False,\n+ external_api_server=None):\n \"\"\"Does necessary setup to allow easy remote access to App Engine APIs.\n \n Either servername must be provided or app_id must not be None. 
If app_id\n@@ -727,6 +740,8 @@ def ConfigureRemoteApi(app_id,\n a single request.\n use_async_rpc: A boolean indicating whether or not to make RPC calls in a\n separate thread.\n+ external_api_server: A string specifying the location of an external API\n+ server.\n \n Returns:\n server, the server created by rpc_server_factory, which may be useful for\n@@ -744,12 +759,20 @@ def ConfigureRemoteApi(app_id,\n server = rpc_server_factory(servername, auth_func, GetUserAgent(),\n GetSourceName(), save_cookies=save_cookies,\n debug_data=False, secure=secure)\n+\n+ if external_api_server is None:\n+ external_server = server\n+ else:\n+ external_server = rpc_server_factory(\n+ external_api_server, auth_func, GetUserAgent(), GetSourceName(),\n+ save_cookies=save_cookies, debug_data=False, secure=secure)\n+\n if not app_id:\n app_id = GetRemoteAppIdFromServer(server, path, rtok)\n \n ConfigureRemoteApiFromServer(server, path, app_id, services,\n default_auth_domain, use_remote_datastore,\n- use_async_rpc)\n+ use_async_rpc, external_server)\n return server\n \n \n" }, { "change_type": "MODIFY", "old_path": "AppServer/google/appengine/tools/devappserver2/devappserver2.py", "new_path": "AppServer/google/appengine/tools/devappserver2/devappserver2.py", "diff": "@@ -436,6 +436,9 @@ def create_command_line_parser():\n \n # AppScale\n appscale_group = parser.add_argument_group('AppScale')\n+ appscale_group.add_argument(\n+ '--external_api_port', type=int,\n+ help='The port of the external server that handles API calls')\n appscale_group.add_argument(\n '--login_server',\n help='the FQDN or IP address where users should be redirected to when the '\n@@ -594,7 +597,8 @@ class DevelopmentServer(object):\n module_to_max_instances,\n options.use_mtime_file_watcher,\n options.automatic_restart,\n- options.allow_skipped_files)\n+ options.allow_skipped_files,\n+ options.external_api_port)\n request_data = wsgi_request_info.WSGIRequestInfo(self._dispatcher)\n \n storage_path = _get_storage_path(options.storage_path, configuration.app_id)\n" }, { "change_type": "MODIFY", "old_path": "AppServer/google/appengine/tools/devappserver2/dispatcher.py", "new_path": "AppServer/google/appengine/tools/devappserver2/dispatcher.py", "diff": "@@ -73,7 +73,8 @@ class Dispatcher(request_info.Dispatcher):\n module_to_max_instances,\n use_mtime_file_watcher,\n automatic_restart,\n- allow_skipped_files):\n+ allow_skipped_files,\n+ external_api_port=None):\n \"\"\"Initializer for Dispatcher.\n \n Args:\n@@ -109,6 +110,8 @@ class Dispatcher(request_info.Dispatcher):\n allow_skipped_files: If True then all files in the application's directory\n are readable, even if they appear in a static handler or \"skip_files\"\n directive.\n+ external_api_port: An integer specifying the location of an external API\n+ server.\n \"\"\"\n self._configuration = configuration\n self._php_executable_path = php_executable_path\n@@ -117,6 +120,7 @@ class Dispatcher(request_info.Dispatcher):\n self._cloud_sql_config = cloud_sql_config\n self._request_data = None\n self._api_port = None\n+ self._external_api_port = external_api_port\n self._running_modules = []\n self._module_configurations = {}\n self._host = host\n@@ -159,7 +163,8 @@ class Dispatcher(request_info.Dispatcher):\n for module_configuration in self._configuration.modules:\n self._module_configurations[\n module_configuration.module_name] = module_configuration\n- _module, port = self._create_module(module_configuration, port)\n+ _module, port = self._create_module(module_configuration, port,\n+ 
self._external_api_port)\n _module.start()\n self._module_name_to_module[module_configuration.module_name] = _module\n logging.info('Starting module \"%s\" running at: http://%s',\n@@ -229,7 +234,7 @@ class Dispatcher(request_info.Dispatcher):\n for _module in self._module_name_to_module.values():\n _module.quit()\n \n- def _create_module(self, module_configuration, port):\n+ def _create_module(self, module_configuration, port, external_port=None):\n max_instances = self._module_to_max_instances.get(\n module_configuration.module_name)\n module_args = (module_configuration,\n@@ -250,12 +255,13 @@ class Dispatcher(request_info.Dispatcher):\n self._use_mtime_file_watcher,\n self._automatic_restart,\n self._allow_skipped_files)\n+ module_kwargs = {'external_api_port': external_port}\n if module_configuration.manual_scaling:\n- _module = module.ManualScalingModule(*module_args)\n+ _module = module.ManualScalingModule(*module_args, **module_kwargs)\n elif module_configuration.basic_scaling:\n- _module = module.BasicScalingModule(*module_args)\n+ _module = module.BasicScalingModule(*module_args, **module_kwargs)\n else:\n- _module = module.AutoScalingModule(*module_args)\n+ _module = module.AutoScalingModule(*module_args, **module_kwargs)\n \n if port != 0:\n port += 1000\n" }, { "change_type": "MODIFY", "old_path": "AppServer/google/appengine/tools/devappserver2/module.py", "new_path": "AppServer/google/appengine/tools/devappserver2/module.py", "diff": "@@ -28,6 +28,7 @@ import os.path\n import random\n import re\n import string\n+import struct\n import threading\n import time\n import urllib\n@@ -259,7 +260,15 @@ class Module(object):\n runtime_config.skip_files = str(self._module_configuration.skip_files)\n runtime_config.static_files = _static_files_regex_from_handlers(\n self._module_configuration.handlers)\n- runtime_config.api_port = self._api_port\n+\n+ # AppScale: Pack both API ports into the same field.\n+ if (self._external_api_port is not None and\n+ self._module_configuration.runtime == 'python27'):\n+ port_bytes = struct.pack('HH', self._api_port, self._external_api_port)\n+ runtime_config.api_port = struct.unpack('I', port_bytes)[0]\n+ else:\n+ runtime_config.api_port = self._api_port\n+\n runtime_config.stderr_log_level = self._runtime_stderr_loglevel\n runtime_config.datacenter = 'us1'\n runtime_config.auth_domain = self._auth_domain\n@@ -352,7 +361,8 @@ class Module(object):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files):\n+ allow_skipped_files,\n+ external_api_port=None):\n \"\"\"Initializer for Module.\n \n Args:\n@@ -394,11 +404,14 @@ class Module(object):\n allow_skipped_files: If True then all files in the application's directory\n are readable, even if they appear in a static handler or \"skip_files\"\n directive.\n+ external_api_port: An integer specifying the location of an external API\n+ server.\n \"\"\"\n self._module_configuration = module_configuration\n self._name = module_configuration.module_name\n self._host = host\n self._api_port = api_port\n+ self._external_api_port = external_api_port\n self._auth_domain = auth_domain\n self._runtime_stderr_loglevel = runtime_stderr_loglevel\n self._balanced_port = balanced_port\n@@ -895,7 +908,8 @@ class AutoScalingModule(Module):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files):\n+ allow_skipped_files,\n+ external_api_port=None):\n \"\"\"Initializer for AutoScalingModule.\n \n Args:\n@@ -937,6 +951,8 @@ class AutoScalingModule(Module):\n 
allow_skipped_files: If True then all files in the application's directory\n are readable, even if they appear in a static handler or \"skip_files\"\n directive.\n+ external_api_port: An integer specifying the location of an external API\n+ server.\n \"\"\"\n super(AutoScalingModule, self).__init__(module_configuration,\n host,\n@@ -955,7 +971,8 @@ class AutoScalingModule(Module):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files)\n+ allow_skipped_files,\n+ external_api_port)\n \n self._process_automatic_scaling(\n self._module_configuration.automatic_scaling)\n@@ -1327,7 +1344,8 @@ class ManualScalingModule(Module):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files):\n+ allow_skipped_files,\n+ external_api_port=None):\n \"\"\"Initializer for ManualScalingModule.\n \n Args:\n@@ -1369,6 +1387,8 @@ class ManualScalingModule(Module):\n allow_skipped_files: If True then all files in the application's directory\n are readable, even if they appear in a static handler or \"skip_files\"\n directive.\n+ external_api_port: An integer specifying the location of an external API\n+ server.\n \"\"\"\n super(ManualScalingModule, self).__init__(module_configuration,\n host,\n@@ -1387,7 +1407,8 @@ class ManualScalingModule(Module):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files)\n+ allow_skipped_files,\n+ external_api_port)\n \n self._process_manual_scaling(module_configuration.manual_scaling)\n \n@@ -1823,7 +1844,8 @@ class BasicScalingModule(Module):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files):\n+ allow_skipped_files,\n+ external_api_port=None):\n \"\"\"Initializer for BasicScalingModule.\n \n Args:\n@@ -1865,6 +1887,8 @@ class BasicScalingModule(Module):\n allow_skipped_files: If True then all files in the application's directory\n are readable, even if they appear in a static handler or \"skip_files\"\n directive.\n+ external_api_port: An integer specifying the location of an external API\n+ server.\n \"\"\"\n super(BasicScalingModule, self).__init__(module_configuration,\n host,\n@@ -1883,7 +1907,8 @@ class BasicScalingModule(Module):\n max_instances,\n use_mtime_file_watcher,\n automatic_restarts,\n- allow_skipped_files)\n+ allow_skipped_files,\n+ external_api_port)\n self._process_basic_scaling(module_configuration.basic_scaling)\n \n self._instances = [] # Protected by self._condition.\n" }, { "change_type": "MODIFY", "old_path": "AppServer/google/appengine/tools/devappserver2/python/runtime.py", "new_path": "AppServer/google/appengine/tools/devappserver2/python/runtime.py", "diff": "@@ -19,6 +19,7 @@\n \n import base64\n import os\n+import struct\n import sys\n import time\n import traceback\n@@ -54,12 +55,18 @@ _STARTUP_FAILURE_TEMPLATE = \"\"\"\n </html>\"\"\"\n \n \n-def setup_stubs(config):\n+def setup_stubs(config, external_api_port=None):\n \"\"\"Sets up API stubs using remote API.\"\"\"\n+ if external_api_port is None:\n+ external_api_server = None\n+ else:\n+ external_api_server = 'localhost:{}'.format(external_api_port)\n+\n remote_api_stub.ConfigureRemoteApi(config.app_id, '/', lambda: ('', ''),\n 'localhost:%d' % config.api_port,\n use_remote_datastore=False,\n- use_async_rpc=True)\n+ use_async_rpc=True,\n+ external_api_server=external_api_server)\n \n if config.HasField('cloud_sql_config'):\n # Connect the RDBMS API to MySQL.\n@@ -119,6 +126,13 @@ def expand_user(path):\n def main():\n config = runtime_config_pb2.Config()\n 
config.ParseFromString(base64.b64decode(sys.stdin.read()))\n+\n+ # AppScale: The external port is packed in the same field as the API port.\n+ external_api_port = None\n+ if config.api_port > 65535:\n+ port_bytes = struct.pack('I', config.api_port)\n+ config.api_port, external_api_port = struct.unpack('HH', port_bytes)\n+\n debugging_app = None\n if config.python_config and config.python_config.startup_script:\n global_vars = {'config': config}\n@@ -144,7 +158,7 @@ def main():\n ('localhost', 0),\n debugging_app)\n else:\n- setup_stubs(config)\n+ setup_stubs(config, external_api_port)\n sandbox.enable_sandbox(config)\n os.path.expanduser = expand_user\n # This import needs to be after enabling the sandbox so the runtime\n" } ]
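The runtime_config change in the row above squeezes two 16-bit API ports into the single 32-bit api_port field. A minimal sketch of that round trip, using only the struct calls shown in the diff; the function names here are illustrative, not part of the original module:

```python
import struct

def pack_api_ports(api_port, external_api_port):
    """Pack two 16-bit ports into one unsigned 32-bit value."""
    port_bytes = struct.pack('HH', api_port, external_api_port)
    return struct.unpack('I', port_bytes)[0]

def unpack_api_ports(packed):
    """Undo the packing; values that fit in 16 bits were never packed."""
    if packed <= 65535:
        return packed, None
    port_bytes = struct.pack('I', packed)
    return struct.unpack('HH', port_bytes)

if __name__ == '__main__':
    packed = pack_api_ports(40000, 40001)
    assert unpack_api_ports(packed) == (40000, 40001)
    assert unpack_api_ports(9999) == (9999, None)
```

Packing only when an external port is present keeps the field backward compatible: an unpacked api_port never exceeds 65535, which is exactly the guard the runtime uses before unpacking.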
31ede29f649eb81eaaa8cb7665db020d7245de5c
appscale/gts
27.02.2018 08:22:59
Apache License 2.0
Attach list of groups to transaction node This allows the transaction groomer to track down which entity groups might have an entity lock that needs to be cleared after the transaction is resolved.
[ { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/datastore_distributed.py", "new_path": "AppDB/appscale/datastore/datastore_distributed.py", "diff": "@@ -560,6 +560,7 @@ class DatastoreDistributed():\n group_key = entity_pb.Reference(encoded_group_key)\n \n txid = self.transaction_manager.create_transaction_id(app, xg=False)\n+ self.transaction_manager.set_groups(app, txid, [group_key])\n lock = entity_lock.EntityLock(self.zookeeper.handle, [group_key], txid)\n try:\n with lock:\n@@ -896,6 +897,7 @@ class DatastoreDistributed():\n group_key = entity_pb.Reference(encoded_group_key)\n \n txid = self.transaction_manager.create_transaction_id(app_id, xg=False)\n+ self.transaction_manager.set_groups(app_id, txid, [group_key])\n lock = entity_lock.EntityLock(self.zookeeper.handle, [group_key], txid)\n try:\n with lock:\n@@ -3217,6 +3219,7 @@ class DatastoreDistributed():\n for index in self.datastore_batch.get_indices(app)]\n \n decoded_groups = (entity_pb.Reference(group) for group in tx_groups)\n+ self.transaction_manager.set_groups(app, txn, decoded_groups)\n lock = entity_lock.EntityLock(self.zookeeper.handle, decoded_groups, txn)\n \n with lock:\n" }, { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/zkappscale/transaction_manager.py", "new_path": "AppDB/appscale/datastore/zkappscale/transaction_manager.py", "diff": "@@ -1,6 +1,7 @@\n \"\"\" Generates and keeps track of transaction IDs. \"\"\"\n from __future__ import division\n \n+import json\n import logging\n import time\n \n@@ -14,6 +15,7 @@ from .constants import CONTAINER_PREFIX\n from .constants import COUNTER_NODE_PREFIX\n from .constants import MAX_SEQUENCE_COUNTER\n from .constants import OFFSET_NODE\n+from .entity_lock import zk_group_path\n from ..dbconstants import BadRequest\n from ..dbconstants import InternalError\n \n@@ -103,20 +105,8 @@ class ProjectTransactionManager(object):\n Args:\n txid: An integer specifying a transaction ID.\n \"\"\"\n- corrected_counter = txid - self._txid_manual_offset\n-\n- # The number of counters a container can store (including 0).\n- container_size = MAX_SEQUENCE_COUNTER + 1\n-\n- container_count = int(corrected_counter / container_size) + 1\n- container_suffix = '' if container_count == 1 else str(container_count)\n- container_name = CONTAINER_PREFIX + container_suffix\n- container_path = '/'.join([self._project_node, container_name])\n-\n- counter_value = corrected_counter % container_size\n- node_name = COUNTER_NODE_PREFIX + str(counter_value).zfill(10)\n- full_path = '/'.join([container_path, node_name])\n- self._delete_counter(full_path)\n+ path = self._txid_to_path(txid)\n+ self._delete_counter(path)\n \n def get_open_transactions(self):\n \"\"\" Fetches a list of active transactions.\n@@ -152,6 +142,23 @@ class ProjectTransactionManager(object):\n \n return txids\n \n+ def set_groups(self, txid, groups):\n+ \"\"\" Defines which groups will be involved in a transaction.\n+\n+ Args:\n+ txid: An integer specifying a transaction ID.\n+ groups: An iterable of entity group Reference objects.\n+ \"\"\"\n+ txid_path = self._txid_to_path(txid)\n+ groups_path = '/'.join([txid_path, 'groups'])\n+ encoded_groups = [zk_group_path(group) for group in groups]\n+ try:\n+ self.zk_client.create(groups_path, value=json.dumps(encoded_groups))\n+ except KazooException:\n+ message = 'Unable to set lock list for transaction'\n+ logger.exception(message)\n+ raise InternalError(message)\n+\n def _delete_counter(self, path):\n \"\"\" Removes a counter node.\n \n@@ -159,11 +166,7 
@@ class ProjectTransactionManager(object):\n path: A string specifying a ZooKeeper path.\n \"\"\"\n try:\n- try:\n- self.zk_client.delete(path)\n- except NotEmptyError:\n- # Cross-group transaction nodes have a child node.\n- self.zk_client.delete(path, recursive=True)\n+ self.zk_client.delete(path, recursive=True)\n except KazooException:\n # Let the transaction groomer clean it up.\n logger.exception('Unable to delete counter')\n@@ -185,6 +188,28 @@ class ProjectTransactionManager(object):\n for container in all_containers\n if container not in self._inactive_containers)\n \n+ def _txid_to_path(self, txid):\n+ \"\"\" Determines the ZooKeeper path for a given transaction ID.\n+\n+ Args:\n+ txid: An integer specifying a transaction ID.\n+ Returns:\n+ A strings specifying the transaction's ZooKeeper path.\n+ \"\"\"\n+ corrected_counter = txid - self._txid_manual_offset\n+\n+ # The number of counters a container can store (including 0).\n+ container_size = MAX_SEQUENCE_COUNTER + 1\n+\n+ container_count = int(corrected_counter / container_size) + 1\n+ container_suffix = '' if container_count == 1 else str(container_count)\n+ container_name = CONTAINER_PREFIX + container_suffix\n+ container_path = '/'.join([self._project_node, container_name])\n+\n+ counter_value = corrected_counter % container_size\n+ node_name = COUNTER_NODE_PREFIX + str(counter_value).zfill(10)\n+ return '/'.join([container_path, node_name])\n+\n def _update_auto_offset(self):\n \"\"\" Ensures there is a usable sequence container. \"\"\"\n container_name = self._counter_path.split('/')[-1]\n@@ -308,6 +333,21 @@ class TransactionManager(object):\n \n return project_tx_manager.get_open_transactions()\n \n+ def set_groups(self, project_id, txid, groups):\n+ \"\"\" Defines which groups will be involved in a transaction.\n+\n+ Args:\n+ project_id: A string specifying a project ID.\n+ txid: An integer specifying a transaction ID.\n+ groups: An iterable of entity group Reference objects.\n+ \"\"\"\n+ try:\n+ project_tx_manager = self.projects[project_id]\n+ except KeyError:\n+ raise BadRequest('The project {} was not found'.format(project_id))\n+\n+ return project_tx_manager.set_groups(txid, groups)\n+\n def _update_projects_sync(self, new_project_ids):\n \"\"\" Updates the available projects for starting transactions.\n \n" }, { "change_type": "MODIFY", "old_path": "AppDB/test/unit/test_datastore_server.py", "new_path": "AppDB/test/unit/test_datastore_server.py", "diff": "@@ -371,7 +371,8 @@ class TestDatastoreServer(unittest.TestCase):\n db_batch.should_receive('batch_mutate')\n transaction_manager = flexmock(\n create_transaction_id=lambda project, xg: 1,\n- delete_transaction_id=lambda project, txid: None)\n+ delete_transaction_id=lambda project, txid: None,\n+ set_groups=lambda project, txid, groups: None)\n dd = DatastoreDistributed(db_batch, transaction_manager,\n self.get_zookeeper())\n putreq_pb = datastore_pb.PutRequest()\n@@ -410,7 +411,8 @@ class TestDatastoreServer(unittest.TestCase):\n db_batch.should_receive('batch_mutate')\n transaction_manager = flexmock(\n create_transaction_id=lambda project, xg: 1,\n- delete_transaction_id=lambda project, txid: None)\n+ delete_transaction_id=lambda project, txid: None,\n+ set_groups=lambda project, txid, groups: None)\n dd = DatastoreDistributed(db_batch, transaction_manager,\n self.get_zookeeper())\n \n@@ -718,7 +720,8 @@ class TestDatastoreServer(unittest.TestCase):\n db_batch.should_receive('valid_data_version').and_return(True)\n transaction_manager = flexmock(\n 
create_transaction_id=lambda project, xg: 1,\n- delete_transaction_id=lambda project, txid: None)\n+ delete_transaction_id=lambda project, txid: None,\n+ set_groups=lambda project_id, txid, groups: None)\n dd = DatastoreDistributed(db_batch, transaction_manager,\n self.get_zookeeper())\n dd.dynamic_delete(\"appid\", del_request)\n@@ -1056,7 +1059,9 @@ class TestDatastoreServer(unittest.TestCase):\n \n db_batch.should_receive('get_indices').and_return([])\n \n- transaction_manager = flexmock()\n+ transaction_manager = flexmock(\n+ delete_transaction_id=lambda project_id, txid: None,\n+ set_groups=lambda project_id, txid, groups: None)\n dd = DatastoreDistributed(db_batch, transaction_manager,\n self.get_zookeeper())\n prefix = dd.get_table_prefix(entity)\n" } ]
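The transaction-manager diff above both centralizes the txid-to-path arithmetic and records a transaction's entity groups under a 'groups' child node. A self-contained sketch of those two pieces; the prefix strings and counter limit are placeholders (the module constants are not shown in the diff), and zk_client stands for any kazoo-style client:

```python
import json

def txid_to_path(project_node, txid, manual_offset=0,
                 container_prefix='txids', counter_prefix='tx',
                 max_sequence_counter=2 ** 31 - 1):
    """Map a transaction ID back to its ZooKeeper counter node path."""
    corrected = txid - manual_offset

    # The number of counters a container can store (including 0).
    container_size = max_sequence_counter + 1

    container_count = corrected // container_size + 1
    suffix = '' if container_count == 1 else str(container_count)
    node_name = counter_prefix + str(corrected % container_size).zfill(10)
    return '/'.join([project_node, container_prefix + suffix, node_name])

def set_groups(zk_client, project_node, txid, group_lock_paths):
    """Store the transaction's entity-group lock paths as a JSON list."""
    groups_node = '/'.join([txid_to_path(project_node, txid), 'groups'])
    zk_client.create(groups_node, value=json.dumps(group_lock_paths))

if __name__ == '__main__':
    print(txid_to_path('/example-project-node', 42))
```

Deleting the counter recursively, as the diff now does unconditionally, removes this 'groups' child along with the transaction node itself.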
c3f16251ede94fb1a44d774edf6f805402e31894
appscale/gts
27.02.2018 08:27:57
Apache License 2.0
Allow group locks to persist when commits fail This prevents other clients from writing to a group before a large batch is fully applied.
[ { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/datastore_distributed.py", "new_path": "AppDB/appscale/datastore/datastore_distributed.py", "diff": "@@ -16,6 +16,7 @@ from kazoo.client import KazooState\n from .dbconstants import APP_ENTITY_SCHEMA\n from .dbconstants import ID_KEY_LENGTH\n from .dbconstants import MAX_TX_DURATION\n+from .dbconstants import TimeoutError\n from .cassandra_env import cassandra_interface\n from .cassandra_env.entity_id_allocator import EntityIDAllocator\n from .cassandra_env.entity_id_allocator import ScatteredAllocator\n@@ -561,32 +562,38 @@ class DatastoreDistributed():\n \n txid = self.transaction_manager.create_transaction_id(app, xg=False)\n self.transaction_manager.set_groups(app, txid, [group_key])\n+\n+ # Allow the lock to stick around if there is an issue applying the batch.\n lock = entity_lock.EntityLock(self.zookeeper.handle, [group_key], txid)\n try:\n- with lock:\n- batch = []\n- entity_changes = []\n- for entity in entity_list:\n- prefix = self.get_table_prefix(entity)\n- entity_key = get_entity_key(prefix, entity.key().path())\n-\n- current_value = None\n- if current_values[entity_key]:\n- current_value = entity_pb.EntityProto(\n- current_values[entity_key][APP_ENTITY_SCHEMA[0]])\n-\n- batch.extend(mutations_for_entity(entity, txid, current_value,\n- composite_indexes))\n-\n- batch.append({'table': 'group_updates',\n- 'key': bytearray(encoded_group_key),\n- 'last_update': txid})\n-\n- entity_changes.append(\n- {'key': entity.key(), 'old': current_value, 'new': entity})\n- self.datastore_batch.batch_mutate(app, batch, entity_changes, txid)\n- finally:\n- self.transaction_manager.delete_transaction_id(app, txid)\n+ lock.acquire()\n+ except entity_lock.LockTimeout:\n+ raise TimeoutError('Unable to acquire entity group lock')\n+\n+ batch = []\n+ entity_changes = []\n+ for entity in entity_list:\n+ prefix = self.get_table_prefix(entity)\n+ entity_key = get_entity_key(prefix, entity.key().path())\n+\n+ current_value = None\n+ if current_values[entity_key]:\n+ current_value = entity_pb.EntityProto(\n+ current_values[entity_key][APP_ENTITY_SCHEMA[0]])\n+\n+ batch.extend(mutations_for_entity(entity, txid, current_value,\n+ composite_indexes))\n+\n+ batch.append({'table': 'group_updates',\n+ 'key': bytearray(encoded_group_key),\n+ 'last_update': txid})\n+\n+ entity_changes.append(\n+ {'key': entity.key(), 'old': current_value, 'new': entity})\n+ self.datastore_batch.batch_mutate(app, batch, entity_changes, txid)\n+\n+ lock.release()\n+ self.transaction_manager.delete_transaction_id(app, txid)\n \n def delete_entities(self, group, txid, keys, composite_indexes=()):\n \"\"\" Deletes the entities and the indexes associated with them.\n@@ -658,10 +665,7 @@ class DatastoreDistributed():\n self.datastore_batch.put_entities_tx(\n app_id, put_request.transaction().handle(), entities)\n else:\n- try:\n- self.put_entities(app_id, entities, put_request.composite_index_list())\n- except entity_lock.LockTimeout as timeout_error:\n- raise dbconstants.AppScaleDBConnectionError(str(timeout_error))\n+ self.put_entities(app_id, entities, put_request.composite_index_list())\n self.logger.debug('Updated {} entities'.format(len(entities)))\n \n put_response.key_list().extend([e.key() for e in entities])\n@@ -898,20 +902,25 @@ class DatastoreDistributed():\n \n txid = self.transaction_manager.create_transaction_id(app_id, xg=False)\n self.transaction_manager.set_groups(app_id, txid, [group_key])\n+\n+ # Allow the lock to stick around if there is an issue 
applying the batch.\n lock = entity_lock.EntityLock(self.zookeeper.handle, [group_key], txid)\n try:\n- with lock:\n- self.delete_entities(\n- group_key,\n- txid,\n- key_list,\n- composite_indexes=filtered_indexes\n- )\n- self.logger.debug('Removed {} entities'.format(len(key_list)))\n- except entity_lock.LockTimeout as timeout_error:\n- raise dbconstants.AppScaleDBConnectionError(str(timeout_error))\n- finally:\n- self.transaction_manager.delete_transaction_id(app_id, txid)\n+ lock.acquire()\n+ except entity_lock.LockTimeout:\n+ raise TimeoutError('Unable to acquire entity group lock')\n+\n+ self.delete_entities(\n+ group_key,\n+ txid,\n+ key_list,\n+ composite_indexes=filtered_indexes\n+ )\n+\n+ lock.release()\n+ self.logger.debug('Removed {} entities'.format(len(key_list)))\n+\n+ self.transaction_manager.delete_transaction_id(app_id, txid)\n \n def generate_filter_info(self, filters):\n \"\"\"Transform a list of filters into a more usable form.\n@@ -3220,60 +3229,70 @@ class DatastoreDistributed():\n \n decoded_groups = (entity_pb.Reference(group) for group in tx_groups)\n self.transaction_manager.set_groups(app, txn, decoded_groups)\n- lock = entity_lock.EntityLock(self.zookeeper.handle, decoded_groups, txn)\n \n- with lock:\n- group_txids = self.datastore_batch.group_updates(metadata['reads'])\n- for group_txid in group_txids:\n- if group_txid in metadata['in_progress'] or group_txid > txn:\n- raise dbconstants.ConcurrentModificationException(\n- 'A group was modified after this transaction was started.')\n-\n- # Fetch current values so we can remove old indices.\n- entity_table_keys = [encode_entity_table_key(key)\n- for key, _ in metadata['puts'].iteritems()]\n- entity_table_keys.extend([encode_entity_table_key(key)\n- for key in metadata['deletes']])\n- current_values = self.datastore_batch.batch_get_entity(\n- dbconstants.APP_ENTITY_TABLE, entity_table_keys, APP_ENTITY_SCHEMA)\n+ # Allow the lock to stick around if there is an issue applying the batch.\n+ lock = entity_lock.EntityLock(self.zookeeper.handle, decoded_groups, txn)\n+ try:\n+ lock.acquire()\n+ except entity_lock.LockTimeout:\n+ raise TimeoutError('Unable to acquire entity group locks')\n+\n+ group_txids = self.datastore_batch.group_updates(metadata['reads'])\n+ for group_txid in group_txids:\n+ if group_txid in metadata['in_progress'] or group_txid > txn:\n+ lock.release()\n+ self.transaction_manager.delete_transaction_id(app, txn)\n+ raise dbconstants.ConcurrentModificationException(\n+ 'A group was modified after this transaction was started.')\n+\n+ # Fetch current values so we can remove old indices.\n+ entity_table_keys = [encode_entity_table_key(key)\n+ for key, _ in metadata['puts'].iteritems()]\n+ entity_table_keys.extend([encode_entity_table_key(key)\n+ for key in metadata['deletes']])\n+ current_values = self.datastore_batch.batch_get_entity(\n+ dbconstants.APP_ENTITY_TABLE, entity_table_keys, APP_ENTITY_SCHEMA)\n+\n+ batch = []\n+ entity_changes = []\n+ for encoded_key, encoded_entity in metadata['puts'].iteritems():\n+ key = entity_pb.Reference(encoded_key)\n+ entity_table_key = encode_entity_table_key(key)\n+ current_value = None\n+ if current_values[entity_table_key]:\n+ current_value = entity_pb.EntityProto(\n+ current_values[entity_table_key][APP_ENTITY_SCHEMA[0]])\n \n- batch = []\n- entity_changes = []\n- for encoded_key, encoded_entity in metadata['puts'].iteritems():\n- key = entity_pb.Reference(encoded_key)\n- entity_table_key = encode_entity_table_key(key)\n- current_value = None\n- if 
current_values[entity_table_key]:\n- current_value = entity_pb.EntityProto(\n- current_values[entity_table_key][APP_ENTITY_SCHEMA[0]])\n+ entity = entity_pb.EntityProto(encoded_entity)\n+ mutations = mutations_for_entity(entity, txn, current_value,\n+ composite_indices)\n+ batch.extend(mutations)\n \n- entity = entity_pb.EntityProto(encoded_entity)\n- mutations = mutations_for_entity(entity, txn, current_value,\n- composite_indices)\n- batch.extend(mutations)\n+ entity_changes.append({'key': key, 'old': current_value,\n+ 'new': entity})\n \n- entity_changes.append({'key': key, 'old': current_value,\n- 'new': entity})\n+ for key in metadata['deletes']:\n+ entity_table_key = encode_entity_table_key(key)\n+ if not current_values[entity_table_key]:\n+ continue\n \n- for key in metadata['deletes']:\n- entity_table_key = encode_entity_table_key(key)\n- if not current_values[entity_table_key]:\n- continue\n+ current_value = entity_pb.EntityProto(\n+ current_values[entity_table_key][APP_ENTITY_SCHEMA[0]])\n \n- current_value = entity_pb.EntityProto(\n- current_values[entity_table_key][APP_ENTITY_SCHEMA[0]])\n+ deletions = deletions_for_entity(current_value, composite_indices)\n+ batch.extend(deletions)\n \n- deletions = deletions_for_entity(current_value, composite_indices)\n- batch.extend(deletions)\n+ entity_changes.append({'key': key, 'old': current_value, 'new': None})\n \n- entity_changes.append({'key': key, 'old': current_value, 'new': None})\n+ for group in groups_mutated:\n+ batch.append(\n+ {'table': 'group_updates', 'key': bytearray(group),\n+ 'last_update': txn})\n \n- for group in groups_mutated:\n- batch.append(\n- {'table': 'group_updates', 'key': bytearray(group),\n- 'last_update': txn})\n+ self.datastore_batch.batch_mutate(app, batch, entity_changes, txn)\n \n- self.datastore_batch.batch_mutate(app, batch, entity_changes, txn)\n+ lock.release()\n+ self.transaction_manager.delete_transaction_id(app, txn)\n \n # Process transactional tasks.\n if metadata['tasks']:\n@@ -3295,7 +3314,8 @@ class DatastoreDistributed():\n \n try:\n self.apply_txn_changes(app_id, txn_id)\n- except dbconstants.TxTimeoutException as timeout:\n+ except (dbconstants.TxTimeoutException,\n+ dbconstants.TimeoutError) as timeout:\n return commitres_pb.Encode(), datastore_pb.Error.TIMEOUT, str(timeout)\n except dbconstants.AppScaleDBConnectionError:\n self.logger.exception('DB connection error during commit')\n@@ -3304,17 +3324,10 @@ class DatastoreDistributed():\n except dbconstants.ConcurrentModificationException as error:\n return (commitres_pb.Encode(), datastore_pb.Error.CONCURRENT_TRANSACTION,\n str(error))\n- except dbconstants.TooManyGroupsException as error:\n+ except (dbconstants.TooManyGroupsException,\n+ dbconstants.BadRequest) as error:\n return (commitres_pb.Encode(), datastore_pb.Error.BAD_REQUEST,\n str(error))\n- except entity_lock.LockTimeout as error:\n- return (commitres_pb.Encode(), datastore_pb.Error.TIMEOUT,\n- str(error))\n-\n- try:\n- self.transaction_manager.delete_transaction_id(app_id, txn_id)\n- except dbconstants.BadRequest as error:\n- return '', datastore_pb.Error.BAD_REQUEST, str(error)\n \n return commitres_pb.Encode(), 0, \"\"\n \n" }, { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/dbconstants.py", "new_path": "AppDB/appscale/datastore/dbconstants.py", "diff": "@@ -197,6 +197,10 @@ class InternalError(Exception):\n \"\"\" Indicates that the datastore was unable to perform an operation. 
\"\"\"\n pass\n \n+class TimeoutError(Exception):\n+ \"\"\" Indicates that the datastore timed out while performing an operation. \"\"\"\n+ pass\n+\n class TooManyGroupsException(Exception):\n \"\"\" Indicates that there are too many groups involved in a transaction. \"\"\"\n pass\n" }, { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/scripts/datastore.py", "new_path": "AppDB/appscale/datastore/scripts/datastore.py", "diff": "@@ -645,6 +645,8 @@ class MainHandler(tornado.web.RequestHandler):\n return (putresp_pb.Encode(), 0, \"\")\n except dbconstants.InternalError as error:\n return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ except dbconstants.TimeoutError as error:\n+ return '', datastore_pb.Error.TIMEOUT, str(error)\n except dbconstants.BadRequest as error:\n return '', datastore_pb.Error.BAD_REQUEST, str(error)\n except zktransaction.ZKBadRequest as zkie:\n@@ -732,6 +734,8 @@ class MainHandler(tornado.web.RequestHandler):\n return (delresp_pb.Encode(), 0, \"\")\n except dbconstants.InternalError as error:\n return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n+ except dbconstants.TimeoutError as error:\n+ return '', datastore_pb.Error.TIMEOUT, str(error)\n except dbconstants.BadRequest as error:\n return '', datastore_pb.Error.BAD_REQUEST, str(error)\n except zktransaction.ZKBadRequest as zkie:\n" }, { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/zkappscale/entity_lock.py", "new_path": "AppDB/appscale/datastore/zkappscale/entity_lock.py", "diff": "@@ -211,8 +211,7 @@ class EntityLock(object):\n while True:\n try:\n node = self.client.create(\n- self.create_paths[index], self.data, ephemeral=True,\n- sequence=True)\n+ self.create_paths[index], self.data, sequence=True)\n break\n except NoNodeError:\n self.client.ensure_path(self.paths[index])\n" } ]
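The datastore change above trades the `with lock:` block for an explicit acquire/release so that a failed batch leaves the entity-group lock held for the groomer to resolve. The control flow reduced to a generic sketch; the lock, batch callable, and exception names are stand-ins, not the real EntityLock API:

```python
class LockTimeout(Exception):
    """Raised by the lock when it cannot be acquired in time."""

class CommitTimeout(Exception):
    """Reported back to the client as a datastore timeout."""

def apply_with_sticky_lock(lock, apply_batch):
    """Apply a batch under a group lock, releasing only on success.

    If apply_batch raises, the lock is deliberately left held so a
    separate groomer process can resolve the group later.
    """
    try:
        lock.acquire()
    except LockTimeout:
        raise CommitTimeout('Unable to acquire entity group lock')

    apply_batch()   # any exception here propagates with the lock still held
    lock.release()  # reached only once the batch fully applied

if __name__ == '__main__':
    class DemoLock(object):
        def acquire(self):
            print('lock acquired')

        def release(self):
            print('lock released')

    apply_with_sticky_lock(DemoLock(), lambda: None)
```

The same commit drops ephemeral=True from the lock's ZooKeeper nodes, which is what lets a lock outlive the process that failed mid-commit.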
9006a0055f77afb3fb2eb29b7e9f9fff91888459
appscale/gts
27.05.2018 16:32:48
Apache License 2.0
Simplify rollback_transaction method This moves the protobuffer-related work to the proper layer.
[ { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/datastore_distributed.py", "new_path": "AppDB/appscale/datastore/datastore_distributed.py", "diff": "@@ -5,7 +5,6 @@ import logging\n import md5\n import random\n import sys\n-import threading\n import time\n \n from tornado import gen\n@@ -16,7 +15,8 @@ from appscale.datastore import dbconstants, helper_functions\n from appscale.common.unpackaged import APPSCALE_PYTHON_APPSERVER\n from kazoo.client import KazooState\n from appscale.datastore.dbconstants import (\n- APP_ENTITY_SCHEMA, ID_KEY_LENGTH, MAX_TX_DURATION, Timeout\n+ APP_ENTITY_SCHEMA, ID_KEY_LENGTH, InternalError,\n+ MAX_TX_DURATION, Timeout\n )\n from appscale.datastore.cassandra_env.entity_id_allocator import EntityIDAllocator\n from appscale.datastore.cassandra_env.entity_id_allocator import ScatteredAllocator\n@@ -39,7 +39,6 @@ from appscale.datastore.zkappscale import entity_lock\n from appscale.datastore.zkappscale import zktransaction\n \n sys.path.append(APPSCALE_PYTHON_APPSERVER)\n-from google.appengine.api import api_base_pb\n from google.appengine.api import datastore_errors\n from google.appengine.api.datastore_distributed import _MAX_ACTIONS_PER_TXN\n from google.appengine.datastore import appscale_stub_util\n@@ -3333,24 +3332,21 @@ class DatastoreDistributed():\n commitres_pb = datastore_pb.CommitResponse()\n raise gen.Return((commitres_pb.Encode(), 0, ''))\n \n- def rollback_transaction(self, app_id, http_request_data):\n+ def rollback_transaction(self, app_id, txid):\n \"\"\" Handles the rollback phase of a transaction.\n \n Args:\n app_id: The application ID requesting the rollback.\n- http_request_data: The encoded request, a datstore_pb.Transaction.\n- Returns:\n- An encoded protocol buffer void response.\n+ txid: An integer specifying a transaction ID.\n+ Raises:\n+ InternalError if unable to roll back transaction.\n \"\"\"\n- txn = datastore_pb.Transaction(http_request_data)\n self.logger.info(\n- 'Doing a rollback on transaction {} for {}'.format(txn.handle(), app_id))\n+ 'Doing a rollback on transaction {} for {}'.format(txid, app_id))\n try:\n- self.zookeeper.notify_failed_transaction(app_id, txn.handle())\n- return api_base_pb.VoidProto().Encode(), 0, ''\n- except zktransaction.ZKTransactionException as zkte:\n- self.logger.exception('Unable to rollback {} for {}'.format(txn, app_id))\n- return '', datastore_pb.Error.PERMISSION_DENIED, str(zkte)\n+ self.zookeeper.notify_failed_transaction(app_id, txid)\n+ except zktransaction.ZKTransactionException as error:\n+ raise InternalError(str(error))\n \n def _zk_state_listener(self, state):\n \"\"\" Handles changes to the ZooKeeper connection state.\n" }, { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/scripts/datastore.py", "new_path": "AppDB/appscale/datastore/scripts/datastore.py", "diff": "@@ -358,16 +358,18 @@ class MainHandler(tornado.web.RequestHandler):\n return ('', datastore_pb.Error.CAPABILITY_DISABLED,\n 'Datastore is in read-only mode.')\n \n+ txn = datastore_pb.Transaction(http_request_data)\n try:\n- return datastore_access.rollback_transaction(app_id, http_request_data)\n- except zktransaction.ZKInternalException as error:\n- logger.exception('ZKInternalException during {} for {}'.\n- format(http_request_data, app_id))\n+ datastore_access.rollback_transaction(app_id, txn.handle())\n+ except dbconstants.InternalError as error:\n+ logger.exception('Unable to rollback transaction')\n return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n except Exception as 
error:\n logger.exception('Unable to rollback transaction')\n return '', datastore_pb.Error.INTERNAL_ERROR, str(error)\n \n+ return api_base_pb.VoidProto().Encode(), 0, ''\n+\n @gen.coroutine\n def run_query(self, http_request_data):\n \"\"\" High level function for running queries.\n" }, { "change_type": "MODIFY", "old_path": "AppDB/test/unit/test_datastore_server.py", "new_path": "AppDB/test/unit/test_datastore_server.py", "diff": "@@ -326,9 +326,8 @@ class TestDatastoreServer(testing.AsyncTestCase):\n commit_request = datastore_pb.Transaction()\n commit_request.set_handle(123)\n commit_request.set_app(\"aaa\")\n- http_request = commit_request.Encode()\n- self.assertEquals(dd.rollback_transaction(\"app_id\", http_request),\n- (api_base_pb.VoidProto().Encode(), 0, \"\"))\n+ self.assertEquals(\n+ dd.rollback_transaction(\"app_id\", commit_request.handle()), None)\n \n @staticmethod\n def get_new_entity_proto(app_id, kind, entity_name, prop_name, prop_value, ns=\"\"):\n" } ]
c6d4ab680da88e14ceafe6f7947f0cf858158f2d
appscale/gts
31.07.2018 08:34:18
Apache License 2.0
Handle UpdateIndexes calls This adds index definitions to a project's ZooKeeper node if they aren't there yet.
[ { "change_type": "MODIFY", "old_path": "AdminServer/appscale/admin/__init__.py", "new_path": "AdminServer/appscale/admin/__init__.py", "diff": "@@ -39,6 +39,7 @@ from tornado.ioloop import IOLoop\n from . import utils\n from . import constants\n from .appengine_api import UpdateCronHandler\n+from .appengine_api import UpdateIndexesHandler\n from .appengine_api import UpdateQueuesHandler\n from .base_handler import BaseHandler\n from .constants import (\n@@ -1254,6 +1255,8 @@ def main():\n {'ua_client': ua_client}),\n ('/api/cron/update', UpdateCronHandler,\n {'acc': acc, 'zk_client': zk_client, 'ua_client': ua_client}),\n+ ('/api/datastore/index/add', UpdateIndexesHandler,\n+ {'zk_client': zk_client, 'ua_client': ua_client}),\n ('/api/queue/update', UpdateQueuesHandler,\n {'zk_client': zk_client, 'ua_client': ua_client})\n ])\n" }, { "change_type": "MODIFY", "old_path": "AdminServer/appscale/admin/appengine_api.py", "new_path": "AdminServer/appscale/admin/appengine_api.py", "diff": "@@ -20,6 +20,207 @@ from .utils import queues_from_dict\n logger = logging.getLogger('appscale-admin')\n \n \n+class IndexProperty(object):\n+ \"\"\" Represents a datastore index property. \"\"\"\n+\n+ __slots__ = ['name', 'direction']\n+\n+ def __init__(self, name, direction):\n+ \"\"\" Creates a new IndexProperty object.\n+\n+ Args:\n+ name: A string specifying the property name.\n+ direction: A string specifying the index direction (asc or desc).\n+ \"\"\"\n+ if not name:\n+ raise InvalidConfiguration('Index property missing \"name\"')\n+\n+ if direction not in ('asc', 'desc'):\n+ raise InvalidConfiguration(\n+ 'Invalid \"direction\" value: {}'.format(direction))\n+\n+ self.name = name\n+ self.direction = direction\n+\n+ @property\n+ def id(self):\n+ if self.direction == 'asc':\n+ return self.name\n+ else:\n+ return ','.join([self.name, 'desc'])\n+\n+ def to_dict(self):\n+ \"\"\" Generates a JSON-safe dictionary representation of the property.\n+\n+ Returns:\n+ A dictionary containing the property details.\n+ \"\"\"\n+ return {'name': self.name, 'direction': self.direction}\n+\n+ @classmethod\n+ def from_dict(cls, prop):\n+ \"\"\" Constructs an IndexProperty from a JSON-derived dictionary.\n+\n+ Args:\n+ prop: A dictionary containing the name and direction fields.\n+ Returns:\n+ An IndexProperty object.\n+ \"\"\"\n+ return cls(prop['name'], prop['direction'])\n+\n+\n+class DatastoreIndex(object):\n+ \"\"\" Represents a datastore index. 
\"\"\"\n+\n+ __slots__ = ['kind', 'ancestor', 'properties']\n+\n+ # Separates fields of an encoded index.\n+ ENCODING_DELIMITER = '|'\n+\n+ def __init__(self, kind, ancestor, properties):\n+ \"\"\" Creates a new DatastoreIndex object.\n+\n+ Args:\n+ kind: A string specifying the datastore kind.\n+ ancestor: A boolean indicating whether or not the index is for\n+ satisfying ancestor queries.\n+ properties: A list of IndexProperty objects.\n+ \"\"\"\n+ self.kind = kind\n+ self.ancestor = ancestor\n+ self.properties = properties\n+\n+ @property\n+ def id(self):\n+ encoded_ancestor = '1' if self.ancestor else '0'\n+ encoded_properties = self.ENCODING_DELIMITER.join(\n+ [prop.id for prop in self.properties])\n+ return self.ENCODING_DELIMITER.join(\n+ [self.kind, encoded_ancestor, encoded_properties])\n+\n+ @classmethod\n+ def from_yaml(cls, entry):\n+ \"\"\" Constructs a DatastoreIndex from a parsed index.yaml entry.\n+\n+ Args:\n+ entry: A dictionary generated from a index.yaml file.\n+ Returns:\n+ A DatastoreIndex object.\n+ Raises:\n+ InvalidConfiguration exception if entry is invalid.\n+ \"\"\"\n+ kind = entry.get('kind')\n+ if not kind:\n+ raise InvalidConfiguration('Index entry is missing \"kind\" field')\n+\n+ ancestor = entry.get('ancestor', False)\n+ if not isinstance(ancestor, bool):\n+ if ancestor.lower() not in ('yes', 'no', 'true', 'false'):\n+ raise InvalidConfiguration(\n+ 'Invalid \"ancestor\" value: {}'.format(ancestor))\n+\n+ ancestor = ancestor.lower() in ('yes', 'true')\n+\n+ configured_props = entry.get('properties', [])\n+ if not configured_props:\n+ raise InvalidConfiguration('Index missing properties')\n+\n+ properties = [IndexProperty(prop.get('name'), prop.get('direction', 'asc'))\n+ for prop in configured_props]\n+ return cls(kind, ancestor, properties)\n+\n+ def to_dict(self):\n+ \"\"\" Generates a JSON-safe dictionary representation of the index.\n+\n+ Returns:\n+ A dictionary containing the index details.\n+ \"\"\"\n+ return {\n+ 'kind': self.kind,\n+ 'ancestor': self.ancestor,\n+ 'properties': [prop.to_dict() for prop in self.properties]\n+ }\n+\n+ @classmethod\n+ def from_dict(cls, entry):\n+ \"\"\" Constructs a DatastoreIndex from a JSON-derived dictionary.\n+\n+ Args:\n+ entry: A dictionary containing the kind, ancestor, and properties fields.\n+ Returns:\n+ A DatastoreIndex object.\n+ \"\"\"\n+ properties = [IndexProperty.from_dict(prop)\n+ for prop in entry['properties']]\n+ return cls(entry['kind'], entry['ancestor'], properties)\n+\n+\n+class UpdateIndexesHandler(BaseHandler):\n+ \"\"\" Handles UpdateIndexes operations. \"\"\"\n+ def initialize(self, zk_client, ua_client):\n+ \"\"\" Defines required resources to handle requests.\n+\n+ Args:\n+ zk_client: A KazooClient.\n+ ua_client: A UAClient.\n+ \"\"\"\n+ self.zk_client = zk_client\n+ self.ua_client = ua_client\n+\n+ def post(self):\n+ \"\"\" Handles UpdateIndexes operations. 
\"\"\"\n+ project_id = self.get_argument('app_id', None)\n+ if project_id is None:\n+ raise CustomHTTPError(HTTPCodes.BAD_REQUEST,\n+ message='app_id parameter is required')\n+ self.authenticate(project_id, self.ua_client)\n+\n+ try:\n+ payload = yaml.safe_load(self.request.body)\n+ except ParserError:\n+ raise CustomHTTPError(HTTPCodes.BAD_REQUEST,\n+ message='Payload must be valid YAML')\n+\n+ try:\n+ given_indexes = payload['indexes']\n+ except KeyError:\n+ raise CustomHTTPError(HTTPCodes.BAD_REQUEST,\n+ message='Payload must contain \"indexes\"')\n+\n+ # If there are no new indexes being added, there's no work to be done.\n+ if not given_indexes:\n+ return\n+\n+ try:\n+ given_indexes = [DatastoreIndex.from_yaml(index)\n+ for index in given_indexes]\n+ except InvalidConfiguration as error:\n+ raise CustomHTTPError(HTTPCodes.BAD_REQUEST, message=str(error))\n+\n+ indexes_node = '/appscale/projects/{}/indexes'.format(project_id)\n+ try:\n+ existing_indexes, znode_stat = self.zk_client.get(indexes_node)\n+ except NoNodeError:\n+ encoded_indexes = json.dumps(\n+ [index.to_dict() for index in given_indexes])\n+ self.zk_client.create(indexes_node, encoded_indexes)\n+ return\n+\n+ combined_indexes = [DatastoreIndex.from_dict(index)\n+ for index in json.loads(existing_indexes)]\n+ existing_index_ids = {index.id for index in combined_indexes}\n+ for new_index in given_indexes:\n+ if new_index.id not in existing_index_ids:\n+ combined_indexes.append(new_index)\n+\n+ encoded_indexes = json.dumps(\n+ [index.to_dict() for index in combined_indexes])\n+ self.zk_client.set(indexes_node, encoded_indexes,\n+ version=znode_stat.version)\n+\n+ logger.info('Updated indexes for {}'.format(project_id))\n+\n+\n class UpdateQueuesHandler(BaseHandler):\n \"\"\" Handles UpdateQueues operations. \"\"\"\n def initialize(self, zk_client, ua_client):\n" } ]
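The UpdateIndexes handler above de-duplicates index definitions by an encoded id of the form kind|ancestor-flag|prop[,desc]|... before writing them back to ZooKeeper. A plain-Python sketch of that encoding and the merge step, with (name, direction) tuples standing in for the IndexProperty and DatastoreIndex classes:

```python
DELIMITER = '|'

def index_id(kind, ancestor, properties):
    """Build the identifier used to detect duplicate index definitions."""
    encoded_props = DELIMITER.join(
        name if direction == 'asc' else ','.join([name, 'desc'])
        for name, direction in properties)
    return DELIMITER.join([kind, '1' if ancestor else '0', encoded_props])

def merge_indexes(existing, given):
    """Append only those given indexes whose id is not already present."""
    combined = list(existing)
    known_ids = set(index_id(*index) for index in combined)
    for index in given:
        if index_id(*index) not in known_ids:
            combined.append(index)
    return combined

if __name__ == '__main__':
    existing = [('Greeting', False, [('date', 'desc')])]
    given = [('Greeting', False, [('date', 'desc')]),                    # duplicate, skipped
             ('Greeting', True, [('author', 'asc'), ('date', 'desc')])]  # new, kept
    assert len(merge_indexes(existing, given)) == 2
```

The handler then serializes the combined list back to JSON and writes it with the znode's version number, so a concurrent update is not silently clobbered.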
865405bd4981add596149b09f72bdf5599289057
appscale/gts
03.01.2019 18:30:43
Apache License 2.0
Improve speed of rebalance script This removes the call to `nodetool ring`, which can get unreasonably slow as the amount of data in a BOP cluster increases. It also adds a couple flags that allow the `nodetool status` call to be skipped if the user is already sure the sanity checks will pass.
[ { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/cassandra_env/rebalance.py", "new_path": "AppDB/appscale/datastore/cassandra_env/rebalance.py", "diff": "@@ -1,4 +1,5 @@\n from __future__ import division\n+import argparse\n import logging\n import os\n \n@@ -17,28 +18,6 @@ MAX_DRIFT = .3\n logger = logging.getLogger(__name__)\n \n \n-class InvalidUnits(Exception):\n- \"\"\" Indicates an unexpected units value. \"\"\"\n- pass\n-\n-\n-def load_bytes(value, units):\n- \"\"\" Convert a human-friendly size to bytes.\n-\n- Args:\n- value: A float containing a size.\n- units: A string specifying the units.\n- Returns:\n- An integer representing the number of bytes.\n- Raises:\n- InvalidUnits if the units string is not recognized.\n- \"\"\"\n- magnitudes = {'KiB': 1, 'MiB': 2, 'GiB': 3, 'TiB': 4}\n- if units not in magnitudes:\n- raise InvalidUnits('{} not a recognized unit'.format(units))\n- return int(value * 1024 ** magnitudes[units])\n-\n-\n def get_status():\n \"\"\" Return the cluster status in a structured way.\n \n@@ -59,42 +38,70 @@ def get_status():\n return nodes\n \n \n-def get_ring():\n- \"\"\" Return the ring status in a structured way.\n+def get_gossip():\n+ \"\"\" Return the cluster gossip in a structured way.\n \n Returns:\n A list of nodes represented by dictionaries.\n \"\"\"\n- ring_output = check_output([NODE_TOOL, 'ring', KEYSPACE])\n- ring = []\n- index = 0\n- for line in ring_output.splitlines():\n- fields = line.split()\n- if len(fields) != 8:\n- continue\n+ nodes = []\n+ current_node = None\n+ for line in check_output([NODE_TOOL, 'gossipinfo']).splitlines():\n+ if line.startswith('/'):\n+ if current_node is not None:\n+ nodes.append(current_node)\n \n- ring.append({\n- 'index': index,\n- 'ip': fields[0],\n- 'status': fields[2],\n- 'state': fields[3],\n- 'load': load_bytes(float(fields[4]), fields[5]),\n- 'token': fields[7]\n- })\n- index += 1\n+ current_node = {'ip': line.strip()[1:]}\n+\n+ if line.strip().startswith('STATUS'):\n+ current_node['ready'] = 'NORMAL' in line\n+ current_node['token'] = line.split(',')[-1]\n+\n+ if line.strip().startswith('LOAD'):\n+ current_node['load'] = float(line.split(':')[-1])\n+\n+ if current_node is not None:\n+ nodes.append(current_node)\n+\n+ if not nodes:\n+ raise Exception('Unable to collect gossip for any nodes')\n \n- assert len(ring) > 0\n+ required_fields = ['ip', 'ready', 'load', 'token']\n+ for node in nodes:\n+ for required_field in required_fields:\n+ if required_field not in node:\n+ raise Exception('Unable to parse all fields for {}'.format(node))\n+\n+ return nodes\n+\n+\n+def get_ring(gossip):\n+ \"\"\" Return the ring status in a structured way.\n+\n+ Args:\n+ gossip: A list of gossip info for each node.\n+\n+ Returns:\n+ A list of nodes represented by dictionaries.\n+ \"\"\"\n+ nodes = sorted(gossip, key=lambda node: node['token'])\n+ for index, node in enumerate(nodes):\n+ node['index'] = index\n+\n+ if not nodes:\n+ raise Exception('Unable to find nodes in ring')\n \n # Calculate skew and diff for each node in ring.\n- ideal_load = sum(node['load'] for node in ring) / len(ring)\n- for index, node in enumerate(ring):\n+ ideal_load = sum(node['load'] for node in nodes) / len(nodes)\n+ for index, node in enumerate(nodes):\n try:\n node['skew'] = abs(node['load'] - ideal_load) / ideal_load\n except ZeroDivisionError:\n node['skew'] = 0\n- node['diff'] = abs(node['load'] - ring[index - 1]['load'])\n \n- return ring\n+ node['diff'] = abs(node['load'] - nodes[index - 1]['load'])\n+\n+ return nodes\n \n \n 
def equalize(node1, node2):\n@@ -142,20 +149,35 @@ def equalize(node1, node2):\n \n def main():\n logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)\n- logger.info('Fetching status')\n- status = get_status()\n \n- # All nodes must have just one token.\n- assert {node['tokens'] for node in status} == {1}\n+ parser = argparse.ArgumentParser()\n+ parser.add_argument(\n+ '--skip-tokens-check', action='store_true',\n+ help='Assume that all nodes own one token')\n+ parser.add_argument(\n+ '--skip-ownership-check', action='store_true',\n+ help='Assume that the node count exceeds the replication factor')\n+ args = parser.parse_args()\n+\n+ if not args.skip_tokens_check or not args.skip_ownership_check:\n+ logger.info('Fetching status')\n+ status = get_status()\n+\n+ if (not args.skip_tokens_check and\n+ any(node['tokens'] != 1 for node in status)):\n+ raise Exception('All nodes must have exactly one token')\n+\n+ if (not args.skip_ownership_check and\n+ any(node['owns'] != float(100) for node in status)):\n+ raise Exception('All nodes already own every key')\n \n- # There must be more than one node up to balance.\n- assert len([node for node in status if node['state'] == 'UN']) > 1\n+ logger.info('Fetching gossip')\n+ gossip = get_gossip()\n \n- # If all nodes own everything, a rebalance is not possible.\n- assert {node['owns'] for node in status} != {float(100)}\n+ if sum(node['ready'] for node in gossip) <= 1:\n+ raise Exception('There must be more than one node up to balance')\n \n- logger.info('Fetching ring')\n- ring = get_ring()\n+ ring = get_ring(gossip)\n if max(node['skew'] for node in ring) < MAX_DRIFT:\n logger.info('All nodes within {}% of ideal load'.format(MAX_DRIFT * 100))\n return\n" } ]
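The rebalance diff above replaces the `nodetool ring` call with a parse of `nodetool gossipinfo`, reading only the address, STATUS, and LOAD lines. A runnable sketch of that parser; the sample text is invented to match the fields the parser reads, not captured from a real cluster:

```python
SAMPLE_GOSSIP = """\
/10.0.0.1
  generation:1549034632
  heartbeat:2431
  STATUS:14:NORMAL,-9223372036854775808
  LOAD:20:105181.0
/10.0.0.2
  generation:1549034670
  heartbeat:2388
  STATUS:14:NORMAL,0
  LOAD:20:98713.0
"""

def parse_gossip(output):
    """Parse gossipinfo-style output into a list of node dictionaries."""
    nodes = []
    current_node = None
    for line in output.splitlines():
        if line.startswith('/'):
            if current_node is not None:
                nodes.append(current_node)
            current_node = {'ip': line.strip()[1:]}

        if line.strip().startswith('STATUS'):
            current_node['ready'] = 'NORMAL' in line
            current_node['token'] = line.split(',')[-1]

        if line.strip().startswith('LOAD'):
            current_node['load'] = float(line.split(':')[-1])

    if current_node is not None:
        nodes.append(current_node)
    return nodes

if __name__ == '__main__':
    for node in parse_gossip(SAMPLE_GOSSIP):
        print(node)
```

Gossip already carries each node's token and load, so the script no longer pays for a ring walk whose cost grows with the amount of data in the cluster.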
d78f837f3d0081832f70ab9483fd698ddbd919fb
appscale/gts
12.01.2019 22:13:51
Apache License 2.0
Stop routing deleted versions This ensures that deleted versions are absent from the HAProxy configuration file when HAProxy is reloaded.
[ { "change_type": "MODIFY", "old_path": "AdminServer/appscale/admin/routing/haproxy.py", "new_path": "AdminServer/appscale/admin/routing/haproxy.py", "diff": "@@ -16,6 +16,11 @@ logger = logging.getLogger('appscale-admin')\n CONFIG_DIR = os.path.join('/', 'etc', 'haproxy')\n \n \n+class InvalidConfig(Exception):\n+ \"\"\" Indicates that a given HAProxy configuration cannot be enforced. \"\"\"\n+ pass\n+\n+\n class HAProxyAppVersion(object):\n \"\"\" Represents a version's HAProxy configuration. \"\"\"\n \n@@ -110,6 +115,14 @@ class HAProxy(object):\n Returns:\n A string containing a complete HAProxy configuration.\n \"\"\"\n+ unique_ports = set()\n+ for version_key, version in self.versions.items():\n+ if version.port in unique_ports:\n+ raise InvalidConfig('Port {} is used by more than one '\n+ 'version'.format(version.port))\n+\n+ unique_ports.add(version.port)\n+\n version_blocks = [self.versions[key].block\n for key in sorted(self.versions.keys())\n if self.versions[key].block]\n@@ -136,7 +149,11 @@ class HAProxy(object):\n yield gen.sleep(wait_time)\n self.last_reload = time.time()\n \n- new_content = self.config\n+ try:\n+ new_content = self.config\n+ except InvalidConfig as error:\n+ logger.error(str(error))\n+ return\n \n try:\n with open(self.APP_CONFIG, 'r') as app_config_file:\n" }, { "change_type": "MODIFY", "old_path": "AdminServer/appscale/admin/routing/routing_manager.py", "new_path": "AdminServer/appscale/admin/routing/routing_manager.py", "diff": "@@ -27,7 +27,7 @@ class VersionRoutingManager(object):\n haproxy: An HAProxy object.\n \"\"\"\n # Indicates that the watch is still needed.\n- self.active = True\n+ self._active = True\n \n self._version_key = version_key\n self._haproxy = haproxy\n@@ -46,6 +46,14 @@ class VersionRoutingManager(object):\n project_id, service_id, version_id)\n self._zk_client.DataWatch(version_node, self._update_version_watch)\n \n+ def stop(self):\n+ \"\"\" Stops routing all instances for the version. 
\"\"\"\n+ self._active = False\n+ self._instances = []\n+ self._port = None\n+ self._max_connections = None\n+ self._update_version_block()\n+\n def _update_instances(self, instances):\n \"\"\" Handles changes to list of registered instances.\n \n@@ -61,7 +69,7 @@ class VersionRoutingManager(object):\n Args:\n versions: A list of strings specifying registered instances.\n \"\"\"\n- if not self.active:\n+ if not self._active:\n return False\n \n IOLoop.instance().add_callback(self._update_instances, instances)\n@@ -72,14 +80,14 @@ class VersionRoutingManager(object):\n Args:\n encoded_version: A JSON-encoded string containing version details.\n \"\"\"\n- try:\n- version_details = json.loads(encoded_version)\n- except (TypeError, ValueError):\n+ if encoded_version is None:\n self._port = None\n self._max_connections = None\n- logger.warning('Invalid version details: {}'.format(encoded_version))\n+ self._update_version_block()\n return\n \n+ version_details = json.loads(encoded_version)\n+\n # If the threadsafe value is not defined, the application can handle\n # concurrent requests.\n threadsafe = version_details.get('threadsafe', True)\n@@ -98,7 +106,10 @@ class VersionRoutingManager(object):\n \n # If the port or max_connections is not known, it's not possible to route\n # the version.\n- if not self._port or not self._max_connections:\n+ if (self._port is None or self._max_connections is None or\n+ not self._instances):\n+ self._haproxy.versions.pop(self._version_key, None)\n+ self._haproxy.reload()\n return\n \n if self._version_key not in self._haproxy.versions:\n@@ -117,7 +128,7 @@ class VersionRoutingManager(object):\n Args:\n version_details: A JSON-encoded string containing version details.\n \"\"\"\n- if not self.active:\n+ if not self._active:\n return False\n \n IOLoop.instance().add_callback(self._update_version, version_details)\n@@ -156,7 +167,7 @@ class RoutingManager(object):\n to_stop = [version for version in self._versions\n if version not in new_version_list]\n for version_key in to_stop:\n- self._versions[version_key].active = False\n+ self._versions[version_key].stop()\n del self._versions[version_key]\n \n for version_key in new_version_list:\n" } ]
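The routing diff above makes the HAProxy configuration refuse to render when two versions claim the same port, and skips the reload instead of writing a conflicting file. A compact sketch of that guard, with plain dicts standing in for the HAProxyAppVersion objects:

```python
class InvalidConfig(Exception):
    """A routing configuration that cannot be enforced."""

def render_config(versions):
    """Concatenate version blocks, rejecting duplicate ports."""
    unique_ports = set()
    for version_key, version in versions.items():
        if version['port'] in unique_ports:
            raise InvalidConfig('Port {} is used by more than one '
                                'version'.format(version['port']))
        unique_ports.add(version['port'])

    return '\n'.join(versions[key]['block'] for key in sorted(versions))

def reload_config(versions):
    """Log and skip the reload rather than apply an invalid config."""
    try:
        return render_config(versions)
    except InvalidConfig as error:
        print('skipping reload: {}'.format(error))
        return None

if __name__ == '__main__':
    ok = {'app_default_v1': {'port': 20000, 'block': 'listen v1 ...'},
          'app_default_v2': {'port': 20001, 'block': 'listen v2 ...'}}
    clash = {'a': {'port': 20000, 'block': ''},
             'b': {'port': 20000, 'block': ''}}
    assert reload_config(ok) is not None
    assert reload_config(clash) is None
```

The companion change gives each VersionRoutingManager a stop() method that clears its port and instance list, which in turn drops the deleted version from the rendered blocks on the next reload.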
9a9d19a0b6e4be31a36ebb21a3e8cf0b4650bb6a
appscale/gts
18.03.2019 13:16:07
Apache License 2.0
Move protobuffer handling to different level Parsing the commit request and populating the response should happen at the protobuffer interface level rather than in DatastoreDistributed.
[ { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/datastore_distributed.py", "new_path": "AppDB/appscale/datastore/datastore_distributed.py", "diff": "@@ -3137,38 +3137,6 @@ class DatastoreDistributed():\n IOLoop.current().spawn_callback(self.enqueue_transactional_tasks, app,\n metadata['tasks'])\n \n- @gen.coroutine\n- def commit_transaction(self, app_id, http_request_data):\n- \"\"\" Handles the commit phase of a transaction.\n-\n- Args:\n- app_id: The application ID requesting the transaction commit.\n- http_request_data: The encoded request of datastore_pb.Transaction.\n- Returns:\n- An encoded protocol buffer commit response.\n- \"\"\"\n- transaction_pb = datastore_pb.Transaction(http_request_data)\n- txn_id = transaction_pb.handle()\n-\n- try:\n- yield self.apply_txn_changes(app_id, txn_id)\n- except (dbconstants.TxTimeoutException, dbconstants.Timeout) as timeout:\n- raise gen.Return(('', datastore_pb.Error.TIMEOUT, str(timeout)))\n- except dbconstants.AppScaleDBConnectionError:\n- self.logger.exception('DB connection error during commit')\n- raise gen.Return(\n- ('', datastore_pb.Error.INTERNAL_ERROR,\n- 'Datastore connection error on Commit request.'))\n- except dbconstants.ConcurrentModificationException as error:\n- raise gen.Return(\n- ('', datastore_pb.Error.CONCURRENT_TRANSACTION, str(error)))\n- except (dbconstants.TooManyGroupsException,\n- dbconstants.BadRequest) as error:\n- raise gen.Return(('', datastore_pb.Error.BAD_REQUEST, str(error)))\n-\n- commitres_pb = datastore_pb.CommitResponse()\n- raise gen.Return((commitres_pb.Encode(), 0, ''))\n-\n def rollback_transaction(self, app_id, txid):\n \"\"\" Handles the rollback phase of a transaction.\n \n" }, { "change_type": "MODIFY", "old_path": "AppDB/appscale/datastore/scripts/datastore.py", "new_path": "AppDB/appscale/datastore/scripts/datastore.py", "diff": "@@ -339,9 +339,26 @@ class MainHandler(tornado.web.RequestHandler):\n ('', datastore_pb.Error.CAPABILITY_DISABLED,\n 'Datastore is in read-only mode.'))\n \n- result = yield datastore_access.commit_transaction(\n- app_id, http_request_data)\n- raise gen.Return(result)\n+ txid = datastore_pb.Transaction(http_request_data).handle()\n+\n+ try:\n+ yield datastore_access.apply_txn_changes(app_id, txid)\n+ except (dbconstants.TxTimeoutException, dbconstants.Timeout) as timeout:\n+ raise gen.Return(('', datastore_pb.Error.TIMEOUT, str(timeout)))\n+ except dbconstants.AppScaleDBConnectionError:\n+ logger.exception('DB connection error during commit')\n+ raise gen.Return(\n+ ('', datastore_pb.Error.INTERNAL_ERROR,\n+ 'Datastore connection error on Commit request.'))\n+ except dbconstants.ConcurrentModificationException as error:\n+ raise gen.Return(\n+ ('', datastore_pb.Error.CONCURRENT_TRANSACTION, str(error)))\n+ except (dbconstants.TooManyGroupsException,\n+ dbconstants.BadRequest) as error:\n+ raise gen.Return(('', datastore_pb.Error.BAD_REQUEST, str(error)))\n+\n+ commitres_pb = datastore_pb.CommitResponse()\n+ raise gen.Return((commitres_pb.Encode(), 0, ''))\n \n def rollback_transaction_request(self, app_id, http_request_data):\n \"\"\" Handles the rollback phase of a transaction.\n" }, { "change_type": "MODIFY", "old_path": "AppDB/test/unit/test_datastore_server.py", "new_path": "AppDB/test/unit/test_datastore_server.py", "diff": "@@ -281,26 +281,6 @@ class TestDatastoreServer(testing.AsyncTestCase):\n })\n self.assertEqual(fetched[1], ['test\\x00blah\\x00test_kind:bob\\x01'])\n \n- @testing.gen_test\n- def test_commit_transaction(self):\n- db_batch = 
flexmock()\n- db_batch.should_receive('valid_data_version_sync').and_return(True)\n-\n- zk_client = flexmock()\n- zk_client.should_receive('add_listener')\n-\n- zookeeper = flexmock(handle=zk_client)\n- transaction_manager = flexmock(\n- delete_transaction_id=lambda project, txid: None)\n- dd = DatastoreDistributed(db_batch, transaction_manager, zookeeper)\n- flexmock(dd).should_receive('apply_txn_changes').and_return(ASYNC_NONE)\n- commit_request = datastore_pb.Transaction()\n- commit_request.set_handle(123)\n- commit_request.set_app(\"aaa\")\n- http_request = commit_request.Encode()\n- result = yield dd.commit_transaction(\"app_id\", http_request)\n- self.assertEquals(result, (datastore_pb.CommitResponse().Encode(), 0, \"\"))\n-\n def test_rollback_transcation(self):\n db_batch = flexmock()\n db_batch.should_receive('valid_data_version_sync').and_return(True)\n" } ]
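The commit above moves the Commit protobuffer work into the request handler, which now translates storage-layer exceptions into wire-level error tuples. The shape of that translation, sketched without the App Engine protobufs or Tornado coroutines; the error-code constants and exception names below are placeholders, not the real datastore_pb.Error values:

```python
# Placeholder codes standing in for the datastore_pb.Error values.
OK, TIMEOUT, INTERNAL_ERROR, CONCURRENT_TRANSACTION, BAD_REQUEST = range(5)

class Timeout(Exception): pass
class DBConnectionError(Exception): pass
class ConcurrentModification(Exception): pass
class BadRequest(Exception): pass

def handle_commit(apply_txn_changes, txid):
    """Run the datastore-level commit and map failures to error codes."""
    try:
        apply_txn_changes(txid)
    except Timeout as error:
        return '', TIMEOUT, str(error)
    except DBConnectionError:
        return '', INTERNAL_ERROR, 'Datastore connection error on Commit request.'
    except ConcurrentModification as error:
        return '', CONCURRENT_TRANSACTION, str(error)
    except BadRequest as error:
        return '', BAD_REQUEST, str(error)
    return 'encoded CommitResponse', OK, ''

if __name__ == '__main__':
    assert handle_commit(lambda txid: None, 1)[1] == OK

    def fail(txid):
        raise Timeout('transaction took too long')
    assert handle_commit(fail, 2)[1] == TIMEOUT
```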
6b41cdad4023a21c21dbb78f9bacfbfe5bcf9e8f
appscale/gts
22.07.2019 19:31:05
Apache License 2.0
Add service account name field to backup form This allows users to schedule a mapreduce-backed job with a custom service account name for backing up to an arbitrary GCS account.
[ { "change_type": "MODIFY", "old_path": "AppServer/google/appengine/ext/datastore_admin/backup_handler.py", "new_path": "AppServer/google/appengine/ext/datastore_admin/backup_handler.py", "diff": "@@ -675,7 +675,8 @@ def _perform_backup(run_as_a_service, kinds, selected_namespace,\n \n if not gcs_path_prefix:\n raise BackupValidationError('GCS path missing.')\n- bucket_name, path_prefix = validate_and_split_gcs_path(gcs_path_prefix)\n+ bucket_name, path_prefix = validate_and_split_gcs_path(\n+ gcs_path_prefix, mapper_params['account_id'])\n mapper_params['gs_bucket_name'] = (\n '%s/%s' % (bucket_name, path_prefix)).rstrip('/')\n naming_format = '$name/$id/output-$num'\n@@ -809,6 +810,12 @@ class DoBackupHandler(BaseDoHandler):\n if BackupInformation.name_exists(backup):\n raise BackupValidationError('Backup \"%s\" already exists.' % backup)\n mapper_params = _get_basic_mapper_params(self)\n+\n+ # AppScale: Use custom service account if specified.\n+ account_id = self.request.get('service_account_name', None)\n+ mapper_params['account_id'] = account_id\n+ mapper_params['tmp_account_id'] = account_id\n+\n backup_result = _perform_backup(\n self.request.get('run_as_a_service', False),\n self.request.get_all('kind'),\n@@ -1253,12 +1260,14 @@ def BackupCompleteHandler(operation, job_id, mapreduce_state):\n mapreduce_spec.params['backup_info_pk'],\n _get_gcs_path_prefix_from_params_dict(mapreduce_spec.mapper.params),\n filenames,\n- mapreduce_spec.params.get('done_callback_queue'))\n+ mapreduce_spec.params.get('done_callback_queue'),\n+ mapreduce_spec.mapper.params['output_writer']['account_id'])\n \n \n @db.transactional\n def _perform_backup_complete(\n- operation, job_id, kind, backup_info_pk, gcs_path_prefix, filenames, queue):\n+ operation, job_id, kind, backup_info_pk, gcs_path_prefix, filenames, queue,\n+ account_id=None):\n backup_info = BackupInformation.get(backup_info_pk)\n if backup_info:\n if job_id in backup_info.active_jobs:\n@@ -1277,6 +1286,7 @@ def _perform_backup_complete(\n if operation.status == utils.DatastoreAdminOperation.STATUS_COMPLETED:\n deferred.defer(finalize_backup_info, backup_info.key(),\n gcs_path_prefix,\n+ account_id,\n _url=config.DEFERRED_PATH,\n _queue=queue,\n _transactional=True)\n@@ -1284,7 +1294,7 @@ def _perform_backup_complete(\n logging.warn('BackupInfo was not found for %s', backup_info_pk)\n \n \n-def finalize_backup_info(backup_info_pk, gcs_path_prefix):\n+def finalize_backup_info(backup_info_pk, gcs_path_prefix, account_id=None):\n \"\"\"Finalize the state of BackupInformation and creates info file for GS.\"\"\"\n \n def get_backup_info():\n@@ -1301,7 +1311,8 @@ def finalize_backup_info(backup_info_pk, gcs_path_prefix):\n \n \n \n- gs_handle = BackupInfoWriter(gcs_path_prefix).write(backup_info)[0]\n+ backup_info_writer = BackupInfoWriter(gcs_path_prefix, account_id)\n+ gs_handle = backup_info_writer.write(backup_info)[0]\n \n def set_backup_info_with_finalize_info():\n backup_info = get_backup_info()\n@@ -1326,13 +1337,14 @@ def parse_backup_info_file(content):\n class BackupInfoWriter(object):\n \"\"\"A class for writing Datastore backup metadata files.\"\"\"\n \n- def __init__(self, gcs_path_prefix):\n+ def __init__(self, gcs_path_prefix, account_id=None):\n \"\"\"Construct a BackupInfoWriter.\n \n Args:\n gcs_path_prefix: (string) gcs prefix used for creating the backup.\n \"\"\"\n self.__gcs_path_prefix = gcs_path_prefix\n+ self._account_id = account_id\n \n def write(self, backup_info):\n \"\"\"Write the metadata files for the given 
backup_info.\n@@ -1364,7 +1376,7 @@ class BackupInfoWriter(object):\n \"\"\"\n filename = self._generate_filename(backup_info, '.backup_info')\n backup_info.gs_handle = filename\n- with GCSUtil.open(filename, 'w') as info_file:\n+ with GCSUtil.open(filename, 'w', _account_id=self._account_id) as info_file:\n with records.RecordsWriter(info_file) as writer:\n \n writer.write('1')\n@@ -1397,7 +1409,7 @@ class BackupInfoWriter(object):\n backup = self._create_kind_backup(backup_info, kind_backup_files)\n filename = self._generate_filename(\n backup_info, '.%s.backup_info' % kind_backup_files.backup_kind)\n- self._write_kind_backup_info_file(filename, backup)\n+ self._write_kind_backup_info_file(filename, backup, self._account_id)\n filenames.append(filename)\n return filenames\n \n@@ -1425,14 +1437,14 @@ class BackupInfoWriter(object):\n return backup\n \n @classmethod\n- def _write_kind_backup_info_file(cls, filename, backup):\n+ def _write_kind_backup_info_file(cls, filename, backup, account_id=None):\n \"\"\"Writes a kind backup_info.\n \n Args:\n filename: The name of the file to be created as string.\n backup: apphosting.ext.datastore_admin.Backup proto.\n \"\"\"\n- with GCSUtil.open(filename, 'w') as f:\n+ with GCSUtil.open(filename, 'w', _account_id=account_id) as f:\n f.write(backup.SerializeToString())\n \n \n@@ -1948,7 +1960,7 @@ def is_accessible_bucket_name(bucket_name):\n return result and result.status_code == 200\n \n \n-def verify_bucket_writable(bucket_name):\n+def verify_bucket_writable(bucket_name, account_id=None):\n \"\"\"Verify the application can write to the specified bucket.\n \n Args:\n@@ -1959,7 +1971,8 @@ def verify_bucket_writable(bucket_name):\n \"\"\"\n path = '/gs/%s/%s' % (bucket_name, TEST_WRITE_FILENAME_PREFIX)\n try:\n- gcs_stats = GCSUtil.listbucket(path, max_keys=MAX_KEYS_LIST_SIZE)\n+ gcs_stats = GCSUtil.listbucket(path, max_keys=MAX_KEYS_LIST_SIZE,\n+ _account_id=account_id)\n file_names = [f.filename for f in gcs_stats]\n except (cloudstorage.AuthorizationError, cloudstorage.ForbiddenError):\n raise BackupValidationError('Bucket \"%s\" not accessible' % bucket_name)\n@@ -1981,12 +1994,12 @@ def verify_bucket_writable(bucket_name):\n (bucket_name, TEST_WRITE_FILENAME_PREFIX, gen))\n file_name_try += 1\n try:\n- with GCSUtil.open(file_name, 'w') as f:\n+ with GCSUtil.open(file_name, 'w', _account_id=account_id) as f:\n f.write('test')\n except cloudstorage.ForbiddenError:\n raise BackupValidationError('Bucket \"%s\" is not writable' % bucket_name)\n try:\n- GCSUtil.delete(file_name)\n+ GCSUtil.delete(file_name, _account_id=account_id)\n except cloudstorage.Error:\n logging.warn('Failed to delete test file %s', file_name)\n \n@@ -2016,11 +2029,11 @@ def parse_gs_handle(gs_handle):\n return (tokens[0], '') if len(tokens) == 1 else tuple(tokens)\n \n \n-def validate_and_split_gcs_path(gcs_path):\n+def validate_and_split_gcs_path(gcs_path, account_id=None):\n bucket_name, path = parse_gs_handle(gcs_path)\n path = path.rstrip('/')\n validate_gcs_bucket_name(bucket_name)\n- verify_bucket_writable(bucket_name)\n+ verify_bucket_writable(bucket_name, account_id)\n return bucket_name, path\n \n \n" }, { "change_type": "MODIFY", "old_path": "AppServer/google/appengine/ext/datastore_admin/templates/confirm_backup.html", "new_path": "AppServer/google/appengine/ext/datastore_admin/templates/confirm_backup.html", "diff": "@@ -93,6 +93,14 @@\n <input type=\"text\" id=\"gs_bucket_name\" name=\"gs_bucket_name\" value=\"\" />\n </td>\n </tr>\n+ <tr 
id=\"gs_service_account_tr\">\n+ <td>\n+ Service account name\n+ <img class=\"ae-help-icon\" src=\"{{ base_path }}/static/img/help.gif\" height=\"14\" width=\"14\" alt=\"help\"\n+ title=\"Use the client_email field from any custom service accounts you've defined.\">\n+ <input name=\"service_account_name\">\n+ </td>\n+ </tr>\n </table>\n \n <table style=\"margin-top: 1em;\"><tr>\n" } ]